xen-unstable: view xen/include/asm-x86/hvm/vmx/vmx.h @ 14635:5c52e5ca8459

hvm: Clean up handling of exception intercepts.
Only intercept #DB/#BP if a debugger is attached.

Signed-off-by: Keir Fraser <keir@xensource.com>
Date: Wed Mar 28 18:47:17 2007 +0100

/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#ifndef __ASM_X86_HVM_VMX_VMX_H__
#define __ASM_X86_HVM_VMX_VMX_H__

#include <xen/sched.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/i387.h>
#include <asm/hvm/trace.h>

void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_asm_do_vmentry(void);
void vmx_intr_assist(void);
void vmx_do_resume(struct vcpu *);
void set_guest_time(struct vcpu *v, u64 gtime);

extern unsigned int cpu_rev;

/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1
#define EXIT_REASON_TRIPLE_FAULT        2
#define EXIT_REASON_INIT                3
#define EXIT_REASON_SIPI                4
#define EXIT_REASON_IO_SMI              5
#define EXIT_REASON_OTHER_SMI           6
#define EXIT_REASON_PENDING_INTERRUPT   7

#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVD                13
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_RSM                 17
#define EXIT_REASON_VMCALL              18
#define EXIT_REASON_VMCLEAR             19
#define EXIT_REASON_VMLAUNCH            20
#define EXIT_REASON_VMPTRLD             21
#define EXIT_REASON_VMPTRST             22
#define EXIT_REASON_VMREAD              23
#define EXIT_REASON_VMRESUME            24
#define EXIT_REASON_VMWRITE             25
#define EXIT_REASON_VMXOFF              26
#define EXIT_REASON_VMXON               27
#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32

#define EXIT_REASON_INVALID_GUEST_STATE 33
#define EXIT_REASON_MSR_LOADING         34

#define EXIT_REASON_MWAIT_INSTRUCTION   36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION   40

#define EXIT_REASON_MACHINE_CHECK       41

#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
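
/*
 * Illustrative sketch (not part of the original header): how a VM-exit
 * handler might classify the raw exit reason.  Bits 15:0 carry the basic
 * exit reason; bit 31 (VMX_EXIT_REASONS_FAILED_VMENTRY) flags a failed
 * VM entry.  The helper name is hypothetical.
 */
static inline int vmx_exit_is_fatal_example(unsigned long exit_reason)
{
    /* A failed VM entry leaves no guest state worth resuming. */
    if ( exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY )
        return 1;

    switch ( exit_reason & 0xffff ) {  /* basic exit reason, bits 15:0 */
    case EXIT_REASON_TRIPLE_FAULT:
    case EXIT_REASON_MACHINE_CHECK:
        return 1;
    default:
        return 0;
    }
}
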
/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */

#define INTR_TYPE_EXT_INTR              (0 << 8)        /* external interrupt */
#define INTR_TYPE_NMI                   (2 << 8)        /* NMI */
#define INTR_TYPE_HW_EXCEPTION          (3 << 8)        /* hardware exception */
#define INTR_TYPE_SW_EXCEPTION          (6 << 8)        /* software exception */
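
/*
 * Illustrative sketch (not part of the original header): testing an
 * interruption-information field with the masks above.  The extracted
 * type stays encoded in bits 10:8, so it compares directly against the
 * INTR_TYPE_* values.  The helper name is hypothetical.
 */
static inline int intr_info_is_hw_exception_example(unsigned long intr_info,
                                                    int vector)
{
    return (intr_info & INTR_INFO_VALID_MASK) &&
           ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_HW_EXCEPTION) &&
           ((int)(intr_info & INTR_INFO_VECTOR_MASK) == vector);
}
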
/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0xf     /* 3:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define CONTROL_REG_ACCESS_REG          0xf00   /* 11:8, general purpose register */
#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 31:16, LMSW source data */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                          (8 << 8)
#define REG_R9                          (9 << 8)
#define REG_R10                         (10 << 8)
#define REG_R11                         (11 << 8)
#define REG_R12                         (12 << 8)
#define REG_R13                         (13 << 8)
#define REG_R14                         (14 << 8)
#define REG_R15                         (15 << 8)
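
/*
 * Illustrative sketch (not part of the original header): decoding a
 * CR-access exit qualification.  The REG_* constants are pre-shifted into
 * bits 11:8, so the register field compares against them without further
 * shifting.  The helper names are hypothetical.
 */
static inline int cr_access_num_example(unsigned long exit_qualification)
{
    return exit_qualification & CONTROL_REG_ACCESS_NUM;  /* which CRn */
}

static inline int cr_access_uses_reg_example(unsigned long exit_qualification,
                                             unsigned long reg /* REG_* */)
{
    return (exit_qualification & CONTROL_REG_ACCESS_REG) == reg;
}
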
/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */
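
/*
 * Illustrative sketch (not part of the original header): the DR-access
 * qualification mirrors the CR-access layout, with bit 4 giving the
 * direction.  The helper name is hypothetical.
 */
static inline int dr_access_is_write_example(unsigned long exit_qualification)
{
    /* TYPE_MOV_TO_DR means the guest is loading a debug register. */
    return (exit_qualification & DEBUG_REG_ACCESS_TYPE) == TYPE_MOV_TO_DR;
}
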
/* These bits in CR4 are owned by the host. */
#if CONFIG_PAGING_LEVELS >= 3
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
#else
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
#endif

#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"        /* reg/opcode: /6 */

#define MODRM_EAX_06    ".byte 0x30\n"  /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n"  /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n"  /* EAX, ECX (register operands) */

static inline void __vmptrld(u64 addr)
{
    __asm__ __volatile__ ( VMPTRLD_OPCODE
                           MODRM_EAX_06
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline void __vmptrst(u64 addr)
{
    __asm__ __volatile__ ( VMPTRST_OPCODE
                           MODRM_EAX_07
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline void __vmpclear(u64 addr)
{
    __asm__ __volatile__ ( VMCLEAR_OPCODE
                           MODRM_EAX_06
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline unsigned long __vmread(unsigned long field)
{
    unsigned long ecx;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           : "=c" (ecx)
                           : "a" (field)
                           : "memory");

    return ecx;
}

static inline void __vmwrite(unsigned long field, unsigned long value)
{
    __asm__ __volatile__ ( VMWRITE_OPCODE
                           MODRM_EAX_ECX
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           :
                           : "a" (field), "c" (value)
                           : "memory");
}

static inline unsigned long __vmread_safe(unsigned long field, int *error)
{
    unsigned long ecx;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           /* CF==1 or ZF==1 --> rc = -1 */
                           "setna %b0 ; neg %0"
                           : "=q" (*error), "=c" (ecx)
                           : "0" (0), "a" (field)
                           : "memory");

    return ecx;
}
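
/*
 * Illustrative sketch (not part of the original header): unlike __vmread(),
 * which executes ud2 on a failed VMREAD, __vmread_safe() reports failure
 * through *error (0 on success, -1 on failure) so the caller can recover.
 * The helper name and default-value policy are hypothetical.
 */
static inline unsigned long vmread_or_default_example(unsigned long field,
                                                      unsigned long def)
{
    int error;
    unsigned long value = __vmread_safe(field, &error);
    return error ? def : value;
}
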
static inline void __vm_set_bit(unsigned long field, unsigned int bit)
{
    __vmwrite(field, __vmread(field) | (1UL << bit));
}

static inline void __vm_clear_bit(unsigned long field, unsigned int bit)
{
    __vmwrite(field, __vmread(field) & ~(1UL << bit));
}
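
/*
 * Illustrative sketch (not part of the original header): the changeset
 * shown above ("only intercept #DB/#BP if a debugger is attached") can be
 * expressed with these helpers.  Assumes EXCEPTION_BITMAP from vmcs.h and
 * TRAP_debug/TRAP_int3 from processor.h; the helper name is hypothetical.
 */
static inline void debugger_intercepts_example(int debugger_attached)
{
    if ( debugger_attached ) {
        __vm_set_bit(EXCEPTION_BITMAP, TRAP_debug);
        __vm_set_bit(EXCEPTION_BITMAP, TRAP_int3);
    } else {
        __vm_clear_bit(EXCEPTION_BITMAP, TRAP_debug);
        __vm_clear_bit(EXCEPTION_BITMAP, TRAP_int3);
    }
}
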
static inline void __vmxoff(void)
{
    __asm__ __volatile__ ( VMXOFF_OPCODE
                           ::: "memory");
}

static inline int __vmxon(u64 addr)
{
    int rc;

    __asm__ __volatile__ ( VMXON_OPCODE
                           MODRM_EAX_06
                           /* CF==1 or ZF==1 --> rc = -1 */
                           "setna %b0 ; neg %0"
                           : "=q" (rc)
                           : "0" (0), "a" (&addr)
                           : "memory");

    return rc;
}
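
/*
 * Illustrative sketch (not part of the original header): __vmxon() returns
 * 0 on success and -1 when VMXON fails with CF or ZF set (e.g. an invalid
 * VMXON region), so callers can fail gracefully instead of crashing.  The
 * helper name is hypothetical.
 */
static inline int vmx_enable_example(u64 vmxon_region_maddr)
{
    return (__vmxon(vmxon_region_maddr) == 0);
}
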
static inline int vmx_paging_enabled(struct vcpu *v)
{
    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
}

static inline int vmx_long_mode_enabled(struct vcpu *v)
{
    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
    return efer & EFER_LMA;
}

static inline int vmx_lme_is_set(struct vcpu *v)
{
    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
    return efer & EFER_LME;
}

static inline int vmx_pgbit_test(struct vcpu *v)
{
    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
    return (cr0 & X86_CR0_PG);
}

static inline void __vmx_inject_exception(struct vcpu *v, int trap, int type,
                                          int error_code, int ilen)
{
    unsigned long intr_fields;

    /* Reflect it back into the guest. */
    intr_fields = (INTR_INFO_VALID_MASK | type | trap);
    if ( error_code != VMX_DELIVER_NO_ERROR_CODE ) {
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
    }

    if ( ilen )
        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);

    if ( trap == TRAP_page_fault )
        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
    else
        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
}

static inline void vmx_inject_hw_exception(
    struct vcpu *v, int trap, int error_code)
{
    v->arch.hvm_vmx.vector_injected = 1;
    __vmx_inject_exception(v, trap, INTR_TYPE_HW_EXCEPTION, error_code, 0);
}
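
/*
 * Illustrative sketch (not part of the original header): a guest page
 * fault is injected by stashing the faulting address in cpu_cr2 and then
 * raising TRAP_page_fault (from processor.h) as a hardware exception,
 * which also lets the HVMTRACE_2D(PF_INJECT, ...) path above record the
 * address.  The helper name is hypothetical.
 */
static inline void inject_pf_example(struct vcpu *v, unsigned long va,
                                     int error_code)
{
    v->arch.hvm_vmx.cpu_cr2 = va;
    vmx_inject_hw_exception(v, TRAP_page_fault, error_code);
}
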
static inline void vmx_inject_sw_exception(
    struct vcpu *v, int trap, int instruction_len)
{
    v->arch.hvm_vmx.vector_injected = 1;
    __vmx_inject_exception(v, trap, INTR_TYPE_SW_EXCEPTION,
                           VMX_DELIVER_NO_ERROR_CODE,
                           instruction_len);
}

static inline void vmx_inject_extint(struct vcpu *v, int trap, int error_code)
{
    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code, 0);
    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
}

#endif /* __ASM_X86_HVM_VMX_VMX_H__ */