
view xen/include/asm-x86/hvm/vmx/vmx.h @ 10658:d6363854fb35

[HVM][VMX] More descriptive failed vmentry.
Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Wed Jul 05 14:31:22 2006 +0100 (2006-07-05)
parents 34ff26fb2240
children f42039dcdc81
/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#ifndef __ASM_X86_HVM_VMX_VMX_H__
#define __ASM_X86_HVM_VMX_VMX_H__

#include <xen/sched.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/i387.h>

extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_vmentry(void);
extern void vmx_intr_assist(void);
extern void vmx_migrate_timers(struct vcpu *v);
extern void arch_vmx_do_launch(struct vcpu *);
extern void arch_vmx_do_resume(struct vcpu *);
extern void set_guest_time(struct vcpu *v, u64 gtime);

extern unsigned int cpu_rev;

/*
 * Need fill bits for SENTER
 */

#define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x00000016

#define MONITOR_PIN_BASED_EXEC_CONTROLS                 \
    (                                                   \
    MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    PIN_BASED_EXT_INTR_MASK |                           \
    PIN_BASED_NMI_EXITING                               \
    )
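/*
 * Editor's note (bit values assumed from <asm/hvm/vmx/vmcs.h> / the VT-x
 * spec, not stated in this file): the reserved value 0x16 sets the
 * must-be-one bits 1, 2 and 4; with PIN_BASED_EXT_INTR_MASK (bit 0) and
 * PIN_BASED_NMI_EXITING (bit 3) the composed
 * MONITOR_PIN_BASED_EXEC_CONTROLS evaluates to 0x0000001f.
 */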
#define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x0401e172

#define _MONITOR_CPU_BASED_EXEC_CONTROLS                \
    (                                                   \
    MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    CPU_BASED_HLT_EXITING |                             \
    CPU_BASED_INVDPG_EXITING |                          \
    CPU_BASED_MWAIT_EXITING |                           \
    CPU_BASED_MOV_DR_EXITING |                          \
    CPU_BASED_ACTIVATE_IO_BITMAP |                      \
    CPU_BASED_USE_TSC_OFFSETING                         \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE      \
    (                                                   \
    CPU_BASED_CR8_LOAD_EXITING |                        \
    CPU_BASED_CR8_STORE_EXITING                         \
    )

#define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE         0x0003edff

#define MONITOR_VM_EXIT_CONTROLS_IA32E_MODE             0x00000200

#define _MONITOR_VM_EXIT_CONTROLS                       \
    (                                                   \
    MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |           \
    VM_EXIT_ACK_INTR_ON_EXIT                            \
    )

#if defined (__x86_64__)
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    (                                                   \
    _MONITOR_CPU_BASED_EXEC_CONTROLS |                  \
    MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE          \
    )
#define MONITOR_VM_EXIT_CONTROLS                        \
    (                                                   \
    _MONITOR_VM_EXIT_CONTROLS |                         \
    MONITOR_VM_EXIT_CONTROLS_IA32E_MODE                 \
    )
#else
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    _MONITOR_CPU_BASED_EXEC_CONTROLS

#define MONITOR_VM_EXIT_CONTROLS                        \
    _MONITOR_VM_EXIT_CONTROLS
#endif

#define VM_ENTRY_CONTROLS_RESERVED_VALUE                0x000011ff
#define VM_ENTRY_CONTROLS_IA32E_MODE                    0x00000200
#define MONITOR_VM_ENTRY_CONTROLS       VM_ENTRY_CONTROLS_RESERVED_VALUE
/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1

#define EXIT_REASON_PENDING_INTERRUPT   7

#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_VMCALL              18
#define EXIT_REASON_VMCLEAR             19
#define EXIT_REASON_VMLAUNCH            20
#define EXIT_REASON_VMPTRLD             21
#define EXIT_REASON_VMPTRST             22
#define EXIT_REASON_VMREAD              23
#define EXIT_REASON_VMRESUME            24
#define EXIT_REASON_VMWRITE             25
#define EXIT_REASON_VMOFF               26
#define EXIT_REASON_VMON                27
#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_MWAIT_INSTRUCTION   36

#define EXIT_REASON_INVALID_GUEST_STATE 33
#define EXIT_REASON_MSR_LOADING         34
#define EXIT_REASON_MACHINE_CHECK       41
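
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * separating a failed VM entry from an ordinary VM exit.  Bits 15:0 of the
 * exit reason hold the basic reason; bit 31 (VMX_EXIT_REASONS_FAILED_VMENTRY)
 * marks an entry failure, whose basic reason is one of INVALID_GUEST_STATE,
 * MSR_LOADING or MACHINE_CHECK.  VM_EXIT_REASON is the field encoding from
 * <asm/hvm/vmx/vmcs.h>; __vmread() is defined later in this header.
 */
#if 0 /* illustrative sketch only; not compiled */
    unsigned long exit_reason;

    __vmread(VM_EXIT_REASON, &exit_reason);
    if ( exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY )
    {
        /* VM entry failed: report the basic reason in detail. */
        switch ( exit_reason & 0xffff )
        {
        case EXIT_REASON_INVALID_GUEST_STATE: /* ... */ break;
        case EXIT_REASON_MSR_LOADING:         /* ... */ break;
        case EXIT_REASON_MACHINE_CHECK:       /* ... */ break;
        }
    }
    else
    {
        /* Normal VM exit: dispatch on the basic exit reason. */
        switch ( exit_reason & 0xffff )
        {
        case EXIT_REASON_CPUID:     /* emulate CPUID ... */          break;
        case EXIT_REASON_CR_ACCESS: /* decode the qualification ... */ break;
        }
    }
#endif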
/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */

#define INTR_TYPE_EXT_INTR              (0 << 8)        /* external interrupt */
#define INTR_TYPE_EXCEPTION             (3 << 8)        /* processor exception */
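
/*
 * Worked example (editor's addition): the interruption-information word for
 * injecting a page fault (vector 14) with an error code, as composed by
 * __vmx_inject_exception() below, is
 *   INTR_INFO_VALID_MASK | INTR_TYPE_EXCEPTION | INTR_INFO_DELIEVER_CODE_MASK | 14
 *   = 0x80000000 | 0x300 | 0x800 | 0x0e = 0x80000b0e,
 * written to VM_ENTRY_INTR_INFO_FIELD, with the error code itself written to
 * VM_ENTRY_EXCEPTION_ERROR_CODE.
 */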
/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define CONTROL_REG_ACCESS_REG          0xf00   /* 11:8, general purpose register */
#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 31:16, LMSW source data */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                          (8 << 8)
#define REG_R9                          (9 << 8)
#define REG_R10                         (10 << 8)
#define REG_R11                         (11 << 8)
#define REG_R12                         (12 << 8)
#define REG_R13                         (13 << 8)
#define REG_R14                         (14 << 8)
#define REG_R15                         (15 << 8)
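
/*
 * Illustrative sketch (editor's addition): decoding the exit qualification
 * of an EXIT_REASON_CR_ACCESS exit with the masks above.  EXIT_QUALIFICATION
 * is the field encoding from <asm/hvm/vmx/vmcs.h>; the access-type values
 * (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW) follow the VT-x spec.
 * For example, a qualification of 0x314 is "MOV from CR4 into EBX".
 */
#if 0 /* illustrative sketch only; not compiled */
    unsigned long qualification, cr, gp;

    __vmread(EXIT_QUALIFICATION, &qualification);
    cr = qualification & CONTROL_REG_ACCESS_NUM;         /* which CRn         */
    gp = (qualification & CONTROL_REG_ACCESS_REG) >> 8;  /* which GP register */
    switch ( (qualification & CONTROL_REG_ACCESS_TYPE) >> 4 )
    {
    case 0: /* MOV to CR'cr' from the register selected by 'gp'  */ break;
    case 1: /* MOV from CR'cr' into the register selected by 'gp' */ break;
    case 2: /* CLTS */ break;
    case 3: /* LMSW; source word in the LMSW_SOURCE_DATA bits */ break;
    }
#endif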
/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */

/* These bits in CR4 are owned by the host. */
#if CONFIG_PAGING_LEVELS >= 3
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
#else
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
#endif

#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"

#define MODRM_EAX_06    ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n" /* EAX, ECX (register form) */
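
/*
 * Editor's note: these byte sequences hand-assemble the VMX instructions for
 * assemblers that do not know them.  The ModRM byte picks the operand:
 * 0x30 is mod=00, reg=/6, rm=000, i.e. a memory operand at [EAX], so
 * VMPTRLD_OPCODE MODRM_EAX_06 emits "0f c7 30" (vmptrld [eax]); 0x38 is the
 * corresponding /7 form.  0xc1 is mod=11, reg=EAX, rm=ECX, the register form
 * used by VMREAD/VMWRITE below (field index in EAX, value in ECX).
 */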
static inline void __vmptrld(u64 addr)
{
    __asm__ __volatile__ ( VMPTRLD_OPCODE
                           MODRM_EAX_06
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline void __vmptrst(u64 addr)
{
    __asm__ __volatile__ ( VMPTRST_OPCODE
                           MODRM_EAX_07
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline void __vmpclear(u64 addr)
{
    __asm__ __volatile__ ( VMCLEAR_OPCODE
                           MODRM_EAX_06
                           /* CF==1 or ZF==1 --> crash (ud2) */
                           "ja 1f ; ud2 ; 1:\n"
                           :
                           : "a" (&addr)
                           : "memory");
}

#define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))

static always_inline int ___vmread(
    const unsigned long field, void *ptr, const int size)
{
    unsigned long ecx = 0;
    int rc;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           /* CF==1 or ZF==1 --> rc = -1 */
                           "setna %b0 ; neg %0"
                           : "=q" (rc), "=c" (ecx)
                           : "0" (0), "a" (field)
                           : "memory");

    switch (size) {
    case 1:
        *((u8 *) (ptr)) = ecx;
        break;
    case 2:
        *((u16 *) (ptr)) = ecx;
        break;
    case 4:
        *((u32 *) (ptr)) = ecx;
        break;
    case 8:
        *((u64 *) (ptr)) = ecx;
        break;
    default:
        domain_crash_synchronous();
        break;
    }

    return rc;
}
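
/*
 * Illustrative usage (editor's addition): __vmread() deduces the copy size
 * from the pointer type, so the destination variable must match the width of
 * the VMCS field being read.  It returns 0 on success and -1 if VMREAD
 * failed.  GUEST_RIP is a field encoding from <asm/hvm/vmx/vmcs.h>.
 */
#if 0 /* illustrative sketch only; not compiled */
    unsigned long rip;

    if ( __vmread(GUEST_RIP, &rip) != 0 )
        domain_crash_synchronous();   /* VMREAD failed */
#endif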
static always_inline void __vmwrite_vcpu(
    struct vcpu *v, unsigned long field, unsigned long value)
{
    switch(field) {
    case CR0_READ_SHADOW:
        v->arch.hvm_vmx.cpu_shadow_cr0 = value;
        break;
    case GUEST_CR0:
        v->arch.hvm_vmx.cpu_cr0 = value;
        break;
    case CPU_BASED_VM_EXEC_CONTROL:
        v->arch.hvm_vmx.cpu_based_exec_control = value;
        break;
    default:
        printk("__vmwrite_vcpu: invalid field %lx\n", field);
        break;
    }
}

static always_inline void __vmread_vcpu(
    struct vcpu *v, unsigned long field, unsigned long *value)
{
    switch(field) {
    case CR0_READ_SHADOW:
        *value = v->arch.hvm_vmx.cpu_shadow_cr0;
        break;
    case GUEST_CR0:
        *value = v->arch.hvm_vmx.cpu_cr0;
        break;
    case CPU_BASED_VM_EXEC_CONTROL:
        *value = v->arch.hvm_vmx.cpu_based_exec_control;
        break;
    default:
        printk("__vmread_vcpu: invalid field %lx\n", field);
        break;
    }
}

static inline int __vmwrite(unsigned long field, unsigned long value)
{
    struct vcpu *v = current;
    int rc;

    __asm__ __volatile__ ( VMWRITE_OPCODE
                           MODRM_EAX_ECX
                           /* CF==1 or ZF==1 --> rc = -1 */
                           "setna %b0 ; neg %0"
                           : "=q" (rc)
                           : "0" (0), "a" (field), "c" (value)
                           : "memory");

    switch(field) {
    case CR0_READ_SHADOW:
    case GUEST_CR0:
    case CPU_BASED_VM_EXEC_CONTROL:
        /* Keep the per-vcpu software copy in sync with the VMCS. */
        __vmwrite_vcpu(v, field, value);
        break;
    }

    return rc;
}
static inline int __vm_set_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp |= mask;
    err |= __vmwrite(field, tmp);

    return err;
}

static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp &= ~mask;
    err |= __vmwrite(field, tmp);

    return err;
}
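
/*
 * Illustrative usage (editor's addition): toggling an exception intercept in
 * the exception bitmap.  EXCEPTION_BITMAP and EXCEPTION_BITMAP_NM are the
 * same names vmx_stts() uses below.
 */
#if 0 /* illustrative sketch only; not compiled */
    __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);    /* start trapping #NM */
    __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);  /* stop trapping #NM  */
#endif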
static inline void __vmxoff(void)
{
    __asm__ __volatile__ ( VMXOFF_OPCODE
                           ::: "memory");
}

static inline int __vmxon(u64 addr)
{
    int rc;

    __asm__ __volatile__ ( VMXON_OPCODE
                           MODRM_EAX_06
                           /* CF==1 or ZF==1 --> rc = -1 */
                           "setna %b0 ; neg %0"
                           : "=q" (rc)
                           : "0" (0), "a" (&addr)
                           : "memory");

    return rc;
}

/* Make sure that xen intercepts any FP accesses from current */
static inline void vmx_stts(void)
{
    unsigned long cr0;
    struct vcpu *v = current;

    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        return;

    /*
     * If the guest does not have TS enabled then we must cause and handle an
     * exception on first use of the FPU. If the guest *does* have TS enabled
     * then this is not necessary: no FPU activity can occur until the guest
     * clears CR0.TS, and we will initialise the FPU when that happens.
     */
    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    if ( !(cr0 & X86_CR0_TS) )
    {
        __vmread_vcpu(v, GUEST_CR0, &cr0);
        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
    }
}
/* Works only for vcpu == current */
static inline int vmx_paging_enabled(struct vcpu *v)
{
    unsigned long cr0;

    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}

static inline int vmx_pgbit_test(struct vcpu *v)
{
    unsigned long cr0;

    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    return (cr0 & X86_CR0_PG);
}

static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type,
                                          int error_code)
{
    unsigned long intr_fields;

    /* Reflect it back into the guest */
    intr_fields = (INTR_INFO_VALID_MASK | type | trap);
    if (error_code != VMX_DELIVER_NO_ERROR_CODE) {
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        intr_fields |= INTR_INFO_DELIEVER_CODE_MASK;
    }

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
    return 0;
}

static inline int vmx_inject_exception(struct vcpu *v, int trap, int error_code)
{
    v->arch.hvm_vmx.vector_injected = 1;
    return __vmx_inject_exception(v, trap, INTR_TYPE_EXCEPTION, error_code);
}

static inline int vmx_inject_extint(struct vcpu *v, int trap, int error_code)
{
    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code);
    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);

    return 0;
}

static inline int vmx_reflect_exception(struct vcpu *v)
{
    int error_code, vector;

    __vmread(VM_EXIT_INTR_INFO, &vector);
    if (vector & INTR_INFO_DELIEVER_CODE_MASK)
        __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
    else
        error_code = VMX_DELIVER_NO_ERROR_CODE;
    vector &= 0xff;

#ifndef NDEBUG
    {
        unsigned long eip;

        __vmread(GUEST_RIP, &eip);
        HVM_DBG_LOG(DBG_LEVEL_1,
                    "vmx_reflect_exception: eip = %lx, error_code = %x",
                    eip, error_code);
    }
#endif /* NDEBUG */

    vmx_inject_exception(v, vector, error_code);
    return 0;
}

#endif /* __ASM_X86_HVM_VMX_VMX_H__ */