ia64/xen-unstable

view xen/include/asm-x86/hvm/vmx/vmx.h @ 9334:56a775219c88

This patch fixes an HVM/VMX time resolution issue that caused IA32E guests to
occasionally complain about "lost ticks", as well as an APIC timer calibration issue.

Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Mar 19 18:52:20 2006 +0100 (2006-03-19)
parents 3f8123ae34ba
children c6557cad2670

/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#ifndef __ASM_X86_HVM_VMX_VMX_H__
#define __ASM_X86_HVM_VMX_VMX_H__

#include <xen/sched.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/i387.h>

extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
extern void vmx_asm_do_launch(void);
extern void vmx_intr_assist(void);
extern void vmx_migrate_timers(struct vcpu *v);
extern void arch_vmx_do_launch(struct vcpu *);
extern void arch_vmx_do_resume(struct vcpu *);
extern void set_guest_time(struct vcpu *v, u64 gtime);
extern u64 get_guest_time(struct vcpu *v);

extern unsigned int cpu_rev;

/*
 * Need fill bits for SENTER
 */

#define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x00000016

#define MONITOR_PIN_BASED_EXEC_CONTROLS                 \
    (                                                   \
    MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    PIN_BASED_EXT_INTR_MASK |                           \
    PIN_BASED_NMI_EXITING                               \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x0401e172

#define _MONITOR_CPU_BASED_EXEC_CONTROLS                \
    (                                                   \
    MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    CPU_BASED_HLT_EXITING |                             \
    CPU_BASED_INVDPG_EXITING |                          \
    CPU_BASED_MWAIT_EXITING |                           \
    CPU_BASED_MOV_DR_EXITING |                          \
    CPU_BASED_ACTIVATE_IO_BITMAP |                      \
    CPU_BASED_USE_TSC_OFFSETING |                       \
    CPU_BASED_UNCOND_IO_EXITING                         \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE      \
    (                                                   \
    CPU_BASED_CR8_LOAD_EXITING |                        \
    CPU_BASED_CR8_STORE_EXITING                         \
    )

#define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE         0x0003edff

#define MONITOR_VM_EXIT_CONTROLS_IA32E_MODE             0x00000200

#define _MONITOR_VM_EXIT_CONTROLS                       \
    (                                                   \
    MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |           \
    VM_EXIT_ACK_INTR_ON_EXIT                            \
    )

#if defined (__x86_64__)
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    (                                                   \
    _MONITOR_CPU_BASED_EXEC_CONTROLS |                  \
    MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE          \
    )
#define MONITOR_VM_EXIT_CONTROLS                        \
    (                                                   \
    _MONITOR_VM_EXIT_CONTROLS |                         \
    MONITOR_VM_EXIT_CONTROLS_IA32E_MODE                 \
    )
#else
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    _MONITOR_CPU_BASED_EXEC_CONTROLS

#define MONITOR_VM_EXIT_CONTROLS                        \
    _MONITOR_VM_EXIT_CONTROLS
#endif

#define VM_ENTRY_CONTROLS_RESERVED_VALUE                0x000011ff
#define VM_ENTRY_CONTROLS_IA32E_MODE                    0x00000200
#define MONITOR_VM_ENTRY_CONTROLS       VM_ENTRY_CONTROLS_RESERVED_VALUE
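
/*
 * Usage sketch (illustrative): the MONITOR_* words above are the control
 * settings a VMCS construction routine would typically program, with each
 * *_RESERVED_VALUE presumably covering the control bits that must be set
 * to 1.  Assuming the field encodings from <asm/hvm/vmx/vmcs.h>, loading
 * them would look roughly like:
 *
 *     __vmwrite(PIN_BASED_VM_EXEC_CONTROL, MONITOR_PIN_BASED_EXEC_CONTROLS);
 *     __vmwrite(CPU_BASED_VM_EXEC_CONTROL, MONITOR_CPU_BASED_EXEC_CONTROLS);
 *     __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
 *     __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
 */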

/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1

#define EXIT_REASON_PENDING_INTERRUPT   7

#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_VMCALL              18
#define EXIT_REASON_VMCLEAR             19
#define EXIT_REASON_VMLAUNCH            20
#define EXIT_REASON_VMPTRLD             21
#define EXIT_REASON_VMPTRST             22
#define EXIT_REASON_VMREAD              23
#define EXIT_REASON_VMRESUME            24
#define EXIT_REASON_VMWRITE             25
#define EXIT_REASON_VMOFF               26
#define EXIT_REASON_VMON                27
#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_MWAIT_INSTRUCTION   36

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */

#define INTR_TYPE_EXT_INTR              (0 << 8)        /* external interrupt */
#define INTR_TYPE_EXCEPTION             (3 << 8)        /* processor exception */

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 31:16, LMSW source data */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                          (8 << 8)
#define REG_R9                          (9 << 8)
#define REG_R10                         (10 << 8)
#define REG_R11                         (11 << 8)
#define REG_R12                         (12 << 8)
#define REG_R13                         (13 << 8)
#define REG_R14                         (14 << 8)
#define REG_R15                         (15 << 8)

/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */

/* These bits in the CR4 are owned by the host */
#if CONFIG_PAGING_LEVELS >= 3
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
#else
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
#endif

#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"

#define MODRM_EAX_06    ".byte 0x30\n"  /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n"  /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n"  /* [EAX], [ECX] */
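
/*
 * The VMX instruction wrappers below signal failure through the x86 flags:
 * the hardware sets CF for VMfailInvalid and ZF for VMfailValid, so each
 * wrapper samples EFLAGS after the instruction and returns -1 if either
 * flag is set, 0 on success.
 */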

static inline int __vmptrld (u64 addr)
{
    unsigned long eflags;
    __asm__ __volatile__ ( VMPTRLD_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");

    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

static inline void __vmptrst (u64 addr)
{
    __asm__ __volatile__ ( VMPTRST_OPCODE
                           MODRM_EAX_07
                           :
                           : "a" (&addr)
                           : "memory");
}

static inline int __vmpclear (u64 addr)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMCLEAR_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

#define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))

static always_inline int ___vmread (const unsigned long field, void *ptr, const int size)
{
    unsigned long eflags;
    unsigned long ecx = 0;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           : "=c" (ecx)
                           : "a" (field)
                           : "memory");

    switch (size) {
    case 1:
        *((u8 *) (ptr)) = ecx;
        break;
    case 2:
        *((u16 *) (ptr)) = ecx;
        break;
    case 4:
        *((u32 *) (ptr)) = ecx;
        break;
    case 8:
        *((u64 *) (ptr)) = ecx;
        break;
    default:
        domain_crash_synchronous();
        break;
    }

    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}
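
/*
 * Usage sketch (illustrative): a VM exit handler typically pulls the exit
 * reason out of the VMCS with __vmread() and dispatches on the EXIT_REASON_*
 * values defined above.  VM_EXIT_REASON is assumed to be the field encoding
 * from <asm/hvm/vmx/vmcs.h>; the helper name below is hypothetical.
 */
static inline int example_get_exit_reason(unsigned int *reason)
{
    unsigned long exit_reason;

    if (__vmread(VM_EXIT_REASON, &exit_reason))
        return -1;                              /* VMREAD failed */
    if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)
        return -1;                              /* VM entry itself failed */
    *reason = exit_reason & 0xffff;             /* basic reason, e.g. EXIT_REASON_CPUID */
    return 0;
}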

static always_inline void __vmwrite_vcpu(struct vcpu *v, unsigned long field, unsigned long value)
{
    switch(field) {
    case CR0_READ_SHADOW:
        v->arch.hvm_vmx.cpu_shadow_cr0 = value;
        break;
    case GUEST_CR0:
        v->arch.hvm_vmx.cpu_cr0 = value;
        break;
    case CPU_BASED_VM_EXEC_CONTROL:
        v->arch.hvm_vmx.cpu_based_exec_control = value;
        break;
    default:
        printk("__vmwrite_vcpu: invalid field %lx\n", field);
        break;
    }
}

static always_inline void __vmread_vcpu(struct vcpu *v, unsigned long field, unsigned long *value)
{
    switch(field) {
    case CR0_READ_SHADOW:
        *value = v->arch.hvm_vmx.cpu_shadow_cr0;
        break;
    case GUEST_CR0:
        *value = v->arch.hvm_vmx.cpu_cr0;
        break;
    case CPU_BASED_VM_EXEC_CONTROL:
        *value = v->arch.hvm_vmx.cpu_based_exec_control;
        break;
    default:
        printk("__vmread_vcpu: invalid field %lx\n", field);
        break;
    }
}
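
/*
 * __vmwrite_vcpu()/__vmread_vcpu() maintain per-vcpu shadow copies of three
 * VMCS values (the CR0 read shadow, guest CR0 and the CPU-based execution
 * controls) in v->arch.hvm_vmx, so callers such as vmx_stts() below can
 * consult them without issuing a VMREAD.  __vmwrite() keeps the copies
 * coherent by updating them whenever one of these fields is written.
 */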

static inline int __vmwrite (unsigned long field, unsigned long value)
{
    unsigned long eflags;
    struct vcpu *v = current;

    __asm__ __volatile__ ( VMWRITE_OPCODE
                           MODRM_EAX_ECX
                           :
                           : "a" (field) , "c" (value)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;

    switch(field) {
    case CR0_READ_SHADOW:
    case GUEST_CR0:
    case CPU_BASED_VM_EXEC_CONTROL:
        __vmwrite_vcpu(v, field, value);
        break;
    }

    return 0;
}

static inline int __vm_set_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp |= mask;
    err |= __vmwrite(field, tmp);

    return err;
}

static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp &= ~mask;
    err |= __vmwrite(field, tmp);

    return err;
}
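
/*
 * Usage sketch (illustrative): __vm_set_bit()/__vm_clear_bit() are the usual
 * way to toggle a single control bit via a read-modify-write of a VMCS
 * field, as vmx_stts() below does for the #NM intercept.  The helper below
 * is hypothetical; EXCEPTION_BITMAP and EXCEPTION_BITMAP_NM are assumed to
 * come from the included <asm/hvm/vmx/vmcs.h>.
 */
static inline int example_trap_nm(int enable)
{
    return enable ?
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM) :
        __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}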

static inline void __vmxoff (void)
{
    __asm__ __volatile__ ( VMXOFF_OPCODE
                           ::: "memory");
}

static inline int __vmxon (u64 addr)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMXON_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

/* Make sure that xen intercepts any FP accesses from current */
static inline void vmx_stts(void)
{
    unsigned long cr0;
    struct vcpu *v = current;

    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        return;

    /*
     * If the guest does not have TS enabled then we must cause and handle an
     * exception on first use of the FPU. If the guest *does* have TS enabled
     * then this is not necessary: no FPU activity can occur until the guest
     * clears CR0.TS, and we will initialise the FPU when that happens.
     */
    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    if ( !(cr0 & X86_CR0_TS) )
    {
        __vmread_vcpu(v, GUEST_CR0, &cr0);
        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
    }
}

/* Works only for vcpu == current */
static inline int vmx_paging_enabled(struct vcpu *v)
{
    unsigned long cr0;

    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}

static inline int vmx_pgbit_test(struct vcpu *v)
{
    unsigned long cr0;

    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    return (cr0 & X86_CR0_PG);
}
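
/*
 * The injection helpers below build the VM-entry interruption-information
 * word from the format defined earlier in this header: the valid bit
 * (INTR_INFO_VALID_MASK), the event type (INTR_TYPE_EXCEPTION or
 * INTR_TYPE_EXT_INTR) and the vector, with INTR_INFO_DELIEVER_CODE_MASK set
 * whenever an error code is also written to VM_ENTRY_EXCEPTION_ERROR_CODE.
 */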

static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type,
                                         int error_code)
{
    unsigned long intr_fields;

    /* Reflect it back into the guest */
    intr_fields = (INTR_INFO_VALID_MASK | type | trap);
    if (error_code != VMX_DELIVER_NO_ERROR_CODE) {
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        intr_fields |= INTR_INFO_DELIEVER_CODE_MASK;
    }

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
    return 0;
}

static inline int vmx_inject_exception(struct vcpu *v, int trap, int error_code)
{
    return __vmx_inject_exception(v, trap, INTR_TYPE_EXCEPTION, error_code);
}

static inline int vmx_inject_extint(struct vcpu *v, int trap, int error_code)
{
    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code);
    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);

    return 0;
}

static inline int vmx_reflect_exception(struct vcpu *v)
{
    int error_code, vector;

    __vmread(VM_EXIT_INTR_INFO, &vector);
    if (vector & INTR_INFO_DELIEVER_CODE_MASK)
        __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
    else
        error_code = VMX_DELIVER_NO_ERROR_CODE;
    vector &= 0xff;

#ifndef NDEBUG
    {
        unsigned long eip;

        __vmread(GUEST_RIP, &eip);
        HVM_DBG_LOG(DBG_LEVEL_1,
                    "vmx_reflect_exception: eip = %lx, error_code = %x",
                    eip, error_code);
    }
#endif /* NDEBUG */

    vmx_inject_exception(v, vector, error_code);
    return 0;
}

#endif /* __ASM_X86_HVM_VMX_VMX_H__ */