ia64/xen-unstable

view xen/include/asm-x86/vmx.h @ 6557:df589c4cf1ad

Update __vmread()
so that it no longer breaks the stack.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Chengyuan Li <chengyuan.li@intel.com>
author lcy@vtsmp-build.sh.intel.com
date Sun Aug 28 14:55:37 2005 +0800 (2005-08-28)
parents dfaf788ab18c
children 0bc9e2af21c8

/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#ifndef __ASM_X86_VMX_H__
#define __ASM_X86_VMX_H__

#include <xen/sched.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/vmx_vmcs.h>
#include <asm/i387.h>

#include <public/io/ioreq.h>

extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
extern void vmx_asm_do_launch(void);
extern void vmx_intr_assist(void);

extern void arch_vmx_do_launch(struct vcpu *);
extern void arch_vmx_do_resume(struct vcpu *);
extern void arch_vmx_do_relaunch(struct vcpu *);

extern int vmcs_size;
extern unsigned int cpu_rev;

/*
 * Need fill bits for SENTER
 */

#define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x00000016

#define MONITOR_PIN_BASED_EXEC_CONTROLS                 \
    (                                                   \
    MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    PIN_BASED_EXT_INTR_MASK |                           \
    PIN_BASED_NMI_EXITING                               \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x0401e172

#define _MONITOR_CPU_BASED_EXEC_CONTROLS                \
    (                                                   \
    MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    CPU_BASED_HLT_EXITING |                             \
    CPU_BASED_INVDPG_EXITING |                          \
    CPU_BASED_MWAIT_EXITING |                           \
    CPU_BASED_MOV_DR_EXITING |                          \
    CPU_BASED_ACTIVATE_IO_BITMAP |                      \
    CPU_BASED_UNCOND_IO_EXITING                         \
    )

#define MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE      \
    (                                                   \
    CPU_BASED_CR8_LOAD_EXITING |                        \
    CPU_BASED_CR8_STORE_EXITING                         \
    )

#define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE         0x0003edff

#define MONITOR_VM_EXIT_CONTROLS_IA32E_MODE             0x00000200

#define _MONITOR_VM_EXIT_CONTROLS                       \
    (                                                   \
    MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |           \
    VM_EXIT_ACK_INTR_ON_EXIT                            \
    )

#if defined (__x86_64__)
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    (                                                   \
    _MONITOR_CPU_BASED_EXEC_CONTROLS |                  \
    MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE          \
    )
#define MONITOR_VM_EXIT_CONTROLS                        \
    (                                                   \
    _MONITOR_VM_EXIT_CONTROLS |                         \
    MONITOR_VM_EXIT_CONTROLS_IA32E_MODE                 \
    )
#else
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    _MONITOR_CPU_BASED_EXEC_CONTROLS

#define MONITOR_VM_EXIT_CONTROLS                        \
    _MONITOR_VM_EXIT_CONTROLS
#endif
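
/*
 * Illustrative sketch (not part of this header's interface): when a VMCS is
 * constructed, these composite control words are written into the matching
 * VMCS control fields with __vmwrite().  The field names below are assumed
 * to come from <asm/vmx_vmcs.h>; the real construction code lives elsewhere.
 *
 *     error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
 *                        MONITOR_PIN_BASED_EXEC_CONTROLS);
 *     error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
 *                        MONITOR_CPU_BASED_EXEC_CONTROLS);
 *     error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
 *     error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
 */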

#define VM_ENTRY_CONTROLS_RESERVED_VALUE        0x000011ff
#define VM_ENTRY_CONTROLS_IA32E_MODE            0x00000200
#define MONITOR_VM_ENTRY_CONTROLS       VM_ENTRY_CONTROLS_RESERVED_VALUE

/*
 * Exit Reasons
 */
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1

#define EXIT_REASON_PENDING_INTERRUPT   7

#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_VMCALL              18

#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_MWAIT_INSTRUCTION   36

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */

#define INTR_TYPE_EXT_INTR              (0 << 8)        /* external interrupt */
#define INTR_TYPE_EXCEPTION             (3 << 8)        /* processor exception */

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define TYPE_MOV_TO_CR                  (0 << 4)
#define TYPE_MOV_FROM_CR                (1 << 4)
#define TYPE_CLTS                       (2 << 4)
#define TYPE_LMSW                       (3 << 4)
#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 31:16, LMSW source data */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                          (8 << 8)
#define REG_R9                          (9 << 8)
#define REG_R10                         (10 << 8)
#define REG_R11                         (11 << 8)
#define REG_R12                         (12 << 8)
#define REG_R13                         (13 << 8)
#define REG_R14                         (14 << 8)
#define REG_R15                         (15 << 8)
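
/*
 * Illustrative decode of a MOV-CR exit qualification using the masks above
 * (a sketch only; the real CR-access handling lives in vmx.c, and
 * EXIT_QUALIFICATION is the VMCS field name assumed from <asm/vmx_vmcs.h>):
 *
 *     unsigned long qualification;
 *     __vmread(EXIT_QUALIFICATION, &qualification);
 *     int cr = qualification & CONTROL_REG_ACCESS_NUM;
 *     switch (qualification & CONTROL_REG_ACCESS_TYPE) {
 *     case TYPE_MOV_TO_CR:
 *         switch (qualification & CONTROL_REG_ACCESS_REG) {
 *         case REG_EAX: ...use guest EAX as the source...; break;
 *         }
 *         break;
 *     }
 */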

/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */

#define EXCEPTION_BITMAP_DE     (1 << 0)        /* Divide Error */
#define EXCEPTION_BITMAP_DB     (1 << 1)        /* Debug */
#define EXCEPTION_BITMAP_NMI    (1 << 2)        /* NMI */
#define EXCEPTION_BITMAP_BP     (1 << 3)        /* Breakpoint */
#define EXCEPTION_BITMAP_OF     (1 << 4)        /* Overflow */
#define EXCEPTION_BITMAP_BR     (1 << 5)        /* BOUND Range Exceeded */
#define EXCEPTION_BITMAP_UD     (1 << 6)        /* Invalid Opcode */
#define EXCEPTION_BITMAP_NM     (1 << 7)        /* Device Not Available */
#define EXCEPTION_BITMAP_DF     (1 << 8)        /* Double Fault */
/* reserved */
#define EXCEPTION_BITMAP_TS     (1 << 10)       /* Invalid TSS */
#define EXCEPTION_BITMAP_NP     (1 << 11)       /* Segment Not Present */
#define EXCEPTION_BITMAP_SS     (1 << 12)       /* Stack-Segment Fault */
#define EXCEPTION_BITMAP_GP     (1 << 13)       /* General Protection */
#define EXCEPTION_BITMAP_PG     (1 << 14)       /* Page Fault */
#define EXCEPTION_BITMAP_MF     (1 << 16)       /* x87 FPU Floating-Point Error (Math Fault) */
#define EXCEPTION_BITMAP_AC     (1 << 17)       /* Alignment Check */
#define EXCEPTION_BITMAP_MC     (1 << 18)       /* Machine Check */
#define EXCEPTION_BITMAP_XF     (1 << 19)       /* SIMD Floating-Point Exception */

/* Pending Debug exceptions */

#define PENDING_DEBUG_EXC_BP    (1 << 12)       /* break point */
#define PENDING_DEBUG_EXC_BS    (1 << 14)       /* Single step */

#ifdef XEN_DEBUGGER
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_DB |                     \
      EXCEPTION_BITMAP_BP |                     \
      EXCEPTION_BITMAP_GP )
#else
#define MONITOR_DEFAULT_EXCEPTION_BITMAP        \
    ( EXCEPTION_BITMAP_PG |                     \
      EXCEPTION_BITMAP_GP )
#endif

/* These bits in the CR4 are owned by the host */
#ifdef __i386__
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
#else
#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
#endif

#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"

#define MODRM_EAX_06    ".byte 0x30\n"  /* [EAX], with reg/opcode: /6 */
#define MODRM_EAX_07    ".byte 0x38\n"  /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX   ".byte 0xc1\n"  /* EAX, ECX (register operands) */
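
/*
 * Assemblers of this vintage do not know the VMX mnemonics, so each
 * instruction is emitted as raw bytes: one of the opcode strings above
 * followed, where needed, by a ModRM byte.  For example, VMPTRLD_OPCODE
 * MODRM_EAX_06 encodes "vmptrld (%eax)" (0f c7 /6, ModRM 0x30), and
 * VMREAD_OPCODE MODRM_EAX_ECX encodes "vmread %eax,%ecx" (0f 78 /r,
 * ModRM 0xc1: field selector in EAX, result in ECX).
 */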

/* Make the VMCS at physical address 'addr' current (VMPTRLD).  -1 on VMfail. */
static inline int __vmptrld (u64 addr)
{
    unsigned long eflags;
    __asm__ __volatile__ ( VMPTRLD_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");

    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

/* Store the current-VMCS pointer (VMPTRST). */
static inline void __vmptrst (u64 addr)
{
    __asm__ __volatile__ ( VMPTRST_OPCODE
                           MODRM_EAX_07
                           :
                           : "a" (&addr)
                           : "memory");
}

/* Clear the VMCS at physical address 'addr' (VMCLEAR).  -1 on VMfail. */
static inline int __vmpclear (u64 addr)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMCLEAR_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

#define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))

/*
 * Read VMCS field 'field' and store the result through 'ptr', using 'size'
 * (the size of the destination object) to pick the store width.  Returns -1
 * on VMfail (ZF or CF set).
 */
static always_inline int ___vmread (const unsigned long field, void *ptr, const int size)
{
    unsigned long eflags;
    unsigned long ecx = 0;

    __asm__ __volatile__ ( VMREAD_OPCODE
                           MODRM_EAX_ECX
                           : "=c" (ecx)
                           : "a" (field)
                           : "memory");

    switch (size) {
    case 1:
        *((u8 *) (ptr)) = ecx;
        break;
    case 2:
        *((u16 *) (ptr)) = ecx;
        break;
    case 4:
        *((u32 *) (ptr)) = ecx;
        break;
    case 8:
        *((u64 *) (ptr)) = ecx;
        break;
    default:
        domain_crash_synchronous();
        break;
    }

    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}
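
/*
 * Illustrative use: the destination pointer's type selects the store width,
 * so a 32-bit field read into a u32 is stored with a 32-bit write rather
 * than clobbering whatever follows the variable on the stack (the "break
 * the stack" problem this changeset addresses).  The field names are
 * assumed from <asm/vmx_vmcs.h>.
 *
 *     unsigned long rip;
 *     u32 exit_reason;
 *     if (__vmread(GUEST_RIP, &rip) ||
 *         __vmread(VM_EXIT_REASON, &exit_reason))
 *         ...handle VMfail...;
 */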

/* Write 'value' to VMCS field 'field'.  Returns -1 on VMfail (ZF or CF set). */
static inline int __vmwrite (unsigned long field, unsigned long value)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMWRITE_OPCODE
                           MODRM_EAX_ECX
                           :
                           : "a" (field), "c" (value)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}

/* Read-modify-write: set the 'mask' bits in VMCS field 'field'. */
static inline int __vm_set_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp |= mask;
    err |= __vmwrite(field, tmp);

    return err;
}

/* Read-modify-write: clear the 'mask' bits in VMCS field 'field'. */
static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
{
    unsigned long tmp;
    int err = 0;

    err |= __vmread(field, &tmp);
    tmp &= ~mask;
    err |= __vmwrite(field, tmp);

    return err;
}

/* Leave VMX operation on this CPU (VMXOFF). */
static inline void __vmxoff (void)
{
    __asm__ __volatile__ ( VMXOFF_OPCODE
                           ::: "memory");
}

/* Enter VMX operation, using the VMXON region at physical address 'addr'. */
static inline int __vmxon (u64 addr)
{
    unsigned long eflags;

    __asm__ __volatile__ ( VMXON_OPCODE
                           MODRM_EAX_06
                           :
                           : "a" (&addr)
                           : "memory");
    __save_flags(eflags);
    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
        return -1;
    return 0;
}
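
/*
 * Illustrative bring-up sketch (variable names are made up; the real
 * sequence lives in the VMX start-of-day code): CR4.VMXE must be set and
 * the VMXON region must begin with the VMCS revision identifier before its
 * physical address is handed to __vmxon().
 *
 *     set_in_cr4(X86_CR4_VMXE);
 *     if (__vmxon(vmxon_region_phys_addr))
 *         ...VMXON failed...;
 */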

/*
 * Make sure that xen intercepts any FP accesses from current: set TS in the
 * guest's CR0 so the next FP instruction faults with #NM, and, if the guest
 * itself has not set TS in its shadow CR0, intercept #NM so the fault is
 * handled by Xen instead of being delivered to the guest.
 */
static inline void vmx_stts(void)
{
    unsigned long cr0;

    __vmread(GUEST_CR0, &cr0);
    if (!(cr0 & X86_CR0_TS))
        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);

    __vmread(CR0_READ_SHADOW, &cr0);
    if (!(cr0 & X86_CR0_TS))
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}

/* Works only for v == current */
static inline int vmx_paging_enabled(struct vcpu *v)
{
    unsigned long cr0;

    __vmread(CR0_READ_SHADOW, &cr0);
    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}

#define VMX_INVALID_ERROR_CODE -1

/*
 * Queue an event of the given type and vector for injection on the next
 * VM entry, with an error code unless VMX_INVALID_ERROR_CODE is passed.
 */
static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type,
                                         int error_code)
{
    unsigned long intr_fields;

    /* Reflect it back into the guest */
    intr_fields = (INTR_INFO_VALID_MASK | type | trap);
    if (error_code != VMX_INVALID_ERROR_CODE) {
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        intr_fields |= INTR_INFO_DELIEVER_CODE_MASK;
    }

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
    return 0;
}

static inline int vmx_inject_exception(struct vcpu *v, int trap, int error_code)
{
    return __vmx_inject_exception(v, trap, INTR_TYPE_EXCEPTION, error_code);
}
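
/*
 * Illustrative use: reflecting a general-protection fault into the guest
 * with its error code (TRAP_gp_fault is the vector number from
 * <asm/processor.h>); pass VMX_INVALID_ERROR_CODE for exceptions that do
 * not push an error code.
 *
 *     vmx_inject_exception(v, TRAP_gp_fault, error_code);
 */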

static inline int vmx_inject_extint(struct vcpu *v, int trap, int error_code)
{
    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code);
    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);

    return 0;
}

static inline int vmx_reflect_exception(struct vcpu *v)
{
    int error_code, vector;

    __vmread(VM_EXIT_INTR_INFO, &vector);
    if (vector & INTR_INFO_DELIEVER_CODE_MASK)
        __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
    else
        error_code = VMX_INVALID_ERROR_CODE;
    vector &= 0xff;

#ifndef NDEBUG
    {
        unsigned long eip;

        __vmread(GUEST_RIP, &eip);
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "vmx_reflect_exception: eip = %lx, error_code = %x",
                    eip, error_code);
    }
#endif /* NDEBUG */

    vmx_inject_exception(v, vector, error_code);
    return 0;
}

/* Shared I/O request page used to communicate with the device model. */
static inline shared_iopage_t *get_sp(struct domain *d)
{
    return (shared_iopage_t *) d->arch.vmx_platform.shared_page_va;
}

static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
{
    return &get_sp(d)->vcpu_iodata[cpu];
}

static inline int iopacket_port(struct domain *d)
{
    return get_sp(d)->sp_global.eport;
}
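
/*
 * Illustrative use: reaching the per-vcpu I/O request slot shared with the
 * device model (the vp_ioreq field name is assumed from
 * <public/io/ioreq.h>):
 *
 *     vcpu_iodata_t *vio = get_vio(v->domain, v->vcpu_id);
 *     ioreq_t *p = &vio->vp_ioreq;
 */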

/* Prototypes */
void load_cpu_user_regs(struct cpu_user_regs *regs);
void store_cpu_user_regs(struct cpu_user_regs *regs);

#endif /* __ASM_X86_VMX_H__ */