xen/arch/x86/vmx.c @ 7340:c05d5e85ded2 (ia64/xen-unstable)

Hide the VMX CPU feature from unmodified guests.

Signed-Off-By: Nitin A Kamble <nitin.a.kamble@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Oct 12 09:27:28 2005 +0100
parents 801b2bd7ef2e
children 52b9aca1916a
1 /*
2 * vmx.c: handling VMX architecture-related VM exits
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/init.h>
22 #include <xen/lib.h>
23 #include <xen/trace.h>
24 #include <xen/sched.h>
25 #include <xen/irq.h>
26 #include <xen/softirq.h>
27 #include <xen/domain_page.h>
28 #include <asm/current.h>
29 #include <asm/io.h>
30 #include <asm/shadow.h>
31 #include <asm/regs.h>
32 #include <asm/cpufeature.h>
33 #include <asm/processor.h>
34 #include <asm/types.h>
35 #include <asm/msr.h>
36 #include <asm/spinlock.h>
37 #include <asm/vmx.h>
38 #include <asm/vmx_vmcs.h>
39 #include <asm/vmx_intercept.h>
40 #include <asm/shadow.h>
41 #if CONFIG_PAGING_LEVELS >= 3
42 #include <asm/shadow_64.h>
43 #endif
44 #include <public/sched.h>
45 #include <public/io/ioreq.h>
47 int hvm_enabled;
49 #ifdef CONFIG_VMX
50 unsigned int opt_vmx_debug_level = 0;
51 integer_param("vmx_debug", opt_vmx_debug_level);
53 #ifdef TRACE_BUFFER
54 static unsigned long trace_values[NR_CPUS][5]; /* indices 0..4 are used by TRACE_VMEXIT */
55 #define TRACE_VMEXIT(index,value) trace_values[current->processor][index]=value
56 #else
57 #define TRACE_VMEXIT(index,value) ((void)0)
58 #endif
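/*
 * When the trace buffer is configured, TRACE_VMEXIT() stashes up to five
 * per-exit values in the current CPU's trace_values[] slot; trace_vmentry()
 * at the bottom of this file emits them with TRACE_5D just before the guest
 * is resumed.
 */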
60 #ifdef __x86_64__
61 static struct msr_state percpu_msr[NR_CPUS];
63 static u32 msr_data_index[VMX_MSR_COUNT] =
64 {
65 MSR_LSTAR, MSR_STAR, MSR_CSTAR,
66 MSR_SYSCALL_MASK, MSR_EFER,
67 };
69 /*
70 * To avoid MSR save/restore at every VM exit/entry time, we restore
71 * the x86_64 specific MSRs at domain switch time. Since those MSRs
72 * are not modified once set for generic domains, we don't save them,
73 * but simply reset them to the values set at percpu_traps_init().
74 */
75 void vmx_load_msrs(struct vcpu *n)
76 {
77 struct msr_state *host_state;
78 host_state = &percpu_msr[smp_processor_id()];
80 while (host_state->flags){
81 int i;
83 i = find_first_set_bit(host_state->flags);
84 wrmsrl(msr_data_index[i], host_state->msr_items[i]);
85 clear_bit(i, &host_state->flags);
86 }
87 }
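/*
 * vmx_load_msrs() restores host MSR state on a context switch: every MSR
 * flagged as dirtied in this CPU's percpu_msr state (set by the
 * CASE_WRITE_MSR paths below) has its saved host value written back and its
 * dirty bit cleared.
 */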
89 static void vmx_save_init_msrs(void)
90 {
91 struct msr_state *host_state;
92 host_state = &percpu_msr[smp_processor_id()];
93 int i;
95 for (i = 0; i < VMX_MSR_COUNT; i++)
96 rdmsrl(msr_data_index[i], host_state->msr_items[i]);
97 }
99 #define CASE_READ_MSR(address) \
100 case MSR_ ## address: \
101 msr_content = msr->msr_items[VMX_INDEX_MSR_ ## address]; \
102 break
104 #define CASE_WRITE_MSR(address) \
105 case MSR_ ## address: \
106 { \
107 msr->msr_items[VMX_INDEX_MSR_ ## address] = msr_content; \
108 if (!test_bit(VMX_INDEX_MSR_ ## address, &msr->flags)) { \
109 set_bit(VMX_INDEX_MSR_ ## address, &msr->flags); \
110 } \
111 wrmsrl(MSR_ ## address, msr_content); \
112 set_bit(VMX_INDEX_MSR_ ## address, &host_state->flags); \
113 } \
114 break
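/*
 * For reference, CASE_WRITE_MSR(STAR) expands (roughly) to:
 *
 *     case MSR_STAR:
 *     {
 *         msr->msr_items[VMX_INDEX_MSR_STAR] = msr_content;
 *         if (!test_bit(VMX_INDEX_MSR_STAR, &msr->flags))
 *             set_bit(VMX_INDEX_MSR_STAR, &msr->flags);
 *         wrmsrl(MSR_STAR, msr_content);
 *         set_bit(VMX_INDEX_MSR_STAR, &host_state->flags);
 *     }
 *     break;
 *
 * i.e. the guest value is cached, written to the real MSR, and the MSR is
 * marked dirty in both the guest state and the per-CPU host state.
 */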
116 #define IS_CANO_ADDRESS(add) 1
117 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
118 {
119 u64 msr_content = 0;
120 struct vcpu *vc = current;
121 struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
122 switch(regs->ecx){
123 case MSR_EFER:
124 msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
125 VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content);
126 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
127 &vc->arch.arch_vmx.cpu_state))
128 msr_content |= 1 << _EFER_LME;
130 if (VMX_LONG_GUEST(vc))
131 msr_content |= 1 << _EFER_LMA;
132 break;
133 case MSR_FS_BASE:
134 if (!(VMX_LONG_GUEST(vc)))
135 /* XXX should it be GP fault */
136 domain_crash();
137 __vmread(GUEST_FS_BASE, &msr_content);
138 break;
139 case MSR_GS_BASE:
140 if (!(VMX_LONG_GUEST(vc)))
141 domain_crash();
142 __vmread(GUEST_GS_BASE, &msr_content);
143 break;
144 case MSR_SHADOW_GS_BASE:
145 msr_content = msr->shadow_gs;
146 break;
148 CASE_READ_MSR(STAR);
149 CASE_READ_MSR(LSTAR);
150 CASE_READ_MSR(CSTAR);
151 CASE_READ_MSR(SYSCALL_MASK);
152 default:
153 return 0;
154 }
155 VMX_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", msr_content);
156 regs->eax = msr_content & 0xffffffff;
157 regs->edx = msr_content >> 32;
158 return 1;
159 }
161 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
162 {
163 u64 msr_content = regs->eax | ((u64)regs->edx << 32);
164 struct vcpu *vc = current;
165 struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
166 struct msr_state * host_state =
167 &percpu_msr[smp_processor_id()];
169 VMX_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n",
170 regs->ecx, msr_content);
172 switch (regs->ecx){
173 case MSR_EFER:
174 if ((msr_content & EFER_LME) ^
175 test_bit(VMX_CPU_STATE_LME_ENABLED,
176 &vc->arch.arch_vmx.cpu_state)){
177 if (test_bit(VMX_CPU_STATE_PG_ENABLED,
178 &vc->arch.arch_vmx.cpu_state) ||
179 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
180 &vc->arch.arch_vmx.cpu_state)){
181 vmx_inject_exception(vc, TRAP_gp_fault, 0);
182 }
183 }
184 if (msr_content & EFER_LME)
185 set_bit(VMX_CPU_STATE_LME_ENABLED,
186 &vc->arch.arch_vmx.cpu_state);
187 /* No update for LME/LMA since they have no effect */
188 msr->msr_items[VMX_INDEX_MSR_EFER] =
189 msr_content;
190 if (msr_content & ~(EFER_LME | EFER_LMA)){
191 msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
192 if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){
193 rdmsrl(MSR_EFER,
194 host_state->msr_items[VMX_INDEX_MSR_EFER]);
195 set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
196 set_bit(VMX_INDEX_MSR_EFER, &msr->flags);
197 wrmsrl(MSR_EFER, msr_content);
198 }
199 }
200 break;
202 case MSR_FS_BASE:
203 case MSR_GS_BASE:
204 if (!(VMX_LONG_GUEST(vc)))
205 domain_crash();
206 if (!IS_CANO_ADDRESS(msr_content)){
207 VMX_DBG_LOG(DBG_LEVEL_1, "Not a canonical address in MSR write\n");
208 vmx_inject_exception(vc, TRAP_gp_fault, 0);
209 }
210 if (regs->ecx == MSR_FS_BASE)
211 __vmwrite(GUEST_FS_BASE, msr_content);
212 else
213 __vmwrite(GUEST_GS_BASE, msr_content);
214 break;
216 case MSR_SHADOW_GS_BASE:
217 if (!(VMX_LONG_GUEST(vc)))
218 domain_crash();
219 vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
220 wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
221 break;
223 CASE_WRITE_MSR(STAR);
224 CASE_WRITE_MSR(LSTAR);
225 CASE_WRITE_MSR(CSTAR);
226 CASE_WRITE_MSR(SYSCALL_MASK);
227 default:
228 return 0;
229 }
230 return 1;
231 }
233 void
234 vmx_restore_msrs(struct vcpu *v)
235 {
236 int i = 0;
237 struct msr_state *guest_state;
238 struct msr_state *host_state;
239 unsigned long guest_flags;
241 guest_state = &v->arch.arch_vmx.msr_content;
242 host_state = &percpu_msr[smp_processor_id()];
244 wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
245 guest_flags = guest_state->flags;
246 if (!guest_flags)
247 return;
249 while (guest_flags){
250 i = find_first_set_bit(guest_flags);
252 VMX_DBG_LOG(DBG_LEVEL_2,
253 "restore guest's index %d msr %lx with %lx\n",
254 i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
255 set_bit(i, &host_state->flags);
256 wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
257 clear_bit(i, &guest_flags);
258 }
259 }
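/*
 * Counterpart to vmx_load_msrs(): on a switch back to this vcpu, reload the
 * shadow GS base and every guest MSR recorded in guest_state->flags, and mark
 * the same MSRs dirty in the per-CPU host state so vmx_load_msrs() restores
 * them again on the next switch away.
 */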
261 #else /* __i386__ */
262 #define vmx_save_init_msrs() ((void)0)
264 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs){
265 return 0;
266 }
267 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs){
268 return 0;
269 }
270 #endif
272 extern long evtchn_send(int lport);
273 extern long do_block(void);
274 void do_nmi(struct cpu_user_regs *, unsigned long);
276 static int check_vmx_controls(u32 ctrls, u32 msr)
277 {
278 u32 vmx_msr_low, vmx_msr_high;
280 rdmsr(msr, vmx_msr_low, vmx_msr_high);
281 if (ctrls < vmx_msr_low || ctrls > vmx_msr_high) {
282 printk("Insufficient VMX capability 0x%x, "
283 "msr=0x%x,low=0x%8x,high=0x%x\n",
284 ctrls, msr, vmx_msr_low, vmx_msr_high);
285 return 0;
286 }
287 return 1;
288 }
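/*
 * check_vmx_controls() compares the control word we intend to program against
 * the bounds reported by the corresponding IA32_VMX_*_CTLS capability MSR
 * (treated here as a simple low/high numeric range); start_vmx() bails out if
 * any of the four control words is rejected.
 */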
290 int start_vmx(void)
291 {
292 struct vmcs_struct *vmcs;
293 u32 ecx;
294 u32 eax, edx;
295 u64 phys_vmcs; /* debugging */
297 /*
298 * Xen does not fill x86_capability words except 0.
299 */
300 ecx = cpuid_ecx(1);
301 boot_cpu_data.x86_capability[4] = ecx;
303 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
304 return 0;
306 rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
308 if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
309 if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
310 printk("VMX disabled by Feature Control MSR.\n");
311 return 0;
312 }
313 }
314 else {
315 wrmsr(IA32_FEATURE_CONTROL_MSR,
316 IA32_FEATURE_CONTROL_MSR_LOCK |
317 IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
318 }
320 if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
321 MSR_IA32_VMX_PINBASED_CTLS_MSR))
322 return 0;
323 if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
324 MSR_IA32_VMX_PROCBASED_CTLS_MSR))
325 return 0;
326 if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
327 MSR_IA32_VMX_EXIT_CTLS_MSR))
328 return 0;
329 if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
330 MSR_IA32_VMX_ENTRY_CTLS_MSR))
331 return 0;
333 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */
335 if (!(vmcs = alloc_vmcs())) {
336 printk("Failed to allocate VMCS\n");
337 return 0;
338 }
340 phys_vmcs = (u64) virt_to_phys(vmcs);
342 if (!(__vmxon(phys_vmcs))) {
343 printk("VMXON is done\n");
344 }
346 vmx_save_init_msrs();
348 hvm_enabled = 1;
350 return 1;
351 }
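/*
 * Bring-up sequence above: probe CPUID.1:ECX for the VMX feature, honour (or
 * set) the IA32_FEATURE_CONTROL lock/enable bits, sanity-check the four VMX
 * control words, set CR4.VMXE and execute VMXON on a freshly allocated VMCS
 * region, then snapshot the host MSRs with vmx_save_init_msrs() and mark
 * hvm_enabled.
 */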
353 void stop_vmx(void)
354 {
355 if (read_cr4() & X86_CR4_VMXE)
356 __vmxoff();
357 }
359 /*
360 * Not all cases receive a valid value in the VM-exit instruction length field.
361 */
362 #define __get_instruction_length(len) \
363 __vmread(VM_EXIT_INSTRUCTION_LEN, &(len)); \
364 if ((len) < 1 || (len) > 15) \
365 __vmx_bug(&regs);
367 static void inline __update_guest_eip(unsigned long inst_len)
368 {
369 unsigned long current_eip;
371 __vmread(GUEST_RIP, &current_eip);
372 __vmwrite(GUEST_RIP, current_eip + inst_len);
373 }
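/*
 * __update_guest_eip() advances guest RIP past the exiting instruction;
 * callers obtain the length via __get_instruction_length() (or a raw VMREAD
 * of VM_EXIT_INSTRUCTION_LEN), since not every exit type reports a valid
 * length, as noted above.
 */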
376 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
377 {
378 unsigned long gpa; /* FIXME: PAE */
379 int result;
381 #if 0 /* keep for debugging */
382 {
383 unsigned long eip;
385 __vmread(GUEST_RIP, &eip);
386 VMX_DBG_LOG(DBG_LEVEL_VMMU,
387 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
388 va, eip, (unsigned long)regs->error_code);
389 }
390 #endif
392 if (!vmx_paging_enabled(current)){
393 handle_mmio(va, va);
394 TRACE_VMEXIT (2,2);
395 return 1;
396 }
397 gpa = gva_to_gpa(va);
399 /* Use 1:1 page table to identify MMIO address space */
400 if ( mmio_space(gpa) ){
401 if (gpa >= 0xFEE00000) { /* workaround for local APIC */
402 u32 inst_len;
403 __vmread(VM_EXIT_INSTRUCTION_LEN, &(inst_len));
404 __update_guest_eip(inst_len);
405 return 1;
406 }
407 TRACE_VMEXIT (2,2);
408 handle_mmio(va, gpa);
409 return 1;
410 }
412 result = shadow_fault(va, regs);
413 TRACE_VMEXIT (2,result);
414 #if 0
415 if ( !result )
416 {
417 __vmread(GUEST_RIP, &eip);
418 printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
419 }
420 #endif
422 return result;
423 }
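/*
 * Page-fault policy: with guest paging disabled the faulting address is
 * treated as a guest-physical MMIO access; otherwise it is translated with
 * gva_to_gpa() and either passed to handle_mmio() for the MMIO range (with a
 * local-APIC workaround that just skips the instruction) or handed to
 * shadow_fault(). A zero return makes the vmexit handler inject #PF back
 * into the guest.
 */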
425 static void vmx_do_no_device_fault(void)
426 {
427 unsigned long cr0;
428 struct vcpu *v = current;
430 clts();
431 setup_fpu(current);
432 __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
433 if (!(cr0 & X86_CR0_TS)) {
434 __vmread_vcpu(v, GUEST_CR0, &cr0);
435 cr0 &= ~X86_CR0_TS;
436 __vmwrite(GUEST_CR0, cr0);
437 }
438 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
439 }
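/*
 * #NM handling: the FPU is restored lazily via clts()/setup_fpu(); guest
 * CR0.TS is cleared only when the guest's CR0 read shadow does not have TS
 * set, and the #NM intercept is removed from the exception bitmap.
 */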
442 static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
443 {
444 unsigned int eax, ebx, ecx, edx;
445 unsigned long eip;
447 __vmread(GUEST_RIP, &eip);
449 VMX_DBG_LOG(DBG_LEVEL_1,
450 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
451 " (esi) %lx, (edi) %lx",
452 (unsigned long)regs->eax, (unsigned long)regs->ebx,
453 (unsigned long)regs->ecx, (unsigned long)regs->edx,
454 (unsigned long)regs->esi, (unsigned long)regs->edi);
456 cpuid(input, &eax, &ebx, &ecx, &edx);
458 if (input == 1) {
459 #ifdef __i386__
460 clear_bit(X86_FEATURE_PSE, &edx);
461 clear_bit(X86_FEATURE_PAE, &edx);
462 clear_bit(X86_FEATURE_PSE36, &edx);
463 #else
464 struct vcpu *v = current;
465 if (v->domain->arch.ops->guest_paging_levels == PAGING_L2)
466 {
467 clear_bit(X86_FEATURE_PSE, &edx);
468 clear_bit(X86_FEATURE_PAE, &edx);
469 clear_bit(X86_FEATURE_PSE36, &edx);
470 }
471 #endif
473 /* Unsupportable for virtualised CPUs. */
474 clear_bit(X86_FEATURE_VMXE & 31, &ecx);
475 clear_bit(X86_FEATURE_MWAIT & 31, &ecx);
476 }
478 regs->eax = (unsigned long) eax;
479 regs->ebx = (unsigned long) ebx;
480 regs->ecx = (unsigned long) ecx;
481 regs->edx = (unsigned long) edx;
483 VMX_DBG_LOG(DBG_LEVEL_1,
484 "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
485 eip, input, eax, ebx, ecx, edx);
487 }
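/*
 * CPUID leaf 1 is filtered before being returned: PSE/PAE/PSE36 are hidden
 * on i386 builds and for 2-level-paging guests on x86_64, and VMXE and MWAIT
 * are always cleared from ECX, so unmodified guests do not see the VMX
 * feature (the subject of this changeset).
 */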
489 #define CASE_GET_REG_P(REG, reg) \
490 case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
492 static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs)
493 {
494 unsigned int reg;
495 unsigned long *reg_p = 0;
496 struct vcpu *v = current;
497 unsigned long eip;
499 __vmread(GUEST_RIP, &eip);
501 reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
503 VMX_DBG_LOG(DBG_LEVEL_1,
504 "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx",
505 eip, reg, exit_qualification);
507 switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
508 CASE_GET_REG_P(EAX, eax);
509 CASE_GET_REG_P(ECX, ecx);
510 CASE_GET_REG_P(EDX, edx);
511 CASE_GET_REG_P(EBX, ebx);
512 CASE_GET_REG_P(EBP, ebp);
513 CASE_GET_REG_P(ESI, esi);
514 CASE_GET_REG_P(EDI, edi);
515 case REG_ESP:
516 break;
517 default:
518 __vmx_bug(regs);
519 }
521 switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) {
522 case TYPE_MOV_TO_DR:
523 /* don't need to check the range */
524 if (reg != REG_ESP)
525 v->arch.guest_context.debugreg[reg] = *reg_p;
526 else {
527 unsigned long value;
528 __vmread(GUEST_RSP, &value);
529 v->arch.guest_context.debugreg[reg] = value;
530 }
531 break;
532 case TYPE_MOV_FROM_DR:
533 if (reg != REG_ESP)
534 *reg_p = v->arch.guest_context.debugreg[reg];
535 else {
536 __vmwrite(GUEST_RSP, v->arch.guest_context.debugreg[reg]);
537 }
538 break;
539 }
540 }
542 /*
543 * Invalidate the TLB for va. Invalidate the shadow page corresponding to
544 * the address va.
545 */
546 static void vmx_vmexit_do_invlpg(unsigned long va)
547 {
548 unsigned long eip;
549 struct vcpu *v = current;
551 __vmread(GUEST_RIP, &eip);
553 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%lx, va=%lx",
554 eip, va);
556 /*
557 * We do the safest things first, then try to update the shadow,
558 * copying from the guest.
559 */
560 shadow_invlpg(v, va);
561 }
563 static int check_for_null_selector(unsigned long eip)
564 {
565 unsigned char inst[MAX_INST_LEN];
566 unsigned long sel;
567 int i, inst_len;
568 int inst_copy_from_guest(unsigned char *, unsigned long, int);
570 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
571 memset(inst, 0, MAX_INST_LEN);
572 if (inst_copy_from_guest(inst, eip, inst_len) != inst_len) {
573 printf("check_for_null_selector: get guest instruction failed\n");
574 domain_crash_synchronous();
575 }
577 for (i = 0; i < inst_len; i++) {
578 switch (inst[i]) {
579 case 0xf3: /* REPZ */
580 case 0xf2: /* REPNZ */
581 case 0xf0: /* LOCK */
582 case 0x66: /* data32 */
583 case 0x67: /* addr32 */
584 continue;
585 case 0x2e: /* CS */
586 __vmread(GUEST_CS_SELECTOR, &sel);
587 break;
588 case 0x36: /* SS */
589 __vmread(GUEST_SS_SELECTOR, &sel);
590 break;
591 case 0x26: /* ES */
592 __vmread(GUEST_ES_SELECTOR, &sel);
593 break;
594 case 0x64: /* FS */
595 __vmread(GUEST_FS_SELECTOR, &sel);
596 break;
597 case 0x65: /* GS */
598 __vmread(GUEST_GS_SELECTOR, &sel);
599 break;
600 case 0x3e: /* DS */
601 /* FALLTHROUGH */
602 default:
603 /* DS is the default */
604 __vmread(GUEST_DS_SELECTOR, &sel);
605 }
606 return sel == 0 ? 1 : 0;
607 }
609 return 0;
610 }
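/*
 * check_for_null_selector() fetches the bytes of the faulting I/O instruction
 * from guest memory, scans its prefix bytes for a segment override (DS being
 * the default), and returns 1 if that segment's selector is null; the caller
 * uses this because the reported guest-linear address is invalid in that case.
 */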
612 void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
613 unsigned long count, int size, long value, int dir, int pvalid)
614 {
615 struct vcpu *v = current;
616 vcpu_iodata_t *vio;
617 ioreq_t *p;
619 vio = get_vio(v->domain, v->vcpu_id);
620 if (vio == NULL) {
621 printk("bad shared page: %lx\n", (unsigned long) vio);
622 domain_crash_synchronous();
623 }
625 if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
626 printf("VMX I/O has not yet completed\n");
627 domain_crash_synchronous();
628 }
629 set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
631 p = &vio->vp_ioreq;
632 p->dir = dir;
633 p->pdata_valid = pvalid;
635 p->type = IOREQ_TYPE_PIO;
636 p->size = size;
637 p->addr = port;
638 p->count = count;
639 p->df = regs->eflags & EF_DF ? 1 : 0;
641 if (pvalid) {
642 if (vmx_paging_enabled(current))
643 p->u.pdata = (void *) gva_to_gpa(value);
644 else
645 p->u.pdata = (void *) value; /* guest VA == guest PA */
646 } else
647 p->u.data = value;
649 p->state = STATE_IOREQ_READY;
651 if (vmx_portio_intercept(p)) {
652 /* no blocking & no evtchn notification */
653 clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
654 return;
655 }
657 evtchn_send(iopacket_port(v->domain));
658 vmx_wait_io();
659 }
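/*
 * send_pio_req() fills in an ioreq in this vcpu's shared page and marks the
 * vcpu as waiting for I/O; requests claimed by vmx_portio_intercept() are
 * completed internally, otherwise the device model is notified over the
 * event channel and the vcpu waits in vmx_wait_io().
 */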
661 static void vmx_io_instruction(struct cpu_user_regs *regs,
662 unsigned long exit_qualification, unsigned long inst_len)
663 {
664 struct mmio_op *mmio_opp;
665 unsigned long eip, cs, eflags;
666 unsigned long port, size, dir;
667 int vm86;
669 mmio_opp = &current->arch.arch_vmx.mmio_op;
670 mmio_opp->instr = INSTR_PIO;
671 mmio_opp->flags = 0;
673 __vmread(GUEST_RIP, &eip);
674 __vmread(GUEST_CS_SELECTOR, &cs);
675 __vmread(GUEST_RFLAGS, &eflags);
676 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
678 VMX_DBG_LOG(DBG_LEVEL_1,
679 "vmx_io_instruction: vm86 %d, eip=%lx:%lx, "
680 "exit_qualification = %lx",
681 vm86, cs, eip, exit_qualification);
683 if (test_bit(6, &exit_qualification))
684 port = (exit_qualification >> 16) & 0xFFFF;
685 else
686 port = regs->edx & 0xffff;
687 TRACE_VMEXIT(2, port);
688 size = (exit_qualification & 7) + 1;
689 dir = test_bit(3, &exit_qualification); /* direction */
691 if (test_bit(4, &exit_qualification)) { /* string instruction */
692 unsigned long addr, count = 1;
693 int sign = regs->eflags & EF_DF ? -1 : 1;
695 __vmread(GUEST_LINEAR_ADDRESS, &addr);
697 /*
698 * In protected mode, guest linear address is invalid if the
699 * selector is null.
700 */
701 if (!vm86 && check_for_null_selector(eip))
702 addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
704 if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
705 mmio_opp->flags |= REPZ;
706 count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
707 }
709 /*
710 * Handle string pio instructions that cross pages or that
711 * are unaligned. See the comments in vmx_platform.c/handle_mmio()
712 */
713 if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
714 unsigned long value = 0;
716 mmio_opp->flags |= OVERLAP;
717 if (dir == IOREQ_WRITE)
718 vmx_copy(&value, addr, size, VMX_COPY_IN);
719 send_pio_req(regs, port, 1, size, value, dir, 0);
720 } else {
721 if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
722 if (sign > 0)
723 count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
724 else
725 count = (addr & ~PAGE_MASK) / size;
726 } else
727 __update_guest_eip(inst_len);
729 send_pio_req(regs, port, count, size, addr, dir, 1);
730 }
731 } else {
732 __update_guest_eip(inst_len);
733 send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
734 }
735 }
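/*
 * The I/O exit qualification is decoded as: bits 0-2 = size-1, bit 3 =
 * direction, bit 4 = string instruction, bit 5 = REP prefix, bit 6 = port
 * encoded in bits 16-31 rather than taken from DX. String accesses that
 * straddle a page are either bounced through a temporary value (OVERLAP) or
 * clipped to the current page; the guest RIP is only advanced here when the
 * whole access is issued as a single request.
 */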
737 int
738 vmx_copy(void *buf, unsigned long laddr, int size, int dir)
739 {
740 unsigned long gpa, mfn;
741 char *addr;
742 int count;
744 while (size > 0) {
745 count = PAGE_SIZE - (laddr & ~PAGE_MASK);
746 if (count > size)
747 count = size;
749 if (vmx_paging_enabled(current)) {
750 gpa = gva_to_gpa(laddr);
751 mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
752 } else
753 mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
754 if (mfn == INVALID_MFN)
755 return 0;
757 addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
759 if (dir == VMX_COPY_IN)
760 memcpy(buf, addr, count);
761 else
762 memcpy(addr, buf, count);
764 unmap_domain_page(addr);
766 laddr += count;
767 buf += count;
768 size -= count;
769 }
771 return 1;
772 }
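/*
 * vmx_copy() moves `size' bytes between a hypervisor buffer and a guest
 * linear address one page at a time: each page is translated (gva_to_gpa()
 * with paging enabled, identity otherwise), mapped via map_domain_page() and
 * memcpy'd in the requested direction; it returns 0 if any page fails to
 * translate to a valid mfn.
 */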
774 int
775 vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
776 {
777 unsigned long inst_len;
778 int error = 0;
780 error |= __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
781 error |= __vmread(GUEST_RIP, &c->eip);
782 c->eip += inst_len; /* skip transition instruction */
783 error |= __vmread(GUEST_RSP, &c->esp);
784 error |= __vmread(GUEST_RFLAGS, &c->eflags);
786 error |= __vmread(CR0_READ_SHADOW, &c->cr0);
787 c->cr3 = v->arch.arch_vmx.cpu_cr3;
788 error |= __vmread(CR4_READ_SHADOW, &c->cr4);
790 error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
791 error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
793 error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
794 error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
796 error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
797 error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
798 error |= __vmread(GUEST_CS_BASE, &c->cs_base);
799 error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
801 error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
802 error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
803 error |= __vmread(GUEST_DS_BASE, &c->ds_base);
804 error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
806 error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
807 error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
808 error |= __vmread(GUEST_ES_BASE, &c->es_base);
809 error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
811 error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
812 error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
813 error |= __vmread(GUEST_SS_BASE, &c->ss_base);
814 error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
816 error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
817 error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
818 error |= __vmread(GUEST_FS_BASE, &c->fs_base);
819 error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
821 error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
822 error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
823 error |= __vmread(GUEST_GS_BASE, &c->gs_base);
824 error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
826 error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
827 error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
828 error |= __vmread(GUEST_TR_BASE, &c->tr_base);
829 error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
831 error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
832 error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
833 error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
834 error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
836 return !error;
837 }
839 int
840 vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
841 {
842 unsigned long mfn, old_cr4;
843 int error = 0;
845 error |= __vmwrite(GUEST_RIP, c->eip);
846 error |= __vmwrite(GUEST_RSP, c->esp);
847 error |= __vmwrite(GUEST_RFLAGS, c->eflags);
849 error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
851 if (!vmx_paging_enabled(v)) {
852 VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
853 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
854 goto skip_cr3;
855 }
857 if (c->cr3 == v->arch.arch_vmx.cpu_cr3) {
858 /*
859 * This is a simple TLB flush, implying the guest has
860 * removed some translation or changed page attributes.
861 * We simply invalidate the shadow.
862 */
863 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
864 if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
865 printk("Invalid CR3 value=%x", c->cr3);
866 domain_crash_synchronous();
867 return 0;
868 }
869 shadow_sync_all(v->domain);
870 } else {
871 /*
872 * If different, make a shadow. Check if the PDBR is valid
873 * first.
874 */
875 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
876 if ((c->cr3 >> PAGE_SHIFT) > v->domain->max_pages) {
877 printk("Invalid CR3 value=%x", c->cr3);
878 domain_crash_synchronous();
879 return 0;
880 }
881 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
882 v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
883 update_pagetables(v);
884 /*
885 * arch.shadow_table should now hold the next CR3 for shadow
886 */
887 v->arch.arch_vmx.cpu_cr3 = c->cr3;
888 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
889 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
890 }
892 skip_cr3:
894 error |= __vmread(CR4_READ_SHADOW, &old_cr4);
895 error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
896 error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
898 error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
899 error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
901 error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
902 error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
904 error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
905 error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
906 error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
907 error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
909 error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
910 error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
911 error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
912 error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
914 error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
915 error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
916 error |= __vmwrite(GUEST_ES_BASE, c->es_base);
917 error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
919 error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
920 error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
921 error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
922 error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
924 error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
925 error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
926 error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
927 error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
929 error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
930 error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
931 error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
932 error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
934 error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
935 error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
936 error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
937 error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
939 error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
940 error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
941 error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
942 error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
944 return !error;
945 }
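/*
 * vmx_world_save()/vmx_world_restore() capture and reload the guest's
 * register, segment and control-register state when switching into and out
 * of the vmxassist context used below for real-mode emulation; restore also
 * revalidates or rebuilds the shadow page tables for the incoming CR3.
 */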
947 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
949 int
950 vmx_assist(struct vcpu *v, int mode)
951 {
952 struct vmx_assist_context c;
953 u32 magic;
954 u32 cp;
956 /* make sure vmxassist exists (this is not an error) */
957 if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), VMX_COPY_IN))
958 return 0;
959 if (magic != VMXASSIST_MAGIC)
960 return 0;
962 switch (mode) {
963 /*
964 * Transfer control to vmxassist.
965 * Store the current context in VMXASSIST_OLD_CONTEXT and load
966 * the new VMXASSIST_NEW_CONTEXT context. This context was created
967 * by vmxassist and will transfer control to it.
968 */
969 case VMX_ASSIST_INVOKE:
970 /* save the old context */
971 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
972 goto error;
973 if (cp != 0) {
974 if (!vmx_world_save(v, &c))
975 goto error;
976 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT))
977 goto error;
978 }
980 /* restore the new context, this should activate vmxassist */
981 if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN))
982 goto error;
983 if (cp != 0) {
984 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
985 goto error;
986 if (!vmx_world_restore(v, &c))
987 goto error;
988 return 1;
989 }
990 break;
992 /*
993 * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
994 * above.
995 */
996 case VMX_ASSIST_RESTORE:
997 /* save the old context */
998 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
999 goto error;
1000 if (cp != 0) {
1001 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
1002 goto error;
1003 if (!vmx_world_restore(v, &c))
1004 goto error;
1005 return 1;
1007 break;
1010 error:
1011 printf("Failed to transfer to vmxassist\n");
1012 domain_crash_synchronous();
1013 return 0;
1016 static int vmx_set_cr0(unsigned long value)
1018 struct vcpu *v = current;
1019 unsigned long mfn;
1020 unsigned long eip;
1021 int paging_enabled;
1022 unsigned long vm_entry_value;
1023 /*
1024 * CR0: We don't want to lose PE and PG.
1025 */
1026 paging_enabled = vmx_paging_enabled(v);
1027 __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
1028 __vmwrite(CR0_READ_SHADOW, value);
1030 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
1032 if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled) {
1033 /*
1034 * The guest CR3 must be pointing to the guest physical.
1035 */
1036 if ( !VALID_MFN(mfn = get_mfn_from_pfn(
1037 v->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
1038 !get_page(pfn_to_page(mfn), v->domain) )
1040 printk("Invalid CR3 value = %lx", v->arch.arch_vmx.cpu_cr3);
1041 domain_crash_synchronous(); /* need to take a clean path */
1044 #if defined(__x86_64__)
1045 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
1046 &v->arch.arch_vmx.cpu_state) &&
1047 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
1048 &v->arch.arch_vmx.cpu_state)){
1049 VMX_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
1050 vmx_inject_exception(v, TRAP_gp_fault, 0);
1052 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
1053 &v->arch.arch_vmx.cpu_state)){
1054 /* At this point PAE should already be enabled */
1055 VMX_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
1056 set_bit(VMX_CPU_STATE_LMA_ENABLED,
1057 &v->arch.arch_vmx.cpu_state);
1058 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
1059 vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
1060 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
1062 #if CONFIG_PAGING_LEVELS >= 4
1063 if(!shadow_set_guest_paging_levels(v->domain, 4)) {
1064 printk("Unsupported guest paging levels\n");
1065 domain_crash_synchronous(); /* need to take a clean path */
1067 #endif
1069 else
1071 #if CONFIG_PAGING_LEVELS >= 4
1072 if(!shadow_set_guest_paging_levels(v->domain, 2)) {
1073 printk("Unsupported guest paging levels\n");
1074 domain_crash_synchronous(); /* need to take a clean path */
1076 #endif
1079 unsigned long crn;
1080 /* update CR4's PAE if needed */
1081 __vmread(GUEST_CR4, &crn);
1082 if ( (!(crn & X86_CR4_PAE)) &&
1083 test_bit(VMX_CPU_STATE_PAE_ENABLED,
1084 &v->arch.arch_vmx.cpu_state)){
1085 VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
1086 __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
1088 #endif
1089 /*
1090 * Now arch.guest_table points to machine physical.
1091 */
1092 v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1093 update_pagetables(v);
1095 VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
1096 (unsigned long) (mfn << PAGE_SHIFT));
1098 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
1099 /*
1100 * arch->shadow_table should hold the next CR3 for shadow
1101 */
1102 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
1103 v->arch.arch_vmx.cpu_cr3, mfn);
1106 if(!((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled)
1107 if(v->arch.arch_vmx.cpu_cr3)
1108 put_page(pfn_to_page(get_mfn_from_pfn(
1109 v->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)));
1111 /*
1112 * VMX does not implement real-mode virtualization. We emulate
1113 * real-mode by performing a world switch to VMXAssist whenever
1114 * a partition disables the CR0.PE bit.
1115 */
1116 if ((value & X86_CR0_PE) == 0) {
1117 if ( value & X86_CR0_PG ) {
1118 /* inject GP here */
1119 vmx_inject_exception(v, TRAP_gp_fault, 0);
1120 return 0;
1121 } else {
1122 /*
1123 * Disable paging here.
1124 * Same to PE == 1 && PG == 0
1125 */
1126 if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
1127 &v->arch.arch_vmx.cpu_state)){
1128 clear_bit(VMX_CPU_STATE_LMA_ENABLED,
1129 &v->arch.arch_vmx.cpu_state);
1130 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
1131 vm_entry_value &= ~VM_ENTRY_CONTROLS_IA32E_MODE;
1132 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
1136 if (vmx_assist(v, VMX_ASSIST_INVOKE)) {
1137 set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.arch_vmx.cpu_state);
1138 __vmread(GUEST_RIP, &eip);
1139 VMX_DBG_LOG(DBG_LEVEL_1,
1140 "Transfering control to vmxassist %%eip 0x%lx\n", eip);
1141 return 0; /* do not update eip! */
1143 } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
1144 &v->arch.arch_vmx.cpu_state)) {
1145 __vmread(GUEST_RIP, &eip);
1146 VMX_DBG_LOG(DBG_LEVEL_1,
1147 "Enabling CR0.PE at %%eip 0x%lx\n", eip);
1148 if (vmx_assist(v, VMX_ASSIST_RESTORE)) {
1149 clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
1150 &v->arch.arch_vmx.cpu_state);
1151 __vmread(GUEST_RIP, &eip);
1152 VMX_DBG_LOG(DBG_LEVEL_1,
1153 "Restoring to %%eip 0x%lx\n", eip);
1154 return 0; /* do not update eip! */
1158 return 1;
1161 #define CASE_GET_REG(REG, reg) \
1162 case REG_ ## REG: value = regs->reg; break
1164 #define CASE_EXTEND_SET_REG \
1165 CASE_EXTEND_REG(S)
1166 #define CASE_EXTEND_GET_REG \
1167 CASE_EXTEND_REG(G)
1169 #ifdef __i386__
1170 #define CASE_EXTEND_REG(T)
1171 #else
1172 #define CASE_EXTEND_REG(T) \
1173 CASE_ ## T ## ET_REG(R8, r8); \
1174 CASE_ ## T ## ET_REG(R9, r9); \
1175 CASE_ ## T ## ET_REG(R10, r10); \
1176 CASE_ ## T ## ET_REG(R11, r11); \
1177 CASE_ ## T ## ET_REG(R12, r12); \
1178 CASE_ ## T ## ET_REG(R13, r13); \
1179 CASE_ ## T ## ET_REG(R14, r14); \
1180 CASE_ ## T ## ET_REG(R15, r15);
1181 #endif
1184 /*
1185 * Write to control registers
1186 */
1187 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
1189 unsigned long value;
1190 unsigned long old_cr;
1191 struct vcpu *v = current;
1193 switch (gp) {
1194 CASE_GET_REG(EAX, eax);
1195 CASE_GET_REG(ECX, ecx);
1196 CASE_GET_REG(EDX, edx);
1197 CASE_GET_REG(EBX, ebx);
1198 CASE_GET_REG(EBP, ebp);
1199 CASE_GET_REG(ESI, esi);
1200 CASE_GET_REG(EDI, edi);
1201 CASE_EXTEND_GET_REG
1202 case REG_ESP:
1203 __vmread(GUEST_RSP, &value);
1204 break;
1205 default:
1206 printk("invalid gp: %d\n", gp);
1207 __vmx_bug(regs);
1210 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
1211 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
1213 switch(cr) {
1214 case 0:
1216 return vmx_set_cr0(value);
1218 case 3:
1220 unsigned long old_base_mfn, mfn;
1222 /*
1223 * If paging is not enabled yet, simply copy the value to CR3.
1224 */
1225 if (!vmx_paging_enabled(v)) {
1226 v->arch.arch_vmx.cpu_cr3 = value;
1227 break;
1230 /*
1231 * We make a new one if the shadow does not exist.
1232 */
1233 if (value == v->arch.arch_vmx.cpu_cr3) {
1234 /*
1235 * This is a simple TLB flush, implying the guest has
1236 * removed some translation or changed page attributes.
1237 * We simply invalidate the shadow.
1238 */
1239 mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
1240 if (mfn != pagetable_get_pfn(v->arch.guest_table))
1241 __vmx_bug(regs);
1242 shadow_sync_all(v->domain);
1243 } else {
1244 /*
1245 * If different, make a shadow. Check if the PDBR is valid
1246 * first.
1247 */
1248 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
1249 if ( ((value >> PAGE_SHIFT) > v->domain->max_pages ) ||
1250 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
1251 !get_page(pfn_to_page(mfn), v->domain) )
1253 printk("Invalid CR3 value=%lx", value);
1254 domain_crash_synchronous(); /* need to take a clean path */
1256 old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
1257 v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1258 if (old_base_mfn)
1259 put_page(pfn_to_page(old_base_mfn));
1260 update_pagetables(v);
1261 /*
1262 * arch.shadow_table should now hold the next CR3 for shadow
1263 */
1264 v->arch.arch_vmx.cpu_cr3 = value;
1265 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
1266 value);
1267 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
1269 break;
1271 case 4:
1273 /* CR4 */
1274 unsigned long old_guest_cr;
1276 __vmread(GUEST_CR4, &old_guest_cr);
1277 if (value & X86_CR4_PAE){
1278 set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.arch_vmx.cpu_state);
1279 } else {
1280 if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
1281 &v->arch.arch_vmx.cpu_state)){
1282 vmx_inject_exception(v, TRAP_gp_fault, 0);
1284 clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.arch_vmx.cpu_state);
1287 __vmread(CR4_READ_SHADOW, &old_cr);
1289 __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
1290 __vmwrite(CR4_READ_SHADOW, value);
1292 /*
1293 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
1294 * all TLB entries except global entries.
1295 */
1296 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
1297 shadow_sync_all(v->domain);
1299 break;
1301 default:
1302 printk("invalid cr: %d\n", gp);
1303 __vmx_bug(regs);
1306 return 1;
1309 #define CASE_SET_REG(REG, reg) \
1310 case REG_ ## REG: \
1311 regs->reg = value; \
1312 break
1314 /*
1315 * Read from control registers. CR0 and CR4 are read from the shadow.
1316 */
1317 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
1319 unsigned long value;
1320 struct vcpu *v = current;
1322 if (cr != 3)
1323 __vmx_bug(regs);
1325 value = (unsigned long) v->arch.arch_vmx.cpu_cr3;
1327 switch (gp) {
1328 CASE_SET_REG(EAX, eax);
1329 CASE_SET_REG(ECX, ecx);
1330 CASE_SET_REG(EDX, edx);
1331 CASE_SET_REG(EBX, ebx);
1332 CASE_SET_REG(EBP, ebp);
1333 CASE_SET_REG(ESI, esi);
1334 CASE_SET_REG(EDI, edi);
1335 CASE_EXTEND_SET_REG
1336 case REG_ESP:
1337 __vmwrite(GUEST_RSP, value);
1338 regs->esp = value;
1339 break;
1340 default:
1341 printk("invalid gp: %d\n", gp);
1342 __vmx_bug(regs);
1345 VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
1348 static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
1350 unsigned int gp, cr;
1351 unsigned long value;
1352 struct vcpu *v = current;
1354 switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
1355 case TYPE_MOV_TO_CR:
1356 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
1357 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
1358 TRACE_VMEXIT(1,TYPE_MOV_TO_CR);
1359 TRACE_VMEXIT(2,cr);
1360 TRACE_VMEXIT(3,gp);
1361 return mov_to_cr(gp, cr, regs);
1362 case TYPE_MOV_FROM_CR:
1363 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
1364 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
1365 TRACE_VMEXIT(1,TYPE_MOV_FROM_CR);
1366 TRACE_VMEXIT(2,cr);
1367 TRACE_VMEXIT(3,gp);
1368 mov_from_cr(cr, gp, regs);
1369 break;
1370 case TYPE_CLTS:
1371 TRACE_VMEXIT(1,TYPE_CLTS);
1372 clts();
1373 setup_fpu(current);
1375 __vmread_vcpu(v, GUEST_CR0, &value);
1376 value &= ~X86_CR0_TS; /* clear TS */
1377 __vmwrite(GUEST_CR0, value);
1379 __vmread_vcpu(v, CR0_READ_SHADOW, &value);
1380 value &= ~X86_CR0_TS; /* clear TS */
1381 __vmwrite(CR0_READ_SHADOW, value);
1382 break;
1383 case TYPE_LMSW:
1384 TRACE_VMEXIT(1,TYPE_LMSW);
1385 __vmread_vcpu(v, CR0_READ_SHADOW, &value);
1386 value = (value & ~0xF) |
1387 (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
1388 return vmx_set_cr0(value);
1389 break;
1390 default:
1391 __vmx_bug(regs);
1392 break;
1394 return 1;
1397 static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
1399 u64 msr_content = 0;
1401 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
1402 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1403 (unsigned long)regs->edx);
1404 switch (regs->ecx) {
1405 case MSR_IA32_SYSENTER_CS:
1406 __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
1407 break;
1408 case MSR_IA32_SYSENTER_ESP:
1409 __vmread(GUEST_SYSENTER_ESP, &msr_content);
1410 break;
1411 case MSR_IA32_SYSENTER_EIP:
1412 __vmread(GUEST_SYSENTER_EIP, &msr_content);
1413 break;
1414 default:
1415 if(long_mode_do_msr_read(regs))
1416 return;
1417 rdmsr_user(regs->ecx, regs->eax, regs->edx);
1418 break;
1421 regs->eax = msr_content & 0xFFFFFFFF;
1422 regs->edx = msr_content >> 32;
1424 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
1425 "ecx=%lx, eax=%lx, edx=%lx",
1426 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1427 (unsigned long)regs->edx);
1430 static inline void vmx_do_msr_write(struct cpu_user_regs *regs)
1432 u64 msr_content;
1434 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write: ecx=%lx, eax=%lx, edx=%lx",
1435 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1436 (unsigned long)regs->edx);
1438 msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32);
1440 switch (regs->ecx) {
1441 case MSR_IA32_SYSENTER_CS:
1442 __vmwrite(GUEST_SYSENTER_CS, msr_content);
1443 break;
1444 case MSR_IA32_SYSENTER_ESP:
1445 __vmwrite(GUEST_SYSENTER_ESP, msr_content);
1446 break;
1447 case MSR_IA32_SYSENTER_EIP:
1448 __vmwrite(GUEST_SYSENTER_EIP, msr_content);
1449 break;
1450 default:
1451 long_mode_do_msr_write(regs);
1452 break;
1455 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write returns: "
1456 "ecx=%lx, eax=%lx, edx=%lx",
1457 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1458 (unsigned long)regs->edx);
1461 volatile unsigned long do_hlt_count;
1462 /*
1463 * Need to use this exit to reschedule
1464 */
1465 void vmx_vmexit_do_hlt(void)
1467 do_block();
1470 static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
1472 unsigned int vector;
1473 int error;
1475 asmlinkage void do_IRQ(struct cpu_user_regs *);
1476 void smp_apic_timer_interrupt(struct cpu_user_regs *);
1477 void timer_interrupt(int, void *, struct cpu_user_regs *);
1478 void smp_event_check_interrupt(void);
1479 void smp_invalidate_interrupt(void);
1480 void smp_call_function_interrupt(void);
1481 void smp_spurious_interrupt(struct cpu_user_regs *regs);
1482 void smp_error_interrupt(struct cpu_user_regs *regs);
1484 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1485 && !(vector & INTR_INFO_VALID_MASK))
1486 __vmx_bug(regs);
1488 vector &= 0xff;
1489 local_irq_disable();
1491 switch(vector) {
1492 case LOCAL_TIMER_VECTOR:
1493 smp_apic_timer_interrupt(regs);
1494 break;
1495 case EVENT_CHECK_VECTOR:
1496 smp_event_check_interrupt();
1497 break;
1498 case INVALIDATE_TLB_VECTOR:
1499 smp_invalidate_interrupt();
1500 break;
1501 case CALL_FUNCTION_VECTOR:
1502 smp_call_function_interrupt();
1503 break;
1504 case SPURIOUS_APIC_VECTOR:
1505 smp_spurious_interrupt(regs);
1506 break;
1507 case ERROR_APIC_VECTOR:
1508 smp_error_interrupt(regs);
1509 break;
1510 default:
1511 regs->entry_vector = vector;
1512 do_IRQ(regs);
1513 break;
1517 #define BUF_SIZ 256
1518 #define MAX_LINE 80
1519 char print_buf[BUF_SIZ];
1520 static int index;
1522 static void vmx_print_line(const char c, struct vcpu *v)
1525 if (index == MAX_LINE || c == '\n') {
1526 if (index == MAX_LINE) {
1527 print_buf[index++] = c;
1529 print_buf[index] = '\0';
1530 printk("(GUEST: %u) %s\n", v->domain->domain_id, (char *) &print_buf);
1531 index = 0;
1533 else
1534 print_buf[index++] = c;
1537 void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
1539 __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
1540 __vmread(GUEST_RSP, &ctxt->esp);
1541 __vmread(GUEST_RFLAGS, &ctxt->eflags);
1542 __vmread(GUEST_CS_SELECTOR, &ctxt->cs);
1543 __vmread(GUEST_RIP, &ctxt->eip);
1545 __vmread(GUEST_GS_SELECTOR, &ctxt->gs);
1546 __vmread(GUEST_FS_SELECTOR, &ctxt->fs);
1547 __vmread(GUEST_ES_SELECTOR, &ctxt->es);
1548 __vmread(GUEST_DS_SELECTOR, &ctxt->ds);
1551 #ifdef XEN_DEBUGGER
1552 void save_cpu_user_regs(struct cpu_user_regs *regs)
1554 __vmread(GUEST_SS_SELECTOR, &regs->xss);
1555 __vmread(GUEST_RSP, &regs->esp);
1556 __vmread(GUEST_RFLAGS, &regs->eflags);
1557 __vmread(GUEST_CS_SELECTOR, &regs->xcs);
1558 __vmread(GUEST_RIP, &regs->eip);
1560 __vmread(GUEST_GS_SELECTOR, &regs->xgs);
1561 __vmread(GUEST_FS_SELECTOR, &regs->xfs);
1562 __vmread(GUEST_ES_SELECTOR, &regs->xes);
1563 __vmread(GUEST_DS_SELECTOR, &regs->xds);
1566 void restore_cpu_user_regs(struct cpu_user_regs *regs)
1568 __vmwrite(GUEST_SS_SELECTOR, regs->xss);
1569 __vmwrite(GUEST_RSP, regs->esp);
1570 __vmwrite(GUEST_RFLAGS, regs->eflags);
1571 __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
1572 __vmwrite(GUEST_RIP, regs->eip);
1574 __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
1575 __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
1576 __vmwrite(GUEST_ES_SELECTOR, regs->xes);
1577 __vmwrite(GUEST_DS_SELECTOR, regs->xds);
1579 #endif
1581 asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
1583 unsigned int exit_reason, idtv_info_field;
1584 unsigned long exit_qualification, eip, inst_len = 0;
1585 struct vcpu *v = current;
1586 int error;
1588 if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
1589 __vmx_bug(&regs);
1591 perfc_incra(vmexits, exit_reason);
1593 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
1594 if (idtv_info_field & INTR_INFO_VALID_MASK) {
1595 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
1597 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
1598 if (inst_len >= 1 && inst_len <= 15)
1599 __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
1601 if (idtv_info_field & 0x800) { /* valid error code */
1602 unsigned long error_code;
1603 __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
1604 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
1607 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
1610 /* don't bother logging H/W interrupts */
1611 if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
1612 exit_reason != EXIT_REASON_VMCALL &&
1613 exit_reason != EXIT_REASON_IO_INSTRUCTION)
1614 VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
1616 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
1617 printk("Failed vm entry\n");
1618 domain_crash_synchronous();
1619 return;
1622 #ifdef TRACE_BUFFER
1624 __vmread(GUEST_RIP, &eip);
1625 TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
1626 TRACE_VMEXIT(0,exit_reason);
1628 #endif
1630 switch (exit_reason) {
1631 case EXIT_REASON_EXCEPTION_NMI:
1633 /*
1634 * We don't set the software-interrupt exiting (INT n).
1635 * (1) We can get an exception (e.g. #PG) in the guest, or
1636 * (2) NMI
1637 */
1638 int error;
1639 unsigned int vector;
1640 unsigned long va;
1642 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1643 || !(vector & INTR_INFO_VALID_MASK))
1644 __vmx_bug(&regs);
1645 vector &= 0xff;
1647 TRACE_VMEXIT(1,vector);
1648 perfc_incra(cause_vector, vector);
1650 TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector);
1651 switch (vector) {
1652 #ifdef XEN_DEBUGGER
1653 case TRAP_debug:
1655 save_cpu_user_regs(&regs);
1656 pdb_handle_exception(1, &regs, 1);
1657 restore_cpu_user_regs(&regs);
1658 break;
1660 case TRAP_int3:
1662 save_cpu_user_regs(&regs);
1663 pdb_handle_exception(3, &regs, 1);
1664 restore_cpu_user_regs(&regs);
1665 break;
1667 #else
1668 case TRAP_debug:
1670 void store_cpu_user_regs(struct cpu_user_regs *regs);
1671 long do_sched_op(unsigned long op);
1674 store_cpu_user_regs(&regs);
1675 __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);
1677 set_bit(_VCPUF_ctrl_pause, &current->vcpu_flags);
1678 do_sched_op(SCHEDOP_yield);
1680 break;
1682 #endif
1683 case TRAP_no_device:
1685 vmx_do_no_device_fault();
1686 break;
1688 case TRAP_page_fault:
1690 __vmread(EXIT_QUALIFICATION, &va);
1691 __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
1693 TRACE_VMEXIT(3,regs.error_code);
1694 TRACE_VMEXIT(4,va);
1696 VMX_DBG_LOG(DBG_LEVEL_VMMU,
1697 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
1698 (unsigned long)regs.eax, (unsigned long)regs.ebx,
1699 (unsigned long)regs.ecx, (unsigned long)regs.edx,
1700 (unsigned long)regs.esi, (unsigned long)regs.edi);
1701 v->arch.arch_vmx.mmio_op.inst_decoder_regs = &regs;
1703 if (!(error = vmx_do_page_fault(va, &regs))) {
1704 /*
1705 * Inject #PG using Interruption-Information Fields
1706 */
1707 vmx_inject_exception(v, TRAP_page_fault, regs.error_code);
1708 v->arch.arch_vmx.cpu_cr2 = va;
1709 TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
1711 break;
1713 case TRAP_nmi:
1714 do_nmi(&regs, 0);
1715 break;
1716 default:
1717 vmx_reflect_exception(v);
1718 break;
1720 break;
1722 case EXIT_REASON_EXTERNAL_INTERRUPT:
1723 vmx_vmexit_do_extint(&regs);
1724 break;
1725 case EXIT_REASON_PENDING_INTERRUPT:
1726 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
1727 MONITOR_CPU_BASED_EXEC_CONTROLS);
1728 break;
1729 case EXIT_REASON_TASK_SWITCH:
1730 __vmx_bug(&regs);
1731 break;
1732 case EXIT_REASON_CPUID:
1733 __get_instruction_length(inst_len);
1734 vmx_vmexit_do_cpuid(regs.eax, &regs);
1735 __update_guest_eip(inst_len);
1736 break;
1737 case EXIT_REASON_HLT:
1738 __get_instruction_length(inst_len);
1739 __update_guest_eip(inst_len);
1740 vmx_vmexit_do_hlt();
1741 break;
1742 case EXIT_REASON_INVLPG:
1744 unsigned long va;
1746 __vmread(EXIT_QUALIFICATION, &va);
1747 vmx_vmexit_do_invlpg(va);
1748 __get_instruction_length(inst_len);
1749 __update_guest_eip(inst_len);
1750 break;
1752 case EXIT_REASON_VMCALL:
1753 __get_instruction_length(inst_len);
1754 __vmread(GUEST_RIP, &eip);
1755 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1757 vmx_print_line(regs.eax, v); /* provides the current domain */
1758 __update_guest_eip(inst_len);
1759 break;
1760 case EXIT_REASON_CR_ACCESS:
1762 __vmread(GUEST_RIP, &eip);
1763 __get_instruction_length(inst_len);
1764 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1766 VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx",
1767 eip, inst_len, exit_qualification);
1768 if (vmx_cr_access(exit_qualification, &regs))
1769 __update_guest_eip(inst_len);
1770 TRACE_VMEXIT(3,regs.error_code);
1771 TRACE_VMEXIT(4,exit_qualification);
1772 break;
1774 case EXIT_REASON_DR_ACCESS:
1775 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1776 vmx_dr_access(exit_qualification, &regs);
1777 __get_instruction_length(inst_len);
1778 __update_guest_eip(inst_len);
1779 break;
1780 case EXIT_REASON_IO_INSTRUCTION:
1781 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1782 __get_instruction_length(inst_len);
1783 vmx_io_instruction(&regs, exit_qualification, inst_len);
1784 TRACE_VMEXIT(4,exit_qualification);
1785 break;
1786 case EXIT_REASON_MSR_READ:
1787 __get_instruction_length(inst_len);
1788 vmx_do_msr_read(&regs);
1789 __update_guest_eip(inst_len);
1790 break;
1791 case EXIT_REASON_MSR_WRITE:
1792 __vmread(GUEST_RIP, &eip);
1793 vmx_do_msr_write(&regs);
1794 __get_instruction_length(inst_len);
1795 __update_guest_eip(inst_len);
1796 break;
1797 case EXIT_REASON_MWAIT_INSTRUCTION:
1798 __vmx_bug(&regs);
1799 break;
1800 default:
1801 __vmx_bug(&regs); /* should not happen */
1805 asmlinkage void load_cr2(void)
1807 struct vcpu *v = current;
1809 local_irq_disable();
1810 #ifdef __i386__
1811 asm volatile("movl %0,%%cr2": :"r" (v->arch.arch_vmx.cpu_cr2));
1812 #else
1813 asm volatile("movq %0,%%cr2": :"r" (v->arch.arch_vmx.cpu_cr2));
1814 #endif
1817 #ifdef TRACE_BUFFER
1818 asmlinkage void trace_vmentry (void)
1820 TRACE_5D(TRC_VMENTRY,trace_values[current->processor][0],
1821 trace_values[current->processor][1],trace_values[current->processor][2],
1822 trace_values[current->processor][3],trace_values[current->processor][4]);
1823 TRACE_VMEXIT(0,9);
1824 TRACE_VMEXIT(1,9);
1825 TRACE_VMEXIT(2,9);
1826 TRACE_VMEXIT(3,9);
1827 TRACE_VMEXIT(4,9);
1828 return;
1830 asmlinkage void trace_vmexit (void)
1832 TRACE_3D(TRC_VMEXIT,0,0,0);
1833 return;
1835 #endif
1836 #endif /* CONFIG_VMX */
1838 /*
1839 * Local variables:
1840 * mode: C
1841 * c-set-style: "BSD"
1842 * c-basic-offset: 4
1843 * tab-width: 4
1844 * indent-tabs-mode: nil
1845 * End:
1846 */