ia64/xen-unstable: xen/arch/x86/vmx.c @ changeset 6766:219d96d545fc

author cl349@firebug.cl.cam.ac.uk
date Mon Sep 12 20:00:41 2005 +0000 (2005-09-12)

line source
1 /*
2 * vmx.c: handling VMX architecture-related VM exits
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/init.h>
22 #include <xen/lib.h>
23 #include <xen/trace.h>
24 #include <xen/sched.h>
25 #include <xen/irq.h>
26 #include <xen/softirq.h>
27 #include <xen/domain_page.h>
28 #include <asm/current.h>
29 #include <asm/io.h>
30 #include <asm/shadow.h>
31 #include <asm/regs.h>
32 #include <asm/cpufeature.h>
33 #include <asm/processor.h>
34 #include <asm/types.h>
35 #include <asm/msr.h>
36 #include <asm/spinlock.h>
37 #include <asm/vmx.h>
38 #include <asm/vmx_vmcs.h>
39 #include <asm/vmx_intercept.h>
40 #include <asm/shadow.h>
41 #if CONFIG_PAGING_LEVELS >= 3
42 #include <asm/shadow_64.h>
43 #endif
45 #include <public/io/ioreq.h>
47 int hvm_enabled;
49 #ifdef CONFIG_VMX
51 int vmcs_size;
52 unsigned int opt_vmx_debug_level = 0;
53 integer_param("vmx_debug", opt_vmx_debug_level);
55 #ifdef TRACE_BUFFER
56 static unsigned long trace_values[NR_CPUS][5]; /* slots 0-4 are written by TRACE_VMEXIT */
57 #define TRACE_VMEXIT(index,value) trace_values[current->processor][index]=value
58 #else
59 #define TRACE_VMEXIT(index,value) ((void)0)
60 #endif
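/*
 * For illustration: when TRACE_BUFFER is defined, a call such as
 *
 *     TRACE_VMEXIT(1, vector);
 *
 * expands to
 *
 *     trace_values[current->processor][1] = vector;
 *
 * i.e. the value is stashed in a per-CPU slot and only emitted as a trace
 * record later, by trace_vmentry() at the bottom of this file.
 */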
62 #ifdef __x86_64__
63 static struct msr_state percpu_msr[NR_CPUS];
65 static u32 msr_data_index[VMX_MSR_COUNT] =
66 {
67 MSR_LSTAR, MSR_STAR, MSR_CSTAR,
68 MSR_SYSCALL_MASK, MSR_EFER,
69 };
71 /*
72 * To avoid MSR save/restore at every VM exit/entry time, we restore
73 * the x86_64-specific MSRs at domain switch time. Since those MSRs
74 * are not modified once set for generic domains, we don't save them,
75 * but simply reset them to the values set at percpu_traps_init().
76 */
77 void vmx_load_msrs(struct vcpu *n)
78 {
79 struct msr_state *host_state;
80 host_state = &percpu_msr[smp_processor_id()];
82 while (host_state->flags){
83 int i;
85 i = find_first_set_bit(host_state->flags);
86 wrmsrl(msr_data_index[i], host_state->msr_items[i]);
87 clear_bit(i, &host_state->flags);
88 }
89 }
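/*
 * For illustration (assuming the VMX_INDEX_MSR_* values follow the order of
 * msr_data_index[]): if host_state->flags were 0x9 (bits 0 and 3 set), the
 * loop above would restore MSR_LSTAR and then MSR_SYSCALL_MASK to the values
 * saved at boot by vmx_save_init_msrs() below, clearing each flag bit as it
 * goes.
 */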
91 static void vmx_save_init_msrs(void)
92 {
93 struct msr_state *host_state;
94 host_state = &percpu_msr[smp_processor_id()];
95 int i;
97 for (i = 0; i < VMX_MSR_COUNT; i++)
98 rdmsrl(msr_data_index[i], host_state->msr_items[i]);
99 }
101 #define CASE_READ_MSR(address) \
102 case MSR_ ## address: \
103 msr_content = msr->msr_items[VMX_INDEX_MSR_ ## address]; \
104 break
106 #define CASE_WRITE_MSR(address) \
107 case MSR_ ## address: \
108 { \
109 msr->msr_items[VMX_INDEX_MSR_ ## address] = msr_content; \
110 if (!test_bit(VMX_INDEX_MSR_ ## address, &msr->flags)) { \
111 set_bit(VMX_INDEX_MSR_ ## address, &msr->flags); \
112 } \
113 wrmsrl(MSR_ ## address, msr_content); \
114 set_bit(VMX_INDEX_MSR_ ## address, &host_state->flags); \
115 } \
116 break
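/*
 * For illustration, CASE_READ_MSR(STAR) in long_mode_do_msr_read() below
 * expands to
 *
 *     case MSR_STAR:
 *         msr_content = msr->msr_items[VMX_INDEX_MSR_STAR];
 *         break;
 *
 * while CASE_WRITE_MSR(STAR) additionally stores the value in the guest's
 * msr_items[], marks it in both the guest's and the host's flags bitmap,
 * and writes the real MSR with wrmsrl().
 */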
118 #define IS_CANO_ADDRESS(add) 1
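/*
 * IS_CANO_ADDRESS is currently a stub that accepts every address.  A real
 * check for 48-bit canonical addresses could look like the following sketch
 * (the helper name is hypothetical and is not used anywhere in this file):
 *
 *     static inline int is_canonical_address(u64 addr)
 *     {
 *         // bits 63:47 must all equal bit 47
 *         return ((s64)addr >> 47) == 0 || ((s64)addr >> 47) == -1;
 *     }
 */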
119 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
120 {
121 u64 msr_content = 0;
122 struct vcpu *vc = current;
123 struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
124 switch(regs->ecx){
125 case MSR_EFER:
126 msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
127 VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content);
128 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
129 &vc->arch.arch_vmx.cpu_state))
130 msr_content |= 1 << _EFER_LME;
132 if (VMX_LONG_GUEST(vc))
133 msr_content |= 1 << _EFER_LMA;
134 break;
135 case MSR_FS_BASE:
136 if (!(VMX_LONG_GUEST(vc)))
137 /* XXX should it be GP fault */
138 domain_crash();
139 __vmread(GUEST_FS_BASE, &msr_content);
140 break;
141 case MSR_GS_BASE:
142 if (!(VMX_LONG_GUEST(vc)))
143 domain_crash();
144 __vmread(GUEST_GS_BASE, &msr_content);
145 break;
146 case MSR_SHADOW_GS_BASE:
147 msr_content = msr->shadow_gs;
148 break;
150 CASE_READ_MSR(STAR);
151 CASE_READ_MSR(LSTAR);
152 CASE_READ_MSR(CSTAR);
153 CASE_READ_MSR(SYSCALL_MASK);
154 default:
155 return 0;
156 }
157 VMX_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", msr_content);
158 regs->eax = msr_content & 0xffffffff;
159 regs->edx = msr_content >> 32;
160 return 1;
161 }
163 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
164 {
165 u64 msr_content = regs->eax | ((u64)regs->edx << 32);
166 struct vcpu *vc = current;
167 struct msr_state * msr = &vc->arch.arch_vmx.msr_content;
168 struct msr_state * host_state =
169 &percpu_msr[smp_processor_id()];
171 VMX_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n",
172 regs->ecx, msr_content);
174 switch (regs->ecx){
175 case MSR_EFER:
176 if ((msr_content & EFER_LME) ^
177 test_bit(VMX_CPU_STATE_LME_ENABLED,
178 &vc->arch.arch_vmx.cpu_state)){
179 if (test_bit(VMX_CPU_STATE_PG_ENABLED,
180 &vc->arch.arch_vmx.cpu_state) ||
181 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
182 &vc->arch.arch_vmx.cpu_state)){
183 vmx_inject_exception(vc, TRAP_gp_fault, 0);
184 }
185 }
186 if (msr_content & EFER_LME)
187 set_bit(VMX_CPU_STATE_LME_ENABLED,
188 &vc->arch.arch_vmx.cpu_state);
189 /* No update for LME/LMA since they have no effect */
190 msr->msr_items[VMX_INDEX_MSR_EFER] =
191 msr_content;
192 if (msr_content & ~(EFER_LME | EFER_LMA)){
193 msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
194 if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){
195 rdmsrl(MSR_EFER,
196 host_state->msr_items[VMX_INDEX_MSR_EFER]);
197 set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
198 set_bit(VMX_INDEX_MSR_EFER, &msr->flags);
199 wrmsrl(MSR_EFER, msr_content);
200 }
201 }
202 break;
204 case MSR_FS_BASE:
205 case MSR_GS_BASE:
206 if (!(VMX_LONG_GUEST(vc)))
207 domain_crash();
208 if (!IS_CANO_ADDRESS(msr_content)){
209 VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
210 vmx_inject_exception(vc, TRAP_gp_fault, 0);
211 }
212 if (regs->ecx == MSR_FS_BASE)
213 __vmwrite(GUEST_FS_BASE, msr_content);
214 else
215 __vmwrite(GUEST_GS_BASE, msr_content);
216 break;
218 case MSR_SHADOW_GS_BASE:
219 if (!(VMX_LONG_GUEST(vc)))
220 domain_crash();
221 vc->arch.arch_vmx.msr_content.shadow_gs = msr_content;
222 wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
223 break;
225 CASE_WRITE_MSR(STAR);
226 CASE_WRITE_MSR(LSTAR);
227 CASE_WRITE_MSR(CSTAR);
228 CASE_WRITE_MSR(SYSCALL_MASK);
229 default:
230 return 0;
231 }
232 return 1;
233 }
235 void
236 vmx_restore_msrs(struct vcpu *d)
237 {
238 int i = 0;
239 struct msr_state *guest_state;
240 struct msr_state *host_state;
241 unsigned long guest_flags;
243 guest_state = &d->arch.arch_vmx.msr_content;
244 host_state = &percpu_msr[smp_processor_id()];
246 wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
247 guest_flags = guest_state->flags;
248 if (!guest_flags)
249 return;
251 while (guest_flags){
252 i = find_first_set_bit(guest_flags);
254 VMX_DBG_LOG(DBG_LEVEL_2,
255 "restore guest's index %d msr %lx with %lx\n",
256 i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
257 set_bit(i, &host_state->flags);
258 wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
259 clear_bit(i, &guest_flags);
260 }
261 }
263 #else /* __i386__ */
264 #define vmx_save_init_msrs() ((void)0)
266 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs){
267 return 0;
268 }
269 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs){
270 return 0;
271 }
272 #endif
274 extern long evtchn_send(int lport);
275 extern long do_block(void);
276 void do_nmi(struct cpu_user_regs *, unsigned long);
278 static int check_vmx_controls(u32 ctrls, u32 msr)
279 {
280 u32 vmx_msr_low, vmx_msr_high;
282 rdmsr(msr, vmx_msr_low, vmx_msr_high);
283 if (ctrls < vmx_msr_low || ctrls > vmx_msr_high) {
284 printk("Insufficient VMX capability 0x%x, "
285 "msr=0x%x,low=0x%8x,high=0x%x\n",
286 ctrls, msr, vmx_msr_low, vmx_msr_high);
287 return 0;
288 }
289 return 1;
290 }
292 int start_vmx(void)
293 {
294 struct vmcs_struct *vmcs;
295 u32 ecx;
296 u32 eax, edx;
297 u64 phys_vmcs; /* debugging */
299 /*
300 * Xen does not fill x86_capability words except 0.
301 */
302 ecx = cpuid_ecx(1);
303 boot_cpu_data.x86_capability[4] = ecx;
305 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
306 return 0;
308 rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
310 if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
311 if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
312 printk("VMX disabled by Feature Control MSR.\n");
313 return 0;
314 }
315 }
316 else {
317 wrmsr(IA32_FEATURE_CONTROL_MSR,
318 IA32_FEATURE_CONTROL_MSR_LOCK |
319 IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
320 }
322 if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
323 MSR_IA32_VMX_PINBASED_CTLS_MSR))
324 return 0;
325 if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
326 MSR_IA32_VMX_PROCBASED_CTLS_MSR))
327 return 0;
328 if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
329 MSR_IA32_VMX_EXIT_CTLS_MSR))
330 return 0;
331 if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
332 MSR_IA32_VMX_ENTRY_CTLS_MSR))
333 return 0;
335 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */
337 if (!(vmcs = alloc_vmcs())) {
338 printk("Failed to allocate VMCS\n");
339 return 0;
340 }
342 phys_vmcs = (u64) virt_to_phys(vmcs);
344 if (!(__vmxon(phys_vmcs))) {
345 printk("VMXON is done\n");
346 }
348 vmx_save_init_msrs();
350 hvm_enabled = 1;
352 return 1;
353 }
355 void stop_vmx(void)
356 {
357 if (read_cr4() & X86_CR4_VMXE)
358 __vmxoff();
359 }
361 /*
362 * Not all VM exits deliver a valid value in the VM-exit instruction length field.
363 */
364 #define __get_instruction_length(len) \
365 __vmread(VM_EXIT_INSTRUCTION_LEN, &(len)); \
366 if ((len) < 1 || (len) > 15) \
367 __vmx_bug(&regs);
369 static void inline __update_guest_eip(unsigned long inst_len)
370 {
371 unsigned long current_eip;
373 __vmread(GUEST_RIP, &current_eip);
374 __vmwrite(GUEST_RIP, current_eip + inst_len);
375 }
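/*
 * For illustration, the two helpers above are used together in the exit
 * handler below as
 *
 *     __get_instruction_length(inst_len);
 *     __update_guest_eip(inst_len);
 *
 * i.e. read and sanity-check the instruction length reported in the VMCS,
 * then advance the guest RIP past the instruction that caused the exit.
 */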
378 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
379 {
380 unsigned long eip;
381 unsigned long gpa; /* FIXME: PAE */
382 int result;
384 #if VMX_DEBUG
385 {
386 __vmread(GUEST_RIP, &eip);
387 VMX_DBG_LOG(DBG_LEVEL_VMMU,
388 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
389 va, eip, (unsigned long)regs->error_code);
390 }
391 #endif
393 if (!vmx_paging_enabled(current)){
394 handle_mmio(va, va);
395 TRACE_VMEXIT (2,2);
396 return 1;
397 }
398 gpa = gva_to_gpa(va);
400 /* Use 1:1 page table to identify MMIO address space */
401 if ( mmio_space(gpa) ){
402 if (gpa >= 0xFEE00000) { /* workaround for local APIC */
403 u32 inst_len;
404 __vmread(VM_EXIT_INSTRUCTION_LEN, &(inst_len));
405 __update_guest_eip(inst_len);
406 return 1;
407 }
408 TRACE_VMEXIT (2,2);
409 handle_mmio(va, gpa);
410 return 1;
411 }
413 result = shadow_fault(va, regs);
414 TRACE_VMEXIT (2,result);
415 #if 0
416 if ( !result )
417 {
418 __vmread(GUEST_RIP, &eip);
419 printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
420 }
421 #endif
423 return result;
424 }
426 static void vmx_do_no_device_fault(void)
427 {
428 unsigned long cr0;
430 clts();
431 setup_fpu(current);
432 __vmread(CR0_READ_SHADOW, &cr0);
433 if (!(cr0 & X86_CR0_TS)) {
434 __vmread(GUEST_CR0, &cr0);
435 cr0 &= ~X86_CR0_TS;
436 __vmwrite(GUEST_CR0, cr0);
437 }
438 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
439 }
442 static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
443 {
444 unsigned int eax, ebx, ecx, edx;
445 unsigned long eip;
447 __vmread(GUEST_RIP, &eip);
449 VMX_DBG_LOG(DBG_LEVEL_1,
450 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
451 " (esi) %lx, (edi) %lx",
452 (unsigned long)regs->eax, (unsigned long)regs->ebx,
453 (unsigned long)regs->ecx, (unsigned long)regs->edx,
454 (unsigned long)regs->esi, (unsigned long)regs->edi);
456 cpuid(input, &eax, &ebx, &ecx, &edx);
458 if (input == 1) {
459 #ifdef __i386__
460 clear_bit(X86_FEATURE_PSE, &edx);
461 clear_bit(X86_FEATURE_PAE, &edx);
462 clear_bit(X86_FEATURE_PSE36, &edx);
463 #else
464 struct vcpu *d = current;
465 if (d->domain->arch.ops->guest_paging_levels == PAGING_L2)
466 {
467 clear_bit(X86_FEATURE_PSE, &edx);
468 clear_bit(X86_FEATURE_PAE, &edx);
469 clear_bit(X86_FEATURE_PSE36, &edx);
470 }
471 #endif
473 }
475 regs->eax = (unsigned long) eax;
476 regs->ebx = (unsigned long) ebx;
477 regs->ecx = (unsigned long) ecx;
478 regs->edx = (unsigned long) edx;
480 VMX_DBG_LOG(DBG_LEVEL_1,
481 "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
482 eip, input, eax, ebx, ecx, edx);
484 }
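/*
 * For illustration (bit numbers per the CPUID leaf 1 EDX layout): for a
 * 2-level-paging guest the code above clears bit 3 (PSE), bit 6 (PAE) and
 * bit 17 (PSE36) from the EDX value returned to the guest, presumably so
 * the guest does not try to enable paging modes the shadow code cannot
 * handle.
 */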
486 #define CASE_GET_REG_P(REG, reg) \
487 case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
489 static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs)
490 {
491 unsigned int reg;
492 unsigned long *reg_p = 0;
493 struct vcpu *v = current;
494 unsigned long eip;
496 __vmread(GUEST_RIP, &eip);
498 reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
500 VMX_DBG_LOG(DBG_LEVEL_1,
501 "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx",
502 eip, reg, exit_qualification);
504 switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
505 CASE_GET_REG_P(EAX, eax);
506 CASE_GET_REG_P(ECX, ecx);
507 CASE_GET_REG_P(EDX, edx);
508 CASE_GET_REG_P(EBX, ebx);
509 CASE_GET_REG_P(EBP, ebp);
510 CASE_GET_REG_P(ESI, esi);
511 CASE_GET_REG_P(EDI, edi);
512 case REG_ESP:
513 break;
514 default:
515 __vmx_bug(regs);
516 }
518 switch (exit_qualification & DEBUG_REG_ACCESS_TYPE) {
519 case TYPE_MOV_TO_DR:
520 /* don't need to check the range */
521 if (reg != REG_ESP)
522 v->arch.guest_context.debugreg[reg] = *reg_p;
523 else {
524 unsigned long value;
525 __vmread(GUEST_RSP, &value);
526 v->arch.guest_context.debugreg[reg] = value;
527 }
528 break;
529 case TYPE_MOV_FROM_DR:
530 if (reg != REG_ESP)
531 *reg_p = v->arch.guest_context.debugreg[reg];
532 else {
533 __vmwrite(GUEST_RSP, v->arch.guest_context.debugreg[reg]);
534 }
535 break;
536 }
537 }
539 /*
540 * Invalidate the TLB for va. Invalidate the shadow page corresponding
541 * to the address va.
542 */
543 static void vmx_vmexit_do_invlpg(unsigned long va)
544 {
545 unsigned long eip;
546 struct vcpu *v = current;
548 __vmread(GUEST_RIP, &eip);
550 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%lx, va=%lx",
551 eip, va);
553 /*
554 * We do the safest thing first, then try to update the shadow copy
555 * from the guest.
556 */
557 shadow_invlpg(v, va);
558 }
560 static int check_for_null_selector(unsigned long eip)
561 {
562 unsigned char inst[MAX_INST_LEN];
563 unsigned long sel;
564 int i, inst_len;
565 int inst_copy_from_guest(unsigned char *, unsigned long, int);
567 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
568 memset(inst, 0, MAX_INST_LEN);
569 if (inst_copy_from_guest(inst, eip, inst_len) != inst_len) {
570 printf("check_for_null_selector: get guest instruction failed\n");
571 domain_crash_synchronous();
572 }
574 for (i = 0; i < inst_len; i++) {
575 switch (inst[i]) {
576 case 0xf3: /* REPZ */
577 case 0xf2: /* REPNZ */
578 case 0xf0: /* LOCK */
579 case 0x66: /* data32 */
580 case 0x67: /* addr32 */
581 continue;
582 case 0x2e: /* CS */
583 __vmread(GUEST_CS_SELECTOR, &sel);
584 break;
585 case 0x36: /* SS */
586 __vmread(GUEST_SS_SELECTOR, &sel);
587 break;
588 case 0x26: /* ES */
589 __vmread(GUEST_ES_SELECTOR, &sel);
590 break;
591 case 0x64: /* FS */
592 __vmread(GUEST_FS_SELECTOR, &sel);
593 break;
594 case 0x65: /* GS */
595 __vmread(GUEST_GS_SELECTOR, &sel);
596 break;
597 case 0x3e: /* DS */
598 /* FALLTHROUGH */
599 default:
600 /* DS is the default */
601 __vmread(GUEST_DS_SELECTOR, &sel);
602 }
603 return sel == 0 ? 1 : 0;
604 }
606 return 0;
607 }
609 void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
610 unsigned long count, int size, long value, int dir, int pvalid)
611 {
612 struct vcpu *v = current;
613 vcpu_iodata_t *vio;
614 ioreq_t *p;
616 vio = get_vio(v->domain, v->vcpu_id);
617 if (vio == NULL) {
618 printk("bad shared page: %lx\n", (unsigned long) vio);
619 domain_crash_synchronous();
620 }
622 if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
623 printf("VMX I/O has not yet completed\n");
624 domain_crash_synchronous();
625 }
626 set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
628 p = &vio->vp_ioreq;
629 p->dir = dir;
630 p->pdata_valid = pvalid;
632 p->type = IOREQ_TYPE_PIO;
633 p->size = size;
634 p->addr = port;
635 p->count = count;
636 p->df = regs->eflags & EF_DF ? 1 : 0;
638 if (pvalid) {
639 if (vmx_paging_enabled(current))
640 p->u.pdata = (void *) gva_to_gpa(value);
641 else
642 p->u.pdata = (void *) value; /* guest VA == guest PA */
643 } else
644 p->u.data = value;
646 p->state = STATE_IOREQ_READY;
648 if (vmx_portio_intercept(p)) {
649 /* no blocking & no evtchn notification */
650 clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
651 return;
652 }
654 evtchn_send(iopacket_port(v->domain));
655 vmx_wait_io();
656 }
658 static void vmx_io_instruction(struct cpu_user_regs *regs,
659 unsigned long exit_qualification, unsigned long inst_len)
660 {
661 struct mi_per_cpu_info *mpcip;
662 unsigned long eip, cs, eflags;
663 unsigned long port, size, dir;
664 int vm86;
666 mpcip = &current->domain->arch.vmx_platform.mpci;
667 mpcip->instr = INSTR_PIO;
668 mpcip->flags = 0;
670 __vmread(GUEST_RIP, &eip);
671 __vmread(GUEST_CS_SELECTOR, &cs);
672 __vmread(GUEST_RFLAGS, &eflags);
673 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
675 VMX_DBG_LOG(DBG_LEVEL_1,
676 "vmx_io_instruction: vm86 %d, eip=%lx:%lx, "
677 "exit_qualification = %lx",
678 vm86, cs, eip, exit_qualification);
680 if (test_bit(6, &exit_qualification))
681 port = (exit_qualification >> 16) & 0xFFFF;
682 else
683 port = regs->edx & 0xffff;
684 TRACE_VMEXIT(2, port);
685 size = (exit_qualification & 7) + 1;
686 dir = test_bit(3, &exit_qualification); /* direction */
688 if (test_bit(4, &exit_qualification)) { /* string instruction */
689 unsigned long addr, count = 1;
690 int sign = regs->eflags & EF_DF ? -1 : 1;
692 __vmread(GUEST_LINEAR_ADDRESS, &addr);
694 /*
695 * In protected mode, the guest linear address is invalid if the
696 * selector is null.
697 */
698 if (!vm86 && check_for_null_selector(eip))
699 addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
701 if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
702 mpcip->flags |= REPZ;
703 count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
704 }
706 /*
707 * Handle string pio instructions that cross pages or that
708 * are unaligned. See the comments in vmx_platform.c/handle_mmio()
709 */
710 if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
711 unsigned long value = 0;
713 mpcip->flags |= OVERLAP;
714 if (dir == IOREQ_WRITE)
715 vmx_copy(&value, addr, size, VMX_COPY_IN);
716 send_pio_req(regs, port, 1, size, value, dir, 0);
717 } else {
718 if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
719 if (sign > 0)
720 count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
721 else
722 count = (addr & ~PAGE_MASK) / size;
723 } else
724 __update_guest_eip(inst_len);
726 send_pio_req(regs, port, count, size, addr, dir, 1);
727 }
728 } else {
729 __update_guest_eip(inst_len);
730 send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
731 }
732 }
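/*
 * Worked example of the count clamping in vmx_io_instruction() above
 * (illustrative numbers, PAGE_SIZE = 4096): for addr = 0x1ff8, size = 4,
 * count = 8 and sign > 0, a single element fits in the page but the full
 * run would cross into the next page, so count is clamped to
 * (0x2000 - 0x1ff8) / 4 = 2 and the guest RIP is deliberately left
 * unchanged; the instruction re-executes to transfer the remaining
 * elements.
 */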
734 int
735 vmx_copy(void *buf, unsigned long laddr, int size, int dir)
736 {
737 unsigned long gpa, mfn;
738 char *addr;
739 int count;
741 while (size > 0) {
742 count = PAGE_SIZE - (laddr & ~PAGE_MASK);
743 if (count > size)
744 count = size;
746 if (vmx_paging_enabled(current)) {
747 gpa = gva_to_gpa(laddr);
748 mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
749 } else
750 mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
751 if (mfn == INVALID_MFN)
752 return 0;
754 addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
756 if (dir == VMX_COPY_IN)
757 memcpy(buf, addr, count);
758 else
759 memcpy(addr, buf, count);
761 unmap_domain_page(addr);
763 laddr += count;
764 buf += count;
765 size -= count;
766 }
768 return 1;
769 }
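/*
 * Worked example for vmx_copy() above (illustrative numbers, PAGE_SIZE =
 * 4096): copying size = 8 bytes from laddr = 0x3ffe is split into two
 * iterations, first count = 4096 - 0xffe = 2 bytes up to the page boundary,
 * then count = 6 bytes from laddr = 0x4000, each chunk being translated and
 * mapped separately.
 */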
771 int
772 vmx_world_save(struct vcpu *d, struct vmx_assist_context *c)
773 {
774 unsigned long inst_len;
775 int error = 0;
777 error |= __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
778 error |= __vmread(GUEST_RIP, &c->eip);
779 c->eip += inst_len; /* skip transition instruction */
780 error |= __vmread(GUEST_RSP, &c->esp);
781 error |= __vmread(GUEST_RFLAGS, &c->eflags);
783 error |= __vmread(CR0_READ_SHADOW, &c->cr0);
784 c->cr3 = d->arch.arch_vmx.cpu_cr3;
785 error |= __vmread(CR4_READ_SHADOW, &c->cr4);
787 error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
788 error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
790 error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
791 error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
793 error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
794 error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
795 error |= __vmread(GUEST_CS_BASE, &c->cs_base);
796 error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
798 error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
799 error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
800 error |= __vmread(GUEST_DS_BASE, &c->ds_base);
801 error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
803 error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
804 error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
805 error |= __vmread(GUEST_ES_BASE, &c->es_base);
806 error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
808 error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
809 error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
810 error |= __vmread(GUEST_SS_BASE, &c->ss_base);
811 error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
813 error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
814 error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
815 error |= __vmread(GUEST_FS_BASE, &c->fs_base);
816 error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
818 error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
819 error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
820 error |= __vmread(GUEST_GS_BASE, &c->gs_base);
821 error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
823 error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
824 error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
825 error |= __vmread(GUEST_TR_BASE, &c->tr_base);
826 error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
828 error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
829 error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
830 error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
831 error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
833 return !error;
834 }
836 int
837 vmx_world_restore(struct vcpu *d, struct vmx_assist_context *c)
838 {
839 unsigned long mfn, old_cr4;
840 int error = 0;
842 error |= __vmwrite(GUEST_RIP, c->eip);
843 error |= __vmwrite(GUEST_RSP, c->esp);
844 error |= __vmwrite(GUEST_RFLAGS, c->eflags);
846 error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
848 if (!vmx_paging_enabled(d)) {
849 VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
850 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
851 goto skip_cr3;
852 }
854 if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
855 /*
856 * This is a simple TLB flush, implying the guest has
857 * removed some translation or changed page attributes.
858 * We simply invalidate the shadow.
859 */
860 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
861 if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
862 printk("Invalid CR3 value=%x", c->cr3);
863 domain_crash_synchronous();
864 return 0;
865 }
866 shadow_sync_all(d->domain);
867 } else {
868 /*
869 * If different, make a shadow. Check if the PDBR is valid
870 * first.
871 */
872 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
873 if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
874 printk("Invalid CR3 value=%x", c->cr3);
875 domain_crash_synchronous();
876 return 0;
877 }
878 mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
879 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
880 update_pagetables(d);
881 /*
882 * arch.shadow_table should now hold the next CR3 for shadow
883 */
884 d->arch.arch_vmx.cpu_cr3 = c->cr3;
885 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
886 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
887 }
889 skip_cr3:
891 error |= __vmread(CR4_READ_SHADOW, &old_cr4);
892 error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
893 error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
895 error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
896 error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
898 error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
899 error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
901 error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
902 error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
903 error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
904 error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
906 error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
907 error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
908 error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
909 error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
911 error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
912 error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
913 error |= __vmwrite(GUEST_ES_BASE, c->es_base);
914 error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
916 error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
917 error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
918 error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
919 error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
921 error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
922 error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
923 error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
924 error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
926 error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
927 error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
928 error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
929 error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
931 error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
932 error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
933 error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
934 error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
936 error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
937 error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
938 error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
939 error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
941 return !error;
942 }
944 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
946 int
947 vmx_assist(struct vcpu *d, int mode)
948 {
949 struct vmx_assist_context c;
950 u32 magic;
951 u32 cp;
953 /* make sure vmxassist exists (this is not an error) */
954 if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), VMX_COPY_IN))
955 return 0;
956 if (magic != VMXASSIST_MAGIC)
957 return 0;
959 switch (mode) {
960 /*
961 * Transfer control to vmxassist.
962 * Store the current context in VMXASSIST_OLD_CONTEXT and load
963 * the new context from VMXASSIST_NEW_CONTEXT. That context was created
964 * by vmxassist and will transfer control to it.
965 */
966 case VMX_ASSIST_INVOKE:
967 /* save the old context */
968 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
969 goto error;
970 if (cp != 0) {
971 if (!vmx_world_save(d, &c))
972 goto error;
973 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT))
974 goto error;
975 }
977 /* restore the new context, this should activate vmxassist */
978 if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN))
979 goto error;
980 if (cp != 0) {
981 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
982 goto error;
983 if (!vmx_world_restore(d, &c))
984 goto error;
985 return 1;
986 }
987 break;
989 /*
990 * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
991 * above.
992 */
993 case VMX_ASSIST_RESTORE:
994 /* save the old context */
995 if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN))
996 goto error;
997 if (cp != 0) {
998 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN))
999 goto error;
1000 if (!vmx_world_restore(d, &c))
1001 goto error;
1002 return 1;
1003 }
1004 break;
1005 }
1007 error:
1008 printf("Failed to transfer to vmxassist\n");
1009 domain_crash_synchronous();
1010 return 0;
1011 }
1013 static int vmx_set_cr0(unsigned long value)
1014 {
1015 struct vcpu *d = current;
1016 unsigned long mfn;
1017 unsigned long eip;
1018 int paging_enabled;
1019 unsigned long vm_entry_value;
1020 /*
1021 * CR0: We don't want to lose PE and PG.
1022 */
1023 paging_enabled = vmx_paging_enabled(d);
1024 __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
1025 __vmwrite(CR0_READ_SHADOW, value);
1027 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
1029 if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled) {
1030 /*
1031 * The guest CR3 must be pointing to the guest physical.
1032 */
1033 if ( !VALID_MFN(mfn = get_mfn_from_pfn(
1034 d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
1035 !get_page(pfn_to_page(mfn), d->domain) )
1037 printk("Invalid CR3 value = %lx", d->arch.arch_vmx.cpu_cr3);
1038 domain_crash_synchronous(); /* need to take a clean path */
1041 #if defined(__x86_64__)
1042 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
1043 &d->arch.arch_vmx.cpu_state) &&
1044 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
1045 &d->arch.arch_vmx.cpu_state)){
1046 VMX_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
1047 vmx_inject_exception(d, TRAP_gp_fault, 0);
1049 if (test_bit(VMX_CPU_STATE_LME_ENABLED,
1050 &d->arch.arch_vmx.cpu_state)){
1051 /* PAE should already be enabled at this point; activate long mode */
1052 VMX_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
1053 set_bit(VMX_CPU_STATE_LMA_ENABLED,
1054 &d->arch.arch_vmx.cpu_state);
1055 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
1056 vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
1057 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
1059 #if CONFIG_PAGING_LEVELS >= 4
1060 if(!shadow_set_guest_paging_levels(d->domain, 4)) {
1061 printk("Unsupported guest paging levels\n");
1062 domain_crash_synchronous(); /* need to take a clean path */
1064 #endif
1066 else
1068 #if CONFIG_PAGING_LEVELS >= 4
1069 if(!shadow_set_guest_paging_levels(d->domain, 2)) {
1070 printk("Unsupported guest paging levels\n");
1071 domain_crash_synchronous(); /* need to take a clean path */
1073 #endif
1076 unsigned long crn;
1077 /* update CR4's PAE if needed */
1078 __vmread(GUEST_CR4, &crn);
1079 if ( (!(crn & X86_CR4_PAE)) &&
1080 test_bit(VMX_CPU_STATE_PAE_ENABLED,
1081 &d->arch.arch_vmx.cpu_state)){
1082 VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
1083 __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
1085 #endif
1086 /*
1087 * Now arch.guest_table points to the machine-physical frame of the guest's top-level page table.
1088 */
1089 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1090 update_pagetables(d);
1092 VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
1093 (unsigned long) (mfn << PAGE_SHIFT));
1095 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
1096 /*
1097 * arch.shadow_table should now hold the next CR3 for shadow
1098 */
1099 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
1100 d->arch.arch_vmx.cpu_cr3, mfn);
1103 /*
1104 * VMX does not implement real-mode virtualization. We emulate
1105 * real-mode by performing a world switch to VMXAssist whenever
1106 * a partition disables the CR0.PE bit.
1107 */
1108 if ((value & X86_CR0_PE) == 0) {
1109 if ( value & X86_CR0_PG ) {
1110 /* inject GP here */
1111 vmx_inject_exception(d, TRAP_gp_fault, 0);
1112 return 0;
1113 } else {
1114 /*
1115 * Disable paging here.
1116 * Same to PE == 1 && PG == 0
1117 */
1118 if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
1119 &d->arch.arch_vmx.cpu_state)){
1120 clear_bit(VMX_CPU_STATE_LMA_ENABLED,
1121 &d->arch.arch_vmx.cpu_state);
1122 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
1123 vm_entry_value &= ~VM_ENTRY_CONTROLS_IA32E_MODE;
1124 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
1127 __vmread(GUEST_RIP, &eip);
1128 VMX_DBG_LOG(DBG_LEVEL_1,
1129 "Disabling CR0.PE at %%eip 0x%lx\n", eip);
1130 if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
1131 set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
1132 __vmread(GUEST_RIP, &eip);
1133 VMX_DBG_LOG(DBG_LEVEL_1,
1134 "Transfering control to vmxassist %%eip 0x%lx\n", eip);
1135 return 0; /* do not update eip! */
1137 } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
1138 &d->arch.arch_vmx.cpu_state)) {
1139 __vmread(GUEST_RIP, &eip);
1140 VMX_DBG_LOG(DBG_LEVEL_1,
1141 "Enabling CR0.PE at %%eip 0x%lx\n", eip);
1142 if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
1143 clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
1144 &d->arch.arch_vmx.cpu_state);
1145 __vmread(GUEST_RIP, &eip);
1146 VMX_DBG_LOG(DBG_LEVEL_1,
1147 "Restoring to %%eip 0x%lx\n", eip);
1148 return 0; /* do not update eip! */
1152 return 1;
1155 #define CASE_GET_REG(REG, reg) \
1156 case REG_ ## REG: value = regs->reg; break
1158 #define CASE_EXTEND_SET_REG \
1159 CASE_EXTEND_REG(S)
1160 #define CASE_EXTEND_GET_REG \
1161 CASE_EXTEND_REG(G)
1163 #ifdef __i386__
1164 #define CASE_EXTEND_REG(T)
1165 #else
1166 #define CASE_EXTEND_REG(T) \
1167 CASE_ ## T ## ET_REG(R8, r8); \
1168 CASE_ ## T ## ET_REG(R9, r9); \
1169 CASE_ ## T ## ET_REG(R10, r10); \
1170 CASE_ ## T ## ET_REG(R11, r11); \
1171 CASE_ ## T ## ET_REG(R12, r12); \
1172 CASE_ ## T ## ET_REG(R13, r13); \
1173 CASE_ ## T ## ET_REG(R14, r14); \
1174 CASE_ ## T ## ET_REG(R15, r15);
1175 #endif
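/*
 * For illustration, on x86_64 CASE_EXTEND_GET_REG expands (via
 * CASE_EXTEND_REG(G) and CASE_GET_REG) to one case per extended register,
 * e.g.
 *
 *     case REG_R8: value = regs->r8; break;
 *     ...
 *     case REG_R15: value = regs->r15; break;
 *
 * while on i386 it expands to nothing.
 */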
1178 /*
1179 * Write to control registers
1180 */
1181 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
1183 unsigned long value;
1184 unsigned long old_cr;
1185 struct vcpu *d = current;
1187 switch (gp) {
1188 CASE_GET_REG(EAX, eax);
1189 CASE_GET_REG(ECX, ecx);
1190 CASE_GET_REG(EDX, edx);
1191 CASE_GET_REG(EBX, ebx);
1192 CASE_GET_REG(EBP, ebp);
1193 CASE_GET_REG(ESI, esi);
1194 CASE_GET_REG(EDI, edi);
1195 CASE_EXTEND_GET_REG
1196 case REG_ESP:
1197 __vmread(GUEST_RSP, &value);
1198 break;
1199 default:
1200 printk("invalid gp: %d\n", gp);
1201 __vmx_bug(regs);
1204 VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
1205 VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
1207 switch(cr) {
1208 case 0:
1210 return vmx_set_cr0(value);
1212 case 3:
1214 unsigned long old_base_mfn, mfn;
1216 /*
1217 * If paging is not enabled yet, simply copy the value to CR3.
1218 */
1219 if (!vmx_paging_enabled(d)) {
1220 d->arch.arch_vmx.cpu_cr3 = value;
1221 break;
1224 /*
1225 * We make a new one if the shadow does not exist.
1226 */
1227 if (value == d->arch.arch_vmx.cpu_cr3) {
1228 /*
1229 * This is a simple TLB flush, implying the guest has
1230 * removed some translation or changed page attributes.
1231 * We simply invalidate the shadow.
1232 */
1233 mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
1234 if (mfn != pagetable_get_pfn(d->arch.guest_table))
1235 __vmx_bug(regs);
1236 shadow_sync_all(d->domain);
1237 } else {
1238 /*
1239 * If different, make a shadow. Check if the PDBR is valid
1240 * first.
1241 */
1242 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
1243 if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
1244 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
1245 !get_page(pfn_to_page(mfn), d->domain) )
1247 printk("Invalid CR3 value=%lx", value);
1248 domain_crash_synchronous(); /* need to take a clean path */
1250 old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
1251 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1252 if (old_base_mfn)
1253 put_page(pfn_to_page(old_base_mfn));
1254 update_pagetables(d);
1255 /*
1256 * arch.shadow_table should now hold the next CR3 for shadow
1257 */
1258 d->arch.arch_vmx.cpu_cr3 = value;
1259 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
1260 value);
1261 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
1263 break;
1265 case 4:
1267 /* CR4 */
1268 unsigned long old_guest_cr;
1270 __vmread(GUEST_CR4, &old_guest_cr);
1271 if (value & X86_CR4_PAE){
1272 set_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
1273 } else {
1274 if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
1275 &d->arch.arch_vmx.cpu_state)){
1276 vmx_inject_exception(d, TRAP_gp_fault, 0);
1278 clear_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
1281 __vmread(CR4_READ_SHADOW, &old_cr);
1283 __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
1284 __vmwrite(CR4_READ_SHADOW, value);
1286 /*
1287 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
1288 * all TLB entries except global entries.
1289 */
1290 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
1291 shadow_sync_all(d->domain);
1293 break;
1295 default:
1296 printk("invalid cr: %d\n", gp);
1297 __vmx_bug(regs);
1300 return 1;
1303 #define CASE_SET_REG(REG, reg) \
1304 case REG_ ## REG: \
1305 regs->reg = value; \
1306 break
1308 /*
1309 * Read from control registers. CR0 and CR4 are read from the shadow.
1310 */
1311 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
1313 unsigned long value;
1314 struct vcpu *d = current;
1316 if (cr != 3)
1317 __vmx_bug(regs);
1319 value = (unsigned long) d->arch.arch_vmx.cpu_cr3;
1321 switch (gp) {
1322 CASE_SET_REG(EAX, eax);
1323 CASE_SET_REG(ECX, ecx);
1324 CASE_SET_REG(EDX, edx);
1325 CASE_SET_REG(EBX, ebx);
1326 CASE_SET_REG(EBP, ebp);
1327 CASE_SET_REG(ESI, esi);
1328 CASE_SET_REG(EDI, edi);
1329 CASE_EXTEND_SET_REG
1330 case REG_ESP:
1331 __vmwrite(GUEST_RSP, value);
1332 regs->esp = value;
1333 break;
1334 default:
1335 printk("invalid gp: %d\n", gp);
1336 __vmx_bug(regs);
1339 VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
1342 static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
1344 unsigned int gp, cr;
1345 unsigned long value;
1347 switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
1348 case TYPE_MOV_TO_CR:
1349 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
1350 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
1351 TRACE_VMEXIT(1,TYPE_MOV_TO_CR);
1352 TRACE_VMEXIT(2,cr);
1353 TRACE_VMEXIT(3,gp);
1354 return mov_to_cr(gp, cr, regs);
1355 case TYPE_MOV_FROM_CR:
1356 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
1357 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
1358 TRACE_VMEXIT(1,TYPE_MOV_FROM_CR);
1359 TRACE_VMEXIT(2,cr);
1360 TRACE_VMEXIT(3,gp);
1361 mov_from_cr(cr, gp, regs);
1362 break;
1363 case TYPE_CLTS:
1364 TRACE_VMEXIT(1,TYPE_CLTS);
1365 clts();
1366 setup_fpu(current);
1368 __vmread(GUEST_CR0, &value);
1369 value &= ~X86_CR0_TS; /* clear TS */
1370 __vmwrite(GUEST_CR0, value);
1372 __vmread(CR0_READ_SHADOW, &value);
1373 value &= ~X86_CR0_TS; /* clear TS */
1374 __vmwrite(CR0_READ_SHADOW, value);
1375 break;
1376 case TYPE_LMSW:
1377 TRACE_VMEXIT(1,TYPE_LMSW);
1378 __vmread(CR0_READ_SHADOW, &value);
1379 value = (value & ~0xF) |
1380 (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
1381 return vmx_set_cr0(value);
1382 break;
1383 default:
1384 __vmx_bug(regs);
1385 break;
1387 return 1;
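/*
 * For illustration of the TYPE_LMSW case in vmx_cr_access() above: the LMSW
 * source operand sits in bits 31:16 of the exit qualification, and only its
 * low nibble (PE/MP/EM/TS) is substituted into the CR0 read shadow.  So a
 * source value of 0xfff1 yields a shadow whose low nibble is 0x1 (PE set,
 * MP/EM/TS clear) while all higher CR0 bits keep their previous shadow
 * values, and that result is passed to vmx_set_cr0().
 */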
1390 static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
1392 u64 msr_content = 0;
1394 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
1395 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1396 (unsigned long)regs->edx);
1397 switch (regs->ecx) {
1398 case MSR_IA32_SYSENTER_CS:
1399 __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
1400 break;
1401 case MSR_IA32_SYSENTER_ESP:
1402 __vmread(GUEST_SYSENTER_ESP, &msr_content);
1403 break;
1404 case MSR_IA32_SYSENTER_EIP:
1405 __vmread(GUEST_SYSENTER_EIP, &msr_content);
1406 break;
1407 default:
1408 if(long_mode_do_msr_read(regs))
1409 return;
1410 rdmsr_user(regs->ecx, regs->eax, regs->edx);
1411 break;
1414 regs->eax = msr_content & 0xFFFFFFFF;
1415 regs->edx = msr_content >> 32;
1417 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
1418 "ecx=%lx, eax=%lx, edx=%lx",
1419 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1420 (unsigned long)regs->edx);
1423 static inline void vmx_do_msr_write(struct cpu_user_regs *regs)
1425 u64 msr_content;
1427 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write: ecx=%lx, eax=%lx, edx=%lx",
1428 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1429 (unsigned long)regs->edx);
1431 msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32);
1433 switch (regs->ecx) {
1434 case MSR_IA32_SYSENTER_CS:
1435 __vmwrite(GUEST_SYSENTER_CS, msr_content);
1436 break;
1437 case MSR_IA32_SYSENTER_ESP:
1438 __vmwrite(GUEST_SYSENTER_ESP, msr_content);
1439 break;
1440 case MSR_IA32_SYSENTER_EIP:
1441 __vmwrite(GUEST_SYSENTER_EIP, msr_content);
1442 break;
1443 default:
1444 long_mode_do_msr_write(regs);
1445 break;
1448 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write returns: "
1449 "ecx=%lx, eax=%lx, edx=%lx",
1450 (unsigned long)regs->ecx, (unsigned long)regs->eax,
1451 (unsigned long)regs->edx);
1454 /*
1455 * We use this exit (HLT) as an opportunity to reschedule.
1456 */
1457 static inline void vmx_vmexit_do_hlt(void)
1459 #if VMX_DEBUG
1460 unsigned long eip;
1461 __vmread(GUEST_RIP, &eip);
1462 #endif
1463 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%lx", eip);
1464 raise_softirq(SCHEDULE_SOFTIRQ);
1467 static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
1469 unsigned int vector;
1470 int error;
1472 asmlinkage void do_IRQ(struct cpu_user_regs *);
1473 void smp_apic_timer_interrupt(struct cpu_user_regs *);
1474 void timer_interrupt(int, void *, struct cpu_user_regs *);
1475 void smp_event_check_interrupt(void);
1476 void smp_invalidate_interrupt(void);
1477 void smp_call_function_interrupt(void);
1478 void smp_spurious_interrupt(struct cpu_user_regs *regs);
1479 void smp_error_interrupt(struct cpu_user_regs *regs);
1481 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1482 && !(vector & INTR_INFO_VALID_MASK))
1483 __vmx_bug(regs);
1485 vector &= 0xff;
1486 local_irq_disable();
1488 switch(vector) {
1489 case LOCAL_TIMER_VECTOR:
1490 smp_apic_timer_interrupt(regs);
1491 break;
1492 case EVENT_CHECK_VECTOR:
1493 smp_event_check_interrupt();
1494 break;
1495 case INVALIDATE_TLB_VECTOR:
1496 smp_invalidate_interrupt();
1497 break;
1498 case CALL_FUNCTION_VECTOR:
1499 smp_call_function_interrupt();
1500 break;
1501 case SPURIOUS_APIC_VECTOR:
1502 smp_spurious_interrupt(regs);
1503 break;
1504 case ERROR_APIC_VECTOR:
1505 smp_error_interrupt(regs);
1506 break;
1507 default:
1508 regs->entry_vector = vector;
1509 do_IRQ(regs);
1510 break;
1514 static inline void vmx_vmexit_do_mwait(void)
1516 #if VMX_DEBUG
1517 unsigned long eip;
1518 __vmread(GUEST_RIP, &eip);
1519 #endif
1520 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%lx", eip);
1521 raise_softirq(SCHEDULE_SOFTIRQ);
1524 #define BUF_SIZ 256
1525 #define MAX_LINE 80
1526 char print_buf[BUF_SIZ];
1527 static int index;
1529 static void vmx_print_line(const char c, struct vcpu *d)
1532 if (index == MAX_LINE || c == '\n') {
1533 if (index == MAX_LINE) {
1534 print_buf[index++] = c;
1536 print_buf[index] = '\0';
1537 printk("(GUEST: %u) %s\n", d->domain->domain_id, (char *) &print_buf);
1538 index = 0;
1540 else
1541 print_buf[index++] = c;
1544 void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
1546 __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
1547 __vmread(GUEST_RSP, &ctxt->esp);
1548 __vmread(GUEST_RFLAGS, &ctxt->eflags);
1549 __vmread(GUEST_CS_SELECTOR, &ctxt->cs);
1550 __vmread(GUEST_RIP, &ctxt->eip);
1552 __vmread(GUEST_GS_SELECTOR, &ctxt->gs);
1553 __vmread(GUEST_FS_SELECTOR, &ctxt->fs);
1554 __vmread(GUEST_ES_SELECTOR, &ctxt->es);
1555 __vmread(GUEST_DS_SELECTOR, &ctxt->ds);
1558 #ifdef XEN_DEBUGGER
1559 void save_cpu_user_regs(struct cpu_user_regs *regs)
1561 __vmread(GUEST_SS_SELECTOR, &regs->xss);
1562 __vmread(GUEST_RSP, &regs->esp);
1563 __vmread(GUEST_RFLAGS, &regs->eflags);
1564 __vmread(GUEST_CS_SELECTOR, &regs->xcs);
1565 __vmread(GUEST_RIP, &regs->eip);
1567 __vmread(GUEST_GS_SELECTOR, &regs->xgs);
1568 __vmread(GUEST_FS_SELECTOR, &regs->xfs);
1569 __vmread(GUEST_ES_SELECTOR, &regs->xes);
1570 __vmread(GUEST_DS_SELECTOR, &regs->xds);
1573 void restore_cpu_user_regs(struct cpu_user_regs *regs)
1575 __vmwrite(GUEST_SS_SELECTOR, regs->xss);
1576 __vmwrite(GUEST_RSP, regs->esp);
1577 __vmwrite(GUEST_RFLAGS, regs->eflags);
1578 __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
1579 __vmwrite(GUEST_RIP, regs->eip);
1581 __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
1582 __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
1583 __vmwrite(GUEST_ES_SELECTOR, regs->xes);
1584 __vmwrite(GUEST_DS_SELECTOR, regs->xds);
1586 #endif
1588 asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
1590 unsigned int exit_reason, idtv_info_field;
1591 unsigned long exit_qualification, eip, inst_len = 0;
1592 struct vcpu *v = current;
1593 int error;
1595 if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
1596 __vmx_bug(&regs);
1598 perfc_incra(vmexits, exit_reason);
1600 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
1601 if (idtv_info_field & INTR_INFO_VALID_MASK) {
1602 __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
1604 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
1605 if (inst_len >= 1 && inst_len <= 15)
1606 __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
1608 if (idtv_info_field & 0x800) { /* valid error code */
1609 unsigned long error_code;
1610 __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
1611 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
1614 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
1617 /* don't bother logging hardware interrupts and other frequent exits */
1618 if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
1619 exit_reason != EXIT_REASON_VMCALL &&
1620 exit_reason != EXIT_REASON_IO_INSTRUCTION)
1621 VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
1623 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
1624 printk("Failed vm entry\n");
1625 domain_crash_synchronous();
1626 return;
1629 __vmread(GUEST_RIP, &eip);
1630 TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
1631 TRACE_VMEXIT(0,exit_reason);
1633 switch (exit_reason) {
1634 case EXIT_REASON_EXCEPTION_NMI:
1636 /*
1637 * We don't enable software-interrupt exiting (INT n), so at this
1638 * point we can only get (1) an exception (e.g. #PF) raised in the
1639 * guest, or (2) an NMI.
1640 */
1641 int error;
1642 unsigned int vector;
1643 unsigned long va;
1645 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
1646 || !(vector & INTR_INFO_VALID_MASK))
1647 __vmx_bug(&regs);
1648 vector &= 0xff;
1650 TRACE_VMEXIT(1,vector);
1651 perfc_incra(cause_vector, vector);
1653 TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector);
1654 switch (vector) {
1655 #ifdef XEN_DEBUGGER
1656 case TRAP_debug:
1658 save_cpu_user_regs(&regs);
1659 pdb_handle_exception(1, &regs, 1);
1660 restore_cpu_user_regs(&regs);
1661 break;
1663 case TRAP_int3:
1665 save_cpu_user_regs(&regs);
1666 pdb_handle_exception(3, &regs, 1);
1667 restore_cpu_user_regs(&regs);
1668 break;
1670 #else
1671 case TRAP_debug:
1673 void store_cpu_user_regs(struct cpu_user_regs *regs);
1674 long do_sched_op(unsigned long op);
1677 store_cpu_user_regs(&regs);
1678 __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);
1680 set_bit(_VCPUF_ctrl_pause, &current->vcpu_flags);
1681 do_sched_op(SCHEDOP_yield);
1683 break;
1685 #endif
1686 case TRAP_no_device:
1688 vmx_do_no_device_fault();
1689 break;
1691 case TRAP_page_fault:
1693 __vmread(EXIT_QUALIFICATION, &va);
1694 __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
1696 TRACE_VMEXIT(3,regs.error_code);
1697 TRACE_VMEXIT(4,va);
1699 VMX_DBG_LOG(DBG_LEVEL_VMMU,
1700 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
1701 (unsigned long)regs.eax, (unsigned long)regs.ebx,
1702 (unsigned long)regs.ecx, (unsigned long)regs.edx,
1703 (unsigned long)regs.esi, (unsigned long)regs.edi);
1704 v->domain->arch.vmx_platform.mpci.inst_decoder_regs = &regs;
1706 if (!(error = vmx_do_page_fault(va, &regs))) {
1707 /*
1708 * Inject #PG using Interruption-Information Fields
1709 */
1710 vmx_inject_exception(v, TRAP_page_fault, regs.error_code);
1711 v->arch.arch_vmx.cpu_cr2 = va;
1712 TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
1714 break;
1716 case TRAP_nmi:
1717 do_nmi(&regs, 0);
1718 break;
1719 default:
1720 vmx_reflect_exception(v);
1721 break;
1723 break;
1725 case EXIT_REASON_EXTERNAL_INTERRUPT:
1726 vmx_vmexit_do_extint(&regs);
1727 break;
1728 case EXIT_REASON_PENDING_INTERRUPT:
1729 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
1730 MONITOR_CPU_BASED_EXEC_CONTROLS);
1731 break;
1732 case EXIT_REASON_TASK_SWITCH:
1733 __vmx_bug(&regs);
1734 break;
1735 case EXIT_REASON_CPUID:
1736 __get_instruction_length(inst_len);
1737 vmx_vmexit_do_cpuid(regs.eax, &regs);
1738 __update_guest_eip(inst_len);
1739 break;
1740 case EXIT_REASON_HLT:
1741 __get_instruction_length(inst_len);
1742 __update_guest_eip(inst_len);
1743 vmx_vmexit_do_hlt();
1744 break;
1745 case EXIT_REASON_INVLPG:
1747 unsigned long va;
1749 __vmread(EXIT_QUALIFICATION, &va);
1750 vmx_vmexit_do_invlpg(va);
1751 __get_instruction_length(inst_len);
1752 __update_guest_eip(inst_len);
1753 break;
1755 case EXIT_REASON_VMCALL:
1756 __get_instruction_length(inst_len);
1757 __vmread(GUEST_RIP, &eip);
1758 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1760 vmx_print_line(regs.eax, v); /* provides the current domain */
1761 __update_guest_eip(inst_len);
1762 break;
1763 case EXIT_REASON_CR_ACCESS:
1765 __vmread(GUEST_RIP, &eip);
1766 __get_instruction_length(inst_len);
1767 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1769 VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx",
1770 eip, inst_len, exit_qualification);
1771 if (vmx_cr_access(exit_qualification, &regs))
1772 __update_guest_eip(inst_len);
1773 TRACE_VMEXIT(3,regs.error_code);
1774 TRACE_VMEXIT(4,exit_qualification);
1775 break;
1777 case EXIT_REASON_DR_ACCESS:
1778 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1779 vmx_dr_access(exit_qualification, &regs);
1780 __get_instruction_length(inst_len);
1781 __update_guest_eip(inst_len);
1782 break;
1783 case EXIT_REASON_IO_INSTRUCTION:
1784 __vmread(EXIT_QUALIFICATION, &exit_qualification);
1785 __get_instruction_length(inst_len);
1786 vmx_io_instruction(&regs, exit_qualification, inst_len);
1787 TRACE_VMEXIT(4,exit_qualification);
1788 break;
1789 case EXIT_REASON_MSR_READ:
1790 __get_instruction_length(inst_len);
1791 vmx_do_msr_read(&regs);
1792 __update_guest_eip(inst_len);
1793 break;
1794 case EXIT_REASON_MSR_WRITE:
1795 __vmread(GUEST_RIP, &eip);
1796 vmx_do_msr_write(&regs);
1797 __get_instruction_length(inst_len);
1798 __update_guest_eip(inst_len);
1799 break;
1800 case EXIT_REASON_MWAIT_INSTRUCTION:
1801 __get_instruction_length(inst_len);
1802 __update_guest_eip(inst_len);
1803 vmx_vmexit_do_mwait();
1804 break;
1805 default:
1806 __vmx_bug(&regs); /* should not happen */
1810 asmlinkage void load_cr2(void)
1812 struct vcpu *d = current;
1814 local_irq_disable();
1815 #ifdef __i386__
1816 asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1817 #else
1818 asm volatile("movq %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
1819 #endif
1822 #ifdef TRACE_BUFFER
1823 asmlinkage void trace_vmentry (void)
1825 TRACE_5D(TRC_VMENTRY,trace_values[current->processor][0],
1826 trace_values[current->processor][1],trace_values[current->processor][2],
1827 trace_values[current->processor][3],trace_values[current->processor][4]);
1828 TRACE_VMEXIT(0,9);
1829 TRACE_VMEXIT(1,9);
1830 TRACE_VMEXIT(2,9);
1831 TRACE_VMEXIT(3,9);
1832 TRACE_VMEXIT(4,9);
1833 return;
1835 asmlinkage void trace_vmexit (void)
1837 TRACE_3D(TRC_VMEXIT,0,0,0);
1838 return;
1840 #endif
1841 #endif /* CONFIG_VMX */
1843 /*
1844 * Local variables:
1845 * mode: C
1846 * c-set-style: "BSD"
1847 * c-basic-offset: 4
1848 * tab-width: 4
1849 * indent-tabs-mode: nil
1850 * End:
1851 */