
view xen/arch/x86/hvm/vmx/vmcs.c @ 19648:f0e2df69a8eb

x86 hvm: Allow cross-vendor migration

Intercept #UD and emulate SYSCALL/SYSENTER/SYSEXIT as necessary.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue May 26 15:01:36 2009 +0100 (2009-05-26)
parents 7d552e56d105
children 822ea2bf0c54

/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <xen/keyhandler.h>
#include <asm/shadow.h>
#include <asm/tboot.h>

static int opt_vpid_enabled = 1;
boolean_param("vpid", opt_vpid_enabled);

/* Dynamic (run-time adjusted) execution control flags. */
u32 vmx_pin_based_exec_control __read_mostly;
u32 vmx_cpu_based_exec_control __read_mostly;
u32 vmx_secondary_exec_control __read_mostly;
u32 vmx_vmexit_control __read_mostly;
u32 vmx_vmentry_control __read_mostly;
bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;

static DEFINE_PER_CPU(struct vmcs_struct *, host_vmcs);
static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, active_vmcs_list);

static u32 vmcs_revision_id __read_mostly;

static void __init vmx_display_features(void)
{
    int printed = 0;

    printk("VMX: Supported advanced features:\n");

#define P(p,s) if ( p ) { printk(" - %s\n", s); printed = 1; }
    P(cpu_has_vmx_virtualize_apic_accesses, "APIC MMIO access virtualisation");
    P(cpu_has_vmx_tpr_shadow, "APIC TPR shadow");
    P(cpu_has_vmx_ept, "Extended Page Tables (EPT)");
    P(cpu_has_vmx_vpid, "Virtual-Processor Identifiers (VPID)");
    P(cpu_has_vmx_vnmi, "Virtual NMI");
    P(cpu_has_vmx_msr_bitmap, "MSR direct-access bitmap");
#undef P

    if ( !printed )
        printk(" - none\n");
}
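
/*
 * Combine required (ctl_min) and optional (ctl_opt) control bits with the
 * capabilities reported by the given VMX capability MSR: a bit clear in the
 * MSR's high word must be zero, a bit set in its low word must be one.
 * BUG()s if any required bit is unsupported by the hardware.
 */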
static u32 adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr)
{
    u32 vmx_msr_low, vmx_msr_high, ctl = ctl_min | ctl_opt;

    rdmsr(msr, vmx_msr_low, vmx_msr_high);

    ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
    ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

    /* Ensure minimum (required) set of control bits are supported. */
    BUG_ON(ctl_min & ~ctl);

    return ctl;
}
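
/*
 * Probe the VMX capability MSRs and derive the pin-based, CPU-based,
 * secondary, VM-exit and VM-entry control settings to be used. The first
 * CPU through here records the results in the vmx_*_control globals; each
 * subsequent CPU re-derives them and BUG()s on any mismatch, so asymmetric
 * VMX capabilities across CPUs are fatal.
 */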
static void vmx_init_vmcs_config(void)
{
    u32 vmx_basic_msr_low, vmx_basic_msr_high, min, opt;
    u32 _vmx_pin_based_exec_control;
    u32 _vmx_cpu_based_exec_control;
    u32 _vmx_secondary_exec_control = 0;
    u32 _vmx_vmexit_control;
    u32 _vmx_vmentry_control;

    rdmsr(MSR_IA32_VMX_BASIC, vmx_basic_msr_low, vmx_basic_msr_high);

    min = (PIN_BASED_EXT_INTR_MASK |
           PIN_BASED_NMI_EXITING);
    opt = PIN_BASED_VIRTUAL_NMIS;
    _vmx_pin_based_exec_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_PINBASED_CTLS);

    min = (CPU_BASED_HLT_EXITING |
           CPU_BASED_INVLPG_EXITING |
           CPU_BASED_CR3_LOAD_EXITING |
           CPU_BASED_CR3_STORE_EXITING |
           CPU_BASED_MONITOR_EXITING |
           CPU_BASED_MWAIT_EXITING |
           CPU_BASED_MOV_DR_EXITING |
           CPU_BASED_ACTIVATE_IO_BITMAP |
           CPU_BASED_USE_TSC_OFFSETING |
           (opt_softtsc ? CPU_BASED_RDTSC_EXITING : 0));
    opt = (CPU_BASED_ACTIVATE_MSR_BITMAP |
           CPU_BASED_TPR_SHADOW |
           CPU_BASED_MONITOR_TRAP_FLAG |
           CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
    _vmx_cpu_based_exec_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
#ifdef __x86_64__
    if ( !(_vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) )
    {
        min |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
        _vmx_cpu_based_exec_control = adjust_vmx_controls(
            min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
    }
#endif

    if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
    {
        min = 0;
        opt = (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
               SECONDARY_EXEC_WBINVD_EXITING |
               SECONDARY_EXEC_ENABLE_EPT);
        if ( opt_vpid_enabled )
            opt |= SECONDARY_EXEC_ENABLE_VPID;
        _vmx_secondary_exec_control = adjust_vmx_controls(
            min, opt, MSR_IA32_VMX_PROCBASED_CTLS2);
    }

    if ( _vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT )
    {
        /*
         * To use EPT we expect to be able to clear certain intercepts.
         * We check VMX_BASIC_MSR[55] to correctly handle default1 controls.
         */
        uint32_t must_be_one, must_be_zero, msr = MSR_IA32_VMX_PROCBASED_CTLS;
        if ( vmx_basic_msr_high & (1u << 23) )
            msr = MSR_IA32_VMX_TRUE_PROCBASED_CTLS;
        rdmsr(msr, must_be_one, must_be_zero);
        if ( must_be_one & (CPU_BASED_INVLPG_EXITING |
                            CPU_BASED_CR3_LOAD_EXITING |
                            CPU_BASED_CR3_STORE_EXITING) )
            _vmx_secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
    }

#if defined(__i386__)
    /* If we can't virtualise APIC accesses, the TPR shadow is pointless. */
    if ( !(_vmx_secondary_exec_control &
           SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) )
        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

    min = VM_EXIT_ACK_INTR_ON_EXIT;
    opt = VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT;
#ifdef __x86_64__
    min |= VM_EXIT_IA32E_MODE;
#endif
    _vmx_vmexit_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_EXIT_CTLS);

    min = 0;
    opt = VM_ENTRY_LOAD_GUEST_PAT;
    _vmx_vmentry_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_ENTRY_CTLS);

    if ( !vmx_pin_based_exec_control )
    {
        /* First time through. */
        vmcs_revision_id = vmx_basic_msr_low;
        vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
        vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
        vmx_secondary_exec_control = _vmx_secondary_exec_control;
        vmx_vmexit_control = _vmx_vmexit_control;
        vmx_vmentry_control = _vmx_vmentry_control;
        cpu_has_vmx_ins_outs_instr_info = !!(vmx_basic_msr_high & (1U<<22));
        vmx_display_features();
    }
    else
    {
        /* Globals are already initialised: re-check them. */
        BUG_ON(vmcs_revision_id != vmx_basic_msr_low);
        BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
        BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
        BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
        BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
        BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
        BUG_ON(cpu_has_vmx_ins_outs_instr_info !=
               !!(vmx_basic_msr_high & (1U<<22)));
    }

    /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
    BUG_ON((vmx_basic_msr_high & 0x1fff) > PAGE_SIZE);

#ifdef __x86_64__
    /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
    BUG_ON(vmx_basic_msr_high & (1u<<16));
#endif

    /* Require Write-Back (WB) memory type for VMCS accesses. */
    BUG_ON(((vmx_basic_msr_high >> 18) & 15) != 6);
}

static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_page()) == NULL )
    {
        gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
        return NULL;
    }

    clear_page(vmcs);
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}

static void vmx_free_vmcs(struct vmcs_struct *vmcs)
{
    free_xenheap_page(vmcs);
}

static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    /* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
    ASSERT(!local_irq_is_enabled());

    if ( arch_vmx->active_cpu == smp_processor_id() )
    {
        __vmpclear(virt_to_maddr(arch_vmx->vmcs));

        arch_vmx->active_cpu = -1;
        arch_vmx->launched = 0;

        list_del(&arch_vmx->active_list);

        if ( arch_vmx->vmcs == this_cpu(current_vmcs) )
            this_cpu(current_vmcs) = NULL;
    }
}

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    if ( cpu != -1 )
        on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
}

static void vmx_load_vmcs(struct vcpu *v)
{
    unsigned long flags;

    local_irq_save(flags);

    if ( v->arch.hvm_vmx.active_cpu == -1 )
    {
        list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
        v->arch.hvm_vmx.active_cpu = smp_processor_id();
    }

    ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());

    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;

    local_irq_restore(flags);
}
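
/*
 * Bring the current CPU into VMX operation: check the CR0 fixed bits and
 * IA32_FEATURE_CONTROL (programming it if the BIOS left it unlocked),
 * (re)derive the VMCS configuration, allocate this CPU's host VMCS if
 * necessary, and execute VMXON. Returns 1 on success, 0 on failure.
 */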
int vmx_cpu_up(void)
{
    u32 eax, edx;
    int bios_locked, cpu = smp_processor_id();
    u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;

    BUG_ON(!(read_cr4() & X86_CR4_VMXE));

    /*
     * Ensure the current processor operating mode meets
     * the required CR0 fixed bits in VMX operation.
     */
    cr0 = read_cr0();
    rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0);
    rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1);
    if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) )
    {
        printk("CPU%d: some settings of host CR0 are "
               "not allowed in VMX operation.\n", cpu);
        return 0;
    }

    rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);

    bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK);
    if ( bios_locked )
    {
        if ( !(eax & (IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX |
                      IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
        {
            printk("CPU%d: VMX disabled by BIOS.\n", cpu);
            return 0;
        }
    }
    else
    {
        eax = IA32_FEATURE_CONTROL_MSR_LOCK;
        eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX;
        if ( test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) )
            eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX;
        wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0);
    }

    vmx_init_vmcs_config();

    INIT_LIST_HEAD(&this_cpu(active_vmcs_list));

    if ( this_cpu(host_vmcs) == NULL )
    {
        this_cpu(host_vmcs) = vmx_alloc_vmcs();
        if ( this_cpu(host_vmcs) == NULL )
        {
            printk("CPU%d: Could not allocate host VMCS\n", cpu);
            return 0;
        }
    }

    switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
    {
    case -2: /* #UD or #GP */
        if ( bios_locked &&
             test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) &&
             (!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) ||
              !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
        {
            printk("CPU%d: VMXON failed: perhaps because of TXT settings "
                   "in your BIOS configuration?\n", cpu);
            printk(" --> Disable TXT in your BIOS unless using a secure "
                   "bootloader.\n");
            return 0;
        }
        /* fall through */
    case -1: /* CF==1 or ZF==1 */
        printk("CPU%d: unexpected VMXON failure\n", cpu);
        return 0;
    case 0: /* success */
        break;
    default:
        BUG();
    }

    ept_sync_all();

    vpid_sync_all();

    return 1;
}

void vmx_cpu_down(void)
{
    struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
    unsigned long flags;

    local_irq_save(flags);

    while ( !list_empty(active_vmcs_list) )
        __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
                                    struct vcpu, arch.hvm_vmx.active_list));

    BUG_ON(!(read_cr4() & X86_CR4_VMXE));
    __vmxoff();

    local_irq_restore(flags);
}

struct foreign_vmcs {
    struct vcpu *v;
    unsigned int count;
};
static DEFINE_PER_CPU(struct foreign_vmcs, foreign_vmcs);
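
/*
 * vmx_vmcs_enter/exit give the caller temporary VMREAD/VMWRITE access to
 * another vcpu's VMCS by pausing that vcpu and loading its VMCS on the
 * current pCPU; calls nest via the per-CPU foreign_vmcs count. Typical
 * usage (a sketch; see construct_vmcs() and vmcs_dump_vcpu()):
 *
 *     vmx_vmcs_enter(v);
 *     ... __vmwrite()/__vmread() against v's VMCS fields ...
 *     vmx_vmcs_exit(v);
 */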
void vmx_vmcs_enter(struct vcpu *v)
{
    struct foreign_vmcs *fv;

    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions.
     */
    if ( likely(v == current) )
        return;

    fv = &this_cpu(foreign_vmcs);

    if ( fv->v == v )
    {
        BUG_ON(fv->count == 0);
    }
    else
    {
        BUG_ON(fv->v != NULL);
        BUG_ON(fv->count != 0);

        vcpu_pause(v);
        spin_lock(&v->arch.hvm_vmx.vmcs_lock);

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);

        fv->v = v;
    }

    fv->count++;
}

void vmx_vmcs_exit(struct vcpu *v)
{
    struct foreign_vmcs *fv;

    if ( likely(v == current) )
        return;

    fv = &this_cpu(foreign_vmcs);
    BUG_ON(fv->v != v);
    BUG_ON(fv->count == 0);

    if ( --fv->count == 0 )
    {
        /* Don't confuse vmx_do_resume (for @v or @current!) */
        vmx_clear_vmcs(v);
        if ( is_hvm_vcpu(current) )
            vmx_load_vmcs(current);

        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
        vcpu_unpause(v);

        fv->v = NULL;
    }
}

struct xgt_desc {
    unsigned short size;
    unsigned long address __attribute__((packed));
};

static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int cpu = smp_processor_id();

    __vmwrite(HOST_GDTR_BASE,
              (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY));
    __vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);

    __vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
    __vmwrite(HOST_TR_BASE, (unsigned long)&init_tss[cpu]);

    __vmwrite(HOST_SYSENTER_ESP, get_stack_bottom());

    /*
     * Skip end of cpu_user_regs when entering the hypervisor because the
     * CPU does not save context onto the stack. SS,RSP,CS,RIP,RFLAGS,etc
     * all get saved into the VMCS instead.
     */
    __vmwrite(HOST_RSP,
              (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
}

void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
{
    unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;

    /* VMX MSR bitmap supported? */
    if ( msr_bitmap == NULL )
        return;

    /*
     * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
     * have the write-low and read-high bitmap offsets the wrong way round.
     * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
     */
    if ( msr <= 0x1fff )
    {
        __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
        __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
    }
    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
    {
        msr &= 0x1fff;
        __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
        __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
    }
}
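
/*
 * Populate a newly allocated VMCS for @v: execution controls, MSR and I/O
 * bitmaps, host state, and a flat protected-mode initial guest state.
 * Returns 0 on success or -ENOMEM if the MSR bitmap cannot be allocated.
 */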
static int construct_vmcs(struct vcpu *v)
{
    struct domain *d = v->domain;
    uint16_t sysenter_cs;
    unsigned long sysenter_eip;

    vmx_vmcs_enter(v);

    /* VMCS controls. */
    __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);

    v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
    v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;

    if ( paging_mode_hap(d) )
    {
        v->arch.hvm_vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
                                          CPU_BASED_CR3_LOAD_EXITING |
                                          CPU_BASED_CR3_STORE_EXITING);
    }
    else
    {
        v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
        vmx_vmexit_control &= ~(VM_EXIT_SAVE_GUEST_PAT |
                                VM_EXIT_LOAD_HOST_PAT);
        vmx_vmentry_control &= ~VM_ENTRY_LOAD_GUEST_PAT;
    }

    /* Do not enable Monitor Trap Flag unless start single step debug */
    v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;

    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
    __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);

    if ( cpu_has_vmx_secondary_exec_control )
        __vmwrite(SECONDARY_VM_EXEC_CONTROL,
                  v->arch.hvm_vmx.secondary_exec_control);

    /* MSR access bitmap. */
    if ( cpu_has_vmx_msr_bitmap )
    {
        unsigned long *msr_bitmap = alloc_xenheap_page();

        if ( msr_bitmap == NULL )
            return -ENOMEM;

        memset(msr_bitmap, ~0, PAGE_SIZE);
        v->arch.hvm_vmx.msr_bitmap = msr_bitmap;
        __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));

        vmx_disable_intercept_for_msr(v, MSR_FS_BASE);
        vmx_disable_intercept_for_msr(v, MSR_GS_BASE);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
        if ( cpu_has_vmx_pat && paging_mode_hap(d) )
            vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
    }

    /* I/O access bitmap. */
    __vmwrite(IO_BITMAP_A, virt_to_maddr((char *)hvm_io_bitmap + 0));
    __vmwrite(IO_BITMAP_B, virt_to_maddr((char *)hvm_io_bitmap + PAGE_SIZE));

    /* Host data selectors. */
    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_FS_SELECTOR, 0);
    __vmwrite(HOST_GS_SELECTOR, 0);
    __vmwrite(HOST_FS_BASE, 0);
    __vmwrite(HOST_GS_BASE, 0);

    /* Host control registers. */
    v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
    __vmwrite(HOST_CR4, mmu_cr4_features);

    /* Host CS:RIP. */
    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);

    /* Host SYSENTER CS:RIP. */
    rdmsrl(MSR_IA32_SYSENTER_CS, sysenter_cs);
    __vmwrite(HOST_SYSENTER_CS, sysenter_cs);
    rdmsrl(MSR_IA32_SYSENTER_EIP, sysenter_eip);
    __vmwrite(HOST_SYSENTER_EIP, sysenter_eip);

    /* MSR intercepts. */
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);

    __vmwrite(VM_ENTRY_INTR_INFO, 0);

    __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
    __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);

    __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    __vmwrite(CR3_TARGET_COUNT, 0);

    __vmwrite(GUEST_ACTIVITY_STATE, 0);

    /* Guest segment bases. */
    __vmwrite(GUEST_ES_BASE, 0);
    __vmwrite(GUEST_SS_BASE, 0);
    __vmwrite(GUEST_DS_BASE, 0);
    __vmwrite(GUEST_FS_BASE, 0);
    __vmwrite(GUEST_GS_BASE, 0);
    __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits. */
    __vmwrite(GUEST_ES_LIMIT, ~0u);
    __vmwrite(GUEST_SS_LIMIT, ~0u);
    __vmwrite(GUEST_DS_LIMIT, ~0u);
    __vmwrite(GUEST_FS_LIMIT, ~0u);
    __vmwrite(GUEST_GS_LIMIT, ~0u);
    __vmwrite(GUEST_CS_LIMIT, ~0u);

    /* Guest segment AR bytes. */
    __vmwrite(GUEST_ES_AR_BYTES, 0xc093); /* read/write, accessed */
    __vmwrite(GUEST_SS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_DS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_FS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_GS_AR_BYTES, 0xc093);
    __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */

    /* Guest IDT. */
    __vmwrite(GUEST_IDTR_BASE, 0);
    __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest GDT. */
    __vmwrite(GUEST_GDTR_BASE, 0);
    __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest LDT. */
    __vmwrite(GUEST_LDTR_AR_BYTES, 0x0082); /* LDT */
    __vmwrite(GUEST_LDTR_SELECTOR, 0);
    __vmwrite(GUEST_LDTR_BASE, 0);
    __vmwrite(GUEST_LDTR_LIMIT, 0);

    /* Guest TSS. */
    __vmwrite(GUEST_TR_AR_BYTES, 0x008b); /* 32-bit TSS (busy) */
    __vmwrite(GUEST_TR_BASE, 0);
    __vmwrite(GUEST_TR_LIMIT, 0xff);

    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __vmwrite(GUEST_DR7, 0);
    __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
    __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif

    __vmwrite(EXCEPTION_BITMAP,
              HVM_TRAP_MASK
              | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
              | (1U << TRAP_no_device)
              | (1U << TRAP_invalid_op));

    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
    hvm_update_guest_cr(v, 0);

    v->arch.hvm_vcpu.guest_cr[4] = 0;
    hvm_update_guest_cr(v, 4);

    if ( cpu_has_vmx_tpr_shadow )
    {
        __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
                  page_to_maddr(vcpu_vlapic(v)->regs_page));
        __vmwrite(TPR_THRESHOLD, 0);
    }

    if ( paging_mode_hap(d) )
    {
        __vmwrite(EPT_POINTER, d->arch.hvm_domain.vmx.ept_control.eptp);
#ifdef __i386__
        __vmwrite(EPT_POINTER_HIGH,
                  d->arch.hvm_domain.vmx.ept_control.eptp >> 32);
#endif
    }

    if ( cpu_has_vmx_vpid )
    {
        v->arch.hvm_vmx.vpid =
            v->domain->arch.hvm_domain.vmx.vpid_base + v->vcpu_id;
        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vmx.vpid);
    }

    if ( cpu_has_vmx_pat && paging_mode_hap(d) )
    {
        u64 host_pat, guest_pat;

        rdmsrl(MSR_IA32_CR_PAT, host_pat);
        guest_pat = 0x7040600070406ULL;

        __vmwrite(HOST_PAT, host_pat);
        __vmwrite(GUEST_PAT, guest_pat);
#ifdef __i386__
        __vmwrite(HOST_PAT_HIGH, host_pat >> 32);
        __vmwrite(GUEST_PAT_HIGH, guest_pat >> 32);
#endif
    }

    vmx_vmcs_exit(v);

    paging_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */

    vmx_vlapic_msr_changed(v);

    return 0;
}
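
/*
 * Guest/host MSR save/load areas, all operating on the currently running
 * vcpu. vmx_add_guest_msr() registers an MSR in the area that is stored on
 * VM exit and reloaded on VM entry, so the guest's value is preserved;
 * vmx_read/write_guest_msr() access those saved guest values.
 * vmx_add_host_load_msr() registers an MSR to be restored to its current
 * host value on VM exit.
 */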
int vmx_read_guest_msr(u32 msr, u64 *val)
{
    struct vcpu *curr = current;
    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;

    for ( i = 0; i < msr_count; i++ )
    {
        if ( msr_area[i].index == msr )
        {
            *val = msr_area[i].data;
            return 0;
        }
    }

    return -ESRCH;
}

int vmx_write_guest_msr(u32 msr, u64 val)
{
    struct vcpu *curr = current;
    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;

    for ( i = 0; i < msr_count; i++ )
    {
        if ( msr_area[i].index == msr )
        {
            msr_area[i].data = val;
            return 0;
        }
    }

    return -ESRCH;
}

int vmx_add_guest_msr(u32 msr)
{
    struct vcpu *curr = current;
    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;

    if ( msr_area == NULL )
    {
        if ( (msr_area = alloc_xenheap_page()) == NULL )
            return -ENOMEM;
        curr->arch.hvm_vmx.msr_area = msr_area;
        __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
    }

    for ( i = 0; i < msr_count; i++ )
        if ( msr_area[i].index == msr )
            return 0;

    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
        return -ENOSPC;

    msr_area[msr_count].index = msr;
    msr_area[msr_count].mbz = 0;
    msr_area[msr_count].data = 0;
    curr->arch.hvm_vmx.msr_count = ++msr_count;
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);

    return 0;
}

int vmx_add_host_load_msr(u32 msr)
{
    struct vcpu *curr = current;
    unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;

    if ( msr_area == NULL )
    {
        if ( (msr_area = alloc_xenheap_page()) == NULL )
            return -ENOMEM;
        curr->arch.hvm_vmx.host_msr_area = msr_area;
        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
    }

    for ( i = 0; i < msr_count; i++ )
        if ( msr_area[i].index == msr )
            return 0;

    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
        return -ENOSPC;

    msr_area[msr_count].index = msr;
    msr_area[msr_count].mbz = 0;
    rdmsrl(msr, msr_area[msr_count].data);
    curr->arch.hvm_vmx.host_msr_count = ++msr_count;
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);

    return 0;
}

int vmx_create_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
    int rc;

    if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
        return -ENOMEM;

    INIT_LIST_HEAD(&arch_vmx->active_list);
    __vmpclear(virt_to_maddr(arch_vmx->vmcs));
    arch_vmx->active_cpu = -1;
    arch_vmx->launched = 0;

    if ( (rc = construct_vmcs(v)) != 0 )
    {
        vmx_free_vmcs(arch_vmx->vmcs);
        return rc;
    }

    return 0;
}

void vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    vmx_clear_vmcs(v);

    vmx_free_vmcs(arch_vmx->vmcs);

    free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
    free_xenheap_page(v->arch.hvm_vmx.msr_area);
    free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
}

void vm_launch_fail(void)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_launch_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

void vm_resume_fail(void)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_resume_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

static void wbinvd_ipi(void *info)
{
    wbinvd();
}
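
/*
 * Final preparation before VM entry on this pCPU. If the vcpu last ran on
 * a different pCPU, its VMCS is migrated here (after flushing the old
 * pCPU's cache for passthrough domains lacking WBINVD exiting), the host
 * state fields are refreshed and the vcpu's VPID TLB entries are flushed.
 * Ends by jumping to vmx_asm_do_vmentry and does not return.
 */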
void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
            vmx_load_vmcs(v);
    }
    else
    {
        /*
         * For a pass-through domain, the guest's PCI-E device driver may use
         * "Non-Snoop" I/O and explicitly WBINVD or CLFLUSH a RAM region.
         * Since the vcpu may migrate before the WBINVD or CLFLUSH, we must
         * maintain data consistency either by:
         *  1: flushing the cache (wbinvd) when the guest is scheduled out if
         *     there is no wbinvd exit, or
         *  2: executing wbinvd on all dirty pCPUs when a guest wbinvd exits.
         */
        if ( has_arch_pdevs(v->domain) && !cpu_has_wbinvd_exiting )
        {
            int cpu = v->arch.hvm_vmx.active_cpu;
            if ( cpu != -1 )
                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
        }

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
        vpid_sync_vcpu_all(v);
    }

    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        vmx_update_debug_state(v);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
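
/*
 * Helpers for the 'v' keyhandler dump below: vmr() is a fault-tolerant
 * VMREAD that returns 0 rather than faulting when a field cannot be read,
 * so the dump works regardless of the hardware's VMX capability set.
 */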
static unsigned long vmr(unsigned long field)
{
    int rc;
    unsigned long val;
    val = __vmread_safe(field, &rc);
    return rc ? 0 : val;
}

static void vmx_dump_sel(char *name, uint32_t selector)
{
    uint32_t sel, attr, limit;
    uint64_t base;
    sel = vmr(selector);
    attr = vmr(selector + (GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR));
    limit = vmr(selector + (GUEST_ES_LIMIT - GUEST_ES_SELECTOR));
    base = vmr(selector + (GUEST_ES_BASE - GUEST_ES_SELECTOR));
    printk("%s: sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016"PRIx64"\n",
           name, sel, attr, limit, base);
}

static void vmx_dump_sel2(char *name, uint32_t lim)
{
    uint32_t limit;
    uint64_t base;
    limit = vmr(lim);
    base = vmr(lim + (GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
    printk("%s: limit=0x%08x, base=0x%016"PRIx64"\n",
           name, limit, base);
}

void vmcs_dump_vcpu(struct vcpu *v)
{
    struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;
    unsigned long long x;

    if ( v == current )
        regs = guest_cpu_user_regs();

    vmx_vmcs_enter(v);

    printk("*** Guest State ***\n");
    printk("CR0: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
           (unsigned long long)vmr(GUEST_CR0),
           (unsigned long long)vmr(CR0_READ_SHADOW),
           (unsigned long long)vmr(CR0_GUEST_HOST_MASK));
    printk("CR4: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
           (unsigned long long)vmr(GUEST_CR4),
           (unsigned long long)vmr(CR4_READ_SHADOW),
           (unsigned long long)vmr(CR4_GUEST_HOST_MASK));
    printk("CR3: actual=0x%016llx, target_count=%d\n",
           (unsigned long long)vmr(GUEST_CR3),
           (int)vmr(CR3_TARGET_COUNT));
    printk(" target0=%016llx, target1=%016llx\n",
           (unsigned long long)vmr(CR3_TARGET_VALUE0),
           (unsigned long long)vmr(CR3_TARGET_VALUE1));
    printk(" target2=%016llx, target3=%016llx\n",
           (unsigned long long)vmr(CR3_TARGET_VALUE2),
           (unsigned long long)vmr(CR3_TARGET_VALUE3));
    printk("RSP = 0x%016llx (0x%016llx) RIP = 0x%016llx (0x%016llx)\n",
           (unsigned long long)vmr(GUEST_RSP),
           (unsigned long long)regs->esp,
           (unsigned long long)vmr(GUEST_RIP),
           (unsigned long long)regs->eip);
    printk("RFLAGS=0x%016llx (0x%016llx) DR7 = 0x%016llx\n",
           (unsigned long long)vmr(GUEST_RFLAGS),
           (unsigned long long)regs->eflags,
           (unsigned long long)vmr(GUEST_DR7));
    printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
           (unsigned long long)vmr(GUEST_SYSENTER_ESP),
           (int)vmr(GUEST_SYSENTER_CS),
           (unsigned long long)vmr(GUEST_SYSENTER_EIP));
    vmx_dump_sel("CS", GUEST_CS_SELECTOR);
    vmx_dump_sel("DS", GUEST_DS_SELECTOR);
    vmx_dump_sel("SS", GUEST_SS_SELECTOR);
    vmx_dump_sel("ES", GUEST_ES_SELECTOR);
    vmx_dump_sel("FS", GUEST_FS_SELECTOR);
    vmx_dump_sel("GS", GUEST_GS_SELECTOR);
    vmx_dump_sel2("GDTR", GUEST_GDTR_LIMIT);
    vmx_dump_sel("LDTR", GUEST_LDTR_SELECTOR);
    vmx_dump_sel2("IDTR", GUEST_IDTR_LIMIT);
    vmx_dump_sel("TR", GUEST_TR_SELECTOR);
    printk("Guest PAT = 0x%08x%08x\n",
           (uint32_t)vmr(GUEST_PAT_HIGH), (uint32_t)vmr(GUEST_PAT));
    x = (unsigned long long)vmr(TSC_OFFSET_HIGH) << 32;
    x |= (uint32_t)vmr(TSC_OFFSET);
    printk("TSC Offset = %016llx\n", x);
    x = (unsigned long long)vmr(GUEST_IA32_DEBUGCTL_HIGH) << 32;
    x |= (uint32_t)vmr(GUEST_IA32_DEBUGCTL);
    printk("DebugCtl=%016llx DebugExceptions=%016llx\n", x,
           (unsigned long long)vmr(GUEST_PENDING_DBG_EXCEPTIONS));
    printk("Interruptibility=%04x ActivityState=%04x\n",
           (int)vmr(GUEST_INTERRUPTIBILITY_INFO),
           (int)vmr(GUEST_ACTIVITY_STATE));

    printk("*** Host State ***\n");
    printk("RSP = 0x%016llx RIP = 0x%016llx\n",
           (unsigned long long)vmr(HOST_RSP),
           (unsigned long long)vmr(HOST_RIP));
    printk("CS=%04x DS=%04x ES=%04x FS=%04x GS=%04x SS=%04x TR=%04x\n",
           (uint16_t)vmr(HOST_CS_SELECTOR),
           (uint16_t)vmr(HOST_DS_SELECTOR),
           (uint16_t)vmr(HOST_ES_SELECTOR),
           (uint16_t)vmr(HOST_FS_SELECTOR),
           (uint16_t)vmr(HOST_GS_SELECTOR),
           (uint16_t)vmr(HOST_SS_SELECTOR),
           (uint16_t)vmr(HOST_TR_SELECTOR));
    printk("FSBase=%016llx GSBase=%016llx TRBase=%016llx\n",
           (unsigned long long)vmr(HOST_FS_BASE),
           (unsigned long long)vmr(HOST_GS_BASE),
           (unsigned long long)vmr(HOST_TR_BASE));
    printk("GDTBase=%016llx IDTBase=%016llx\n",
           (unsigned long long)vmr(HOST_GDTR_BASE),
           (unsigned long long)vmr(HOST_IDTR_BASE));
    printk("CR0=%016llx CR3=%016llx CR4=%016llx\n",
           (unsigned long long)vmr(HOST_CR0),
           (unsigned long long)vmr(HOST_CR3),
           (unsigned long long)vmr(HOST_CR4));
    printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
           (unsigned long long)vmr(HOST_SYSENTER_ESP),
           (int)vmr(HOST_SYSENTER_CS),
           (unsigned long long)vmr(HOST_SYSENTER_EIP));
    printk("Host PAT = 0x%08x%08x\n",
           (uint32_t)vmr(HOST_PAT_HIGH), (uint32_t)vmr(HOST_PAT));

    printk("*** Control State ***\n");
    printk("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
           (uint32_t)vmr(PIN_BASED_VM_EXEC_CONTROL),
           (uint32_t)vmr(CPU_BASED_VM_EXEC_CONTROL),
           (uint32_t)vmr(SECONDARY_VM_EXEC_CONTROL));
    printk("EntryControls=%08x ExitControls=%08x\n",
           (uint32_t)vmr(VM_ENTRY_CONTROLS),
           (uint32_t)vmr(VM_EXIT_CONTROLS));
    printk("ExceptionBitmap=%08x\n",
           (uint32_t)vmr(EXCEPTION_BITMAP));
    printk("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
           (uint32_t)vmr(VM_ENTRY_INTR_INFO),
           (uint32_t)vmr(VM_ENTRY_EXCEPTION_ERROR_CODE),
           (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN));
    printk("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
           (uint32_t)vmr(VM_EXIT_INTR_INFO),
           (uint32_t)vmr(VM_EXIT_INTR_ERROR_CODE),
           (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN));
    printk(" reason=%08x qualification=%08x\n",
           (uint32_t)vmr(VM_EXIT_REASON),
           (uint32_t)vmr(EXIT_QUALIFICATION));
    printk("IDTVectoring: info=%08x errcode=%08x\n",
           (uint32_t)vmr(IDT_VECTORING_INFO),
           (uint32_t)vmr(IDT_VECTORING_ERROR_CODE));
    printk("TPR Threshold = 0x%02x\n",
           (uint32_t)vmr(TPR_THRESHOLD));
    printk("EPT pointer = 0x%08x%08x\n",
           (uint32_t)vmr(EPT_POINTER_HIGH), (uint32_t)vmr(EPT_POINTER));
    printk("Virtual processor ID = 0x%04x\n",
           (uint32_t)vmr(VIRTUAL_PROCESSOR_ID));

    vmx_vmcs_exit(v);
}

static void vmcs_dump(unsigned char ch)
{
    struct domain *d;
    struct vcpu *v;

    printk("*********** VMCS Areas **************\n");

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        if ( !is_hvm_domain(d) )
            continue;
        printk("\n>>> Domain %d <<<\n", d->domain_id);
        for_each_vcpu ( d, v )
        {
            printk("\tVCPU %d\n", v->vcpu_id);
            vmcs_dump_vcpu(v);
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    printk("**************************************\n");
}

void setup_vmcs_dump(void)
{
    register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */