ia64/xen-unstable

view xen/arch/x86/hvm/vmx/vmcs.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use vcpu::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents bf37a89269bf
line source
1 /*
2 * vmcs.c: VMCS management
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 */
19 #include <xen/config.h>
20 #include <xen/init.h>
21 #include <xen/mm.h>
22 #include <xen/lib.h>
23 #include <xen/errno.h>
24 #include <xen/domain_page.h>
25 #include <asm/current.h>
26 #include <asm/cpufeature.h>
27 #include <asm/processor.h>
28 #include <asm/msr.h>
29 #include <asm/hvm/hvm.h>
30 #include <asm/hvm/io.h>
31 #include <asm/hvm/support.h>
32 #include <asm/hvm/vmx/vmx.h>
33 #include <asm/hvm/vmx/vmcs.h>
34 #include <asm/flushtlb.h>
35 #include <xen/event.h>
36 #include <xen/kernel.h>
37 #include <xen/keyhandler.h>
38 #include <asm/shadow.h>
39 #include <asm/tboot.h>
41 static int opt_vpid_enabled = 1;
42 boolean_param("vpid", opt_vpid_enabled);
44 static int opt_unrestricted_guest_enabled = 1;
45 boolean_param("unrestricted_guest", opt_unrestricted_guest_enabled);
47 /* Dynamic (run-time adjusted) execution control flags. */
48 u32 vmx_pin_based_exec_control __read_mostly;
49 u32 vmx_cpu_based_exec_control __read_mostly;
50 u32 vmx_secondary_exec_control __read_mostly;
51 u32 vmx_vmexit_control __read_mostly;
52 u32 vmx_vmentry_control __read_mostly;
53 bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;
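/*
 * Per-CPU VMX state: host_vmcs is the VMXON region for the CPU,
 * current_vmcs tracks the VMCS most recently loaded with VMPTRLD on
 * the CPU, and active_vmcs_list links every vCPU whose VMCS is
 * currently active (resident) on the CPU.
 */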
55 static DEFINE_PER_CPU(struct vmcs_struct *, host_vmcs);
56 static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
57 static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
59 static u32 vmcs_revision_id __read_mostly;
61 static void __init vmx_display_features(void)
62 {
63 int printed = 0;
65 printk("VMX: Supported advanced features:\n");
67 #define P(p,s) if ( p ) { printk(" - %s\n", s); printed = 1; }
68 P(cpu_has_vmx_virtualize_apic_accesses, "APIC MMIO access virtualisation");
69 P(cpu_has_vmx_tpr_shadow, "APIC TPR shadow");
70 P(cpu_has_vmx_ept, "Extended Page Tables (EPT)");
71 P(cpu_has_vmx_vpid, "Virtual-Processor Identifiers (VPID)");
72 P(cpu_has_vmx_vnmi, "Virtual NMI");
73 P(cpu_has_vmx_msr_bitmap, "MSR direct-access bitmap");
74 P(cpu_has_vmx_unrestricted_guest, "Unrestricted Guest");
75 #undef P
77 if ( !printed )
78 printk(" - none\n");
79 }
81 static u32 adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr)
82 {
83 u32 vmx_msr_low, vmx_msr_high, ctl = ctl_min | ctl_opt;
85 rdmsr(msr, vmx_msr_low, vmx_msr_high);
87 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
88 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
90 /* Ensure minimum (required) set of control bits are supported. */
91 BUG_ON(ctl_min & ~ctl);
93 return ctl;
94 }
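/*
 * Illustrative example with hypothetical values: for ctl_min = 0x006,
 * ctl_opt = 0x100 and a capability MSR reporting low = 0x016 (bits that
 * must be 1) and high = 0x1ff (bits allowed to be 1), ctl starts as
 * 0x106, stays 0x106 after masking with the high word, and becomes
 * 0x116 after OR-ing in the low word; the BUG_ON passes because every
 * bit of ctl_min is present in the result.
 */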
96 static void vmx_init_vmcs_config(void)
97 {
98 u32 vmx_basic_msr_low, vmx_basic_msr_high, min, opt;
99 u32 _vmx_pin_based_exec_control;
100 u32 _vmx_cpu_based_exec_control;
101 u32 _vmx_secondary_exec_control = 0;
102 u32 _vmx_vmexit_control;
103 u32 _vmx_vmentry_control;
105 rdmsr(MSR_IA32_VMX_BASIC, vmx_basic_msr_low, vmx_basic_msr_high);
107 min = (PIN_BASED_EXT_INTR_MASK |
108 PIN_BASED_NMI_EXITING);
109 opt = PIN_BASED_VIRTUAL_NMIS;
110 _vmx_pin_based_exec_control = adjust_vmx_controls(
111 min, opt, MSR_IA32_VMX_PINBASED_CTLS);
113 min = (CPU_BASED_HLT_EXITING |
114 CPU_BASED_INVLPG_EXITING |
115 CPU_BASED_CR3_LOAD_EXITING |
116 CPU_BASED_CR3_STORE_EXITING |
117 CPU_BASED_MONITOR_EXITING |
118 CPU_BASED_MWAIT_EXITING |
119 CPU_BASED_MOV_DR_EXITING |
120 CPU_BASED_ACTIVATE_IO_BITMAP |
121 CPU_BASED_USE_TSC_OFFSETING |
122 (opt_softtsc ? CPU_BASED_RDTSC_EXITING : 0));
123 opt = (CPU_BASED_ACTIVATE_MSR_BITMAP |
124 CPU_BASED_TPR_SHADOW |
125 CPU_BASED_MONITOR_TRAP_FLAG |
126 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
127 _vmx_cpu_based_exec_control = adjust_vmx_controls(
128 min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
129 #ifdef __x86_64__
130 if ( !(_vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) )
131 {
132 min |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
133 _vmx_cpu_based_exec_control = adjust_vmx_controls(
134 min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
135 }
136 #endif
138 if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
139 {
140 min = 0;
141 opt = (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
142 SECONDARY_EXEC_WBINVD_EXITING |
143 SECONDARY_EXEC_ENABLE_EPT);
144 if ( opt_vpid_enabled )
145 opt |= SECONDARY_EXEC_ENABLE_VPID;
146 if ( opt_unrestricted_guest_enabled )
147 opt |= SECONDARY_EXEC_UNRESTRICTED_GUEST;
149 _vmx_secondary_exec_control = adjust_vmx_controls(
150 min, opt, MSR_IA32_VMX_PROCBASED_CTLS2);
151 }
153 if ( _vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT )
154 {
155 /*
156 * To use EPT we expect to be able to clear certain intercepts.
157 * We check VMX_BASIC_MSR[55] to correctly handle default1 controls.
158 */
159 uint32_t must_be_one, must_be_zero, msr = MSR_IA32_VMX_PROCBASED_CTLS;
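/* Bit 55 of VMX_BASIC is bit 23 of the high dword read above. */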
160 if ( vmx_basic_msr_high & (1u << 23) )
161 msr = MSR_IA32_VMX_TRUE_PROCBASED_CTLS;
162 rdmsr(msr, must_be_one, must_be_zero);
163 if ( must_be_one & (CPU_BASED_INVLPG_EXITING |
164 CPU_BASED_CR3_LOAD_EXITING |
165 CPU_BASED_CR3_STORE_EXITING) )
166 _vmx_secondary_exec_control &=
167 ~(SECONDARY_EXEC_ENABLE_EPT |
168 SECONDARY_EXEC_UNRESTRICTED_GUEST);
169 }
171 #if defined(__i386__)
172 /* If we can't virtualise APIC accesses, the TPR shadow is pointless. */
173 if ( !(_vmx_secondary_exec_control &
174 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) )
175 _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
176 #endif
178 min = VM_EXIT_ACK_INTR_ON_EXIT;
179 opt = VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT;
180 #ifdef __x86_64__
181 min |= VM_EXIT_IA32E_MODE;
182 #endif
183 _vmx_vmexit_control = adjust_vmx_controls(
184 min, opt, MSR_IA32_VMX_EXIT_CTLS);
186 min = 0;
187 opt = VM_ENTRY_LOAD_GUEST_PAT;
188 _vmx_vmentry_control = adjust_vmx_controls(
189 min, opt, MSR_IA32_VMX_ENTRY_CTLS);
191 if ( !vmx_pin_based_exec_control )
192 {
193 /* First time through. */
194 vmcs_revision_id = vmx_basic_msr_low;
195 vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
196 vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
197 vmx_secondary_exec_control = _vmx_secondary_exec_control;
198 vmx_vmexit_control = _vmx_vmexit_control;
199 vmx_vmentry_control = _vmx_vmentry_control;
200 cpu_has_vmx_ins_outs_instr_info = !!(vmx_basic_msr_high & (1U<<22));
201 vmx_display_features();
202 }
203 else
204 {
205 /* Globals are already initialised: re-check them. */
206 BUG_ON(vmcs_revision_id != vmx_basic_msr_low);
207 BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
208 BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
209 BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
210 BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
211 BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
212 BUG_ON(cpu_has_vmx_ins_outs_instr_info !=
213 !!(vmx_basic_msr_high & (1U<<22)));
214 }
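/*
 * VMX_BASIC MSR layout as used below (high dword = bits 63:32):
 * bits 44:32 give the VMCS region size, bit 48 is set when physical
 * addresses are limited to 32 bits, bits 53:50 encode the required
 * VMCS memory type (6 == Write-Back), bit 54 reports INS/OUTS
 * instruction info on VM exit, and bit 55 indicates that the "true"
 * capability MSRs exist.
 */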
216 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
217 BUG_ON((vmx_basic_msr_high & 0x1fff) > PAGE_SIZE);
219 #ifdef __x86_64__
220 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
221 BUG_ON(vmx_basic_msr_high & (1u<<16));
222 #endif
224 /* Require Write-Back (WB) memory type for VMCS accesses. */
225 BUG_ON(((vmx_basic_msr_high >> 18) & 15) != 6);
226 }
228 static struct vmcs_struct *vmx_alloc_vmcs(void)
229 {
230 struct vmcs_struct *vmcs;
232 if ( (vmcs = alloc_xenheap_page()) == NULL )
233 {
234 gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
235 return NULL;
236 }
238 clear_page(vmcs);
239 vmcs->vmcs_revision_id = vmcs_revision_id;
241 return vmcs;
242 }
244 static void vmx_free_vmcs(struct vmcs_struct *vmcs)
245 {
246 free_xenheap_page(vmcs);
247 }
249 static void __vmx_clear_vmcs(void *info)
250 {
251 struct vcpu *v = info;
252 struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
254 /* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
255 ASSERT(!local_irq_is_enabled());
257 if ( arch_vmx->active_cpu == smp_processor_id() )
258 {
259 __vmpclear(virt_to_maddr(arch_vmx->vmcs));
261 arch_vmx->active_cpu = -1;
262 arch_vmx->launched = 0;
264 list_del(&arch_vmx->active_list);
266 if ( arch_vmx->vmcs == this_cpu(current_vmcs) )
267 this_cpu(current_vmcs) = NULL;
268 }
269 }
271 static void vmx_clear_vmcs(struct vcpu *v)
272 {
273 int cpu = v->arch.hvm_vmx.active_cpu;
275 if ( cpu != -1 )
276 on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
277 }
279 static void vmx_load_vmcs(struct vcpu *v)
280 {
281 unsigned long flags;
283 local_irq_save(flags);
285 if ( v->arch.hvm_vmx.active_cpu == -1 )
286 {
287 list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
288 v->arch.hvm_vmx.active_cpu = smp_processor_id();
289 }
291 ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());
293 __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
294 this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;
296 local_irq_restore(flags);
297 }
299 int vmx_cpu_up(void)
300 {
301 u32 eax, edx;
302 int bios_locked, cpu = smp_processor_id();
303 u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;
305 BUG_ON(!(read_cr4() & X86_CR4_VMXE));
307 /*
308 * Ensure the current processor operating mode meets
309 * the required CR0 fixed bits in VMX operation.
310 */
311 cr0 = read_cr0();
312 rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0);
313 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1);
314 if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) )
315 {
316 printk("CPU%d: some settings of host CR0 are "
317 "not allowed in VMX operation.\n", cpu);
318 return 0;
319 }
321 rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
323 bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK);
324 if ( bios_locked )
325 {
326 if ( !(eax & (IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX |
327 IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
328 {
329 printk("CPU%d: VMX disabled by BIOS.\n", cpu);
330 return 0;
331 }
332 }
333 else
334 {
335 eax = IA32_FEATURE_CONTROL_MSR_LOCK;
336 eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX;
337 if ( test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) )
338 eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX;
339 wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0);
340 }
342 vmx_init_vmcs_config();
344 INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
346 if ( this_cpu(host_vmcs) == NULL )
347 {
348 this_cpu(host_vmcs) = vmx_alloc_vmcs();
349 if ( this_cpu(host_vmcs) == NULL )
350 {
351 printk("CPU%d: Could not allocate host VMCS\n", cpu);
352 return 0;
353 }
354 }
356 switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
357 {
358 case -2: /* #UD or #GP */
359 if ( bios_locked &&
360 test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) &&
361 (!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) ||
362 !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
363 {
364 printk("CPU%d: VMXON failed: perhaps because of TXT settings "
365 "in your BIOS configuration?\n", cpu);
366 printk(" --> Disable TXT in your BIOS unless using a secure "
367 "bootloader.\n");
368 return 0;
369 }
370 /* fall through */
371 case -1: /* CF==1 or ZF==1 */
372 printk("CPU%d: unexpected VMXON failure\n", cpu);
373 return 0;
374 case 0: /* success */
375 break;
376 default:
377 BUG();
378 }
380 ept_sync_all();
382 vpid_sync_all();
384 return 1;
385 }
387 void vmx_cpu_down(void)
388 {
389 struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
390 unsigned long flags;
392 local_irq_save(flags);
394 while ( !list_empty(active_vmcs_list) )
395 __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
396 struct vcpu, arch.hvm_vmx.active_list));
398 BUG_ON(!(read_cr4() & X86_CR4_VMXE));
399 __vmxoff();
401 local_irq_restore(flags);
402 }
404 struct foreign_vmcs {
405 struct vcpu *v;
406 unsigned int count;
407 };
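/*
 * Tracks a foreign (non-current) vCPU whose VMCS has been temporarily
 * loaded on this CPU by vmx_vmcs_enter(); count allows nested
 * enter/exit pairs.
 */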
408 static DEFINE_PER_CPU(struct foreign_vmcs, foreign_vmcs);
410 void vmx_vmcs_enter(struct vcpu *v)
411 {
412 struct foreign_vmcs *fv;
414 /*
415 * NB. We must *always* run an HVM VCPU on its own VMCS, except for
416 * vmx_vmcs_enter/exit critical regions.
417 */
418 if ( likely(v == current) )
419 return;
421 fv = &this_cpu(foreign_vmcs);
423 if ( fv->v == v )
424 {
425 BUG_ON(fv->count == 0);
426 }
427 else
428 {
429 BUG_ON(fv->v != NULL);
430 BUG_ON(fv->count != 0);
432 vcpu_pause(v);
433 spin_lock(&v->arch.hvm_vmx.vmcs_lock);
435 vmx_clear_vmcs(v);
436 vmx_load_vmcs(v);
438 fv->v = v;
439 }
441 fv->count++;
442 }
444 void vmx_vmcs_exit(struct vcpu *v)
445 {
446 struct foreign_vmcs *fv;
448 if ( likely(v == current) )
449 return;
451 fv = &this_cpu(foreign_vmcs);
452 BUG_ON(fv->v != v);
453 BUG_ON(fv->count == 0);
455 if ( --fv->count == 0 )
456 {
457 /* Don't confuse vmx_do_resume (for @v or @current!) */
458 vmx_clear_vmcs(v);
459 if ( is_hvm_vcpu(current) )
460 vmx_load_vmcs(current);
462 spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
463 vcpu_unpause(v);
465 fv->v = NULL;
466 }
467 }
469 struct xgt_desc {
470 unsigned short size;
471 unsigned long address __attribute__((packed));
472 };
474 static void vmx_set_host_env(struct vcpu *v)
475 {
476 unsigned int cpu = smp_processor_id();
478 __vmwrite(HOST_GDTR_BASE,
479 (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY));
480 __vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);
482 __vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
483 __vmwrite(HOST_TR_BASE, (unsigned long)&init_tss[cpu]);
485 __vmwrite(HOST_SYSENTER_ESP, get_stack_bottom());
487 /*
488 * Skip end of cpu_user_regs when entering the hypervisor because the
489 * CPU does not save context onto the stack. SS,RSP,CS,RIP,RFLAGS,etc
490 * all get saved into the VMCS instead.
491 */
492 __vmwrite(HOST_RSP,
493 (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
494 }
496 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
497 {
498 unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
500 /* VMX MSR bitmap supported? */
501 if ( msr_bitmap == NULL )
502 return;
504 /*
505 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
506 * have the write-low and read-high bitmap offsets the wrong way round.
507 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
508 */
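/*
 * Bitmap page layout used below:
 *   0x000-0x3ff: read bitmap for MSRs 0x00000000-0x00001fff
 *   0x400-0x7ff: read bitmap for MSRs 0xc0000000-0xc0001fff
 *   0x800-0xbff: write bitmap for MSRs 0x00000000-0x00001fff
 *   0xc00-0xfff: write bitmap for MSRs 0xc0000000-0xc0001fff
 */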
509 if ( msr <= 0x1fff )
510 {
511 __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
512 __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
513 }
514 else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
515 {
516 msr &= 0x1fff;
517 __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
518 __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
519 }
520 }
522 static int construct_vmcs(struct vcpu *v)
523 {
524 struct domain *d = v->domain;
525 uint16_t sysenter_cs;
526 unsigned long sysenter_eip;
528 vmx_vmcs_enter(v);
530 /* VMCS controls. */
531 __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
533 v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
534 v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
536 if ( paging_mode_hap(d) )
537 {
538 v->arch.hvm_vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
539 CPU_BASED_CR3_LOAD_EXITING |
540 CPU_BASED_CR3_STORE_EXITING);
541 }
542 else
543 {
544 v->arch.hvm_vmx.secondary_exec_control &=
545 ~(SECONDARY_EXEC_ENABLE_EPT |
546 SECONDARY_EXEC_UNRESTRICTED_GUEST);
547 vmx_vmexit_control &= ~(VM_EXIT_SAVE_GUEST_PAT |
548 VM_EXIT_LOAD_HOST_PAT);
549 vmx_vmentry_control &= ~VM_ENTRY_LOAD_GUEST_PAT;
550 }
552 /* Do not enable the Monitor Trap Flag unless we are single-step debugging. */
553 v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
555 __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
556 __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
557 __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
559 if ( cpu_has_vmx_secondary_exec_control )
560 __vmwrite(SECONDARY_VM_EXEC_CONTROL,
561 v->arch.hvm_vmx.secondary_exec_control);
563 /* MSR access bitmap. */
564 if ( cpu_has_vmx_msr_bitmap )
565 {
566 unsigned long *msr_bitmap = alloc_xenheap_page();
568 if ( msr_bitmap == NULL )
569 return -ENOMEM;
571 memset(msr_bitmap, ~0, PAGE_SIZE);
572 v->arch.hvm_vmx.msr_bitmap = msr_bitmap;
573 __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));
575 vmx_disable_intercept_for_msr(v, MSR_FS_BASE);
576 vmx_disable_intercept_for_msr(v, MSR_GS_BASE);
577 vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
578 vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
579 vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
580 if ( cpu_has_vmx_pat && paging_mode_hap(d) )
581 vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
582 }
584 /* I/O access bitmap. */
585 __vmwrite(IO_BITMAP_A, virt_to_maddr((char *)hvm_io_bitmap + 0));
586 __vmwrite(IO_BITMAP_B, virt_to_maddr((char *)hvm_io_bitmap + PAGE_SIZE));
588 /* Host data selectors. */
589 __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
590 __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
591 __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
592 __vmwrite(HOST_FS_SELECTOR, 0);
593 __vmwrite(HOST_GS_SELECTOR, 0);
594 __vmwrite(HOST_FS_BASE, 0);
595 __vmwrite(HOST_GS_BASE, 0);
597 /* Host control registers. */
598 v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
599 __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
600 __vmwrite(HOST_CR4, mmu_cr4_features);
602 /* Host CS:RIP. */
603 __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
604 __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
606 /* Host SYSENTER CS:RIP. */
607 rdmsrl(MSR_IA32_SYSENTER_CS, sysenter_cs);
608 __vmwrite(HOST_SYSENTER_CS, sysenter_cs);
609 rdmsrl(MSR_IA32_SYSENTER_EIP, sysenter_eip);
610 __vmwrite(HOST_SYSENTER_EIP, sysenter_eip);
612 /* MSR intercepts. */
613 __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
614 __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
615 __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
617 __vmwrite(VM_ENTRY_INTR_INFO, 0);
619 __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
620 __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
622 __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
623 __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
625 __vmwrite(CR3_TARGET_COUNT, 0);
627 __vmwrite(GUEST_ACTIVITY_STATE, 0);
629 /* Guest segment bases. */
630 __vmwrite(GUEST_ES_BASE, 0);
631 __vmwrite(GUEST_SS_BASE, 0);
632 __vmwrite(GUEST_DS_BASE, 0);
633 __vmwrite(GUEST_FS_BASE, 0);
634 __vmwrite(GUEST_GS_BASE, 0);
635 __vmwrite(GUEST_CS_BASE, 0);
637 /* Guest segment limits. */
638 __vmwrite(GUEST_ES_LIMIT, ~0u);
639 __vmwrite(GUEST_SS_LIMIT, ~0u);
640 __vmwrite(GUEST_DS_LIMIT, ~0u);
641 __vmwrite(GUEST_FS_LIMIT, ~0u);
642 __vmwrite(GUEST_GS_LIMIT, ~0u);
643 __vmwrite(GUEST_CS_LIMIT, ~0u);
645 /* Guest segment AR bytes. */
646 __vmwrite(GUEST_ES_AR_BYTES, 0xc093); /* read/write, accessed */
647 __vmwrite(GUEST_SS_AR_BYTES, 0xc093);
648 __vmwrite(GUEST_DS_AR_BYTES, 0xc093);
649 __vmwrite(GUEST_FS_AR_BYTES, 0xc093);
650 __vmwrite(GUEST_GS_AR_BYTES, 0xc093);
651 __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */
653 /* Guest IDT. */
654 __vmwrite(GUEST_IDTR_BASE, 0);
655 __vmwrite(GUEST_IDTR_LIMIT, 0);
657 /* Guest GDT. */
658 __vmwrite(GUEST_GDTR_BASE, 0);
659 __vmwrite(GUEST_GDTR_LIMIT, 0);
661 /* Guest LDT. */
662 __vmwrite(GUEST_LDTR_AR_BYTES, 0x0082); /* LDT */
663 __vmwrite(GUEST_LDTR_SELECTOR, 0);
664 __vmwrite(GUEST_LDTR_BASE, 0);
665 __vmwrite(GUEST_LDTR_LIMIT, 0);
667 /* Guest TSS. */
668 __vmwrite(GUEST_TR_AR_BYTES, 0x008b); /* 32-bit TSS (busy) */
669 __vmwrite(GUEST_TR_BASE, 0);
670 __vmwrite(GUEST_TR_LIMIT, 0xff);
672 __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
673 __vmwrite(GUEST_DR7, 0);
674 __vmwrite(VMCS_LINK_POINTER, ~0UL);
675 #if defined(__i386__)
676 __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
677 #endif
679 __vmwrite(EXCEPTION_BITMAP,
680 HVM_TRAP_MASK
681 | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
682 | (1U << TRAP_no_device)
683 | (1U << TRAP_invalid_op));
685 v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
686 hvm_update_guest_cr(v, 0);
688 v->arch.hvm_vcpu.guest_cr[4] = 0;
689 hvm_update_guest_cr(v, 4);
691 if ( cpu_has_vmx_tpr_shadow )
692 {
693 __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
694 page_to_maddr(vcpu_vlapic(v)->regs_page));
695 __vmwrite(TPR_THRESHOLD, 0);
696 }
698 if ( paging_mode_hap(d) )
699 {
700 __vmwrite(EPT_POINTER, d->arch.hvm_domain.vmx.ept_control.eptp);
701 #ifdef __i386__
702 __vmwrite(EPT_POINTER_HIGH,
703 d->arch.hvm_domain.vmx.ept_control.eptp >> 32);
704 #endif
705 }
707 if ( cpu_has_vmx_vpid )
708 {
709 v->arch.hvm_vmx.vpid =
710 v->domain->arch.hvm_domain.vmx.vpid_base + v->vcpu_id;
711 __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vmx.vpid);
712 }
714 if ( cpu_has_vmx_pat && paging_mode_hap(d) )
715 {
716 u64 host_pat, guest_pat;
718 rdmsrl(MSR_IA32_CR_PAT, host_pat);
719 guest_pat = 0x7040600070406ULL;
721 __vmwrite(HOST_PAT, host_pat);
722 __vmwrite(GUEST_PAT, guest_pat);
723 #ifdef __i386__
724 __vmwrite(HOST_PAT_HIGH, host_pat >> 32);
725 __vmwrite(GUEST_PAT_HIGH, guest_pat >> 32);
726 #endif
727 }
729 vmx_vmcs_exit(v);
731 paging_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */
733 vmx_vlapic_msr_changed(v);
735 return 0;
736 }
738 int vmx_read_guest_msr(u32 msr, u64 *val)
739 {
740 struct vcpu *curr = current;
741 unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
742 const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
744 for ( i = 0; i < msr_count; i++ )
745 {
746 if ( msr_area[i].index == msr )
747 {
748 *val = msr_area[i].data;
749 return 0;
750 }
751 }
753 return -ESRCH;
754 }
756 int vmx_write_guest_msr(u32 msr, u64 val)
757 {
758 struct vcpu *curr = current;
759 unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
760 struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
762 for ( i = 0; i < msr_count; i++ )
763 {
764 if ( msr_area[i].index == msr )
765 {
766 msr_area[i].data = val;
767 return 0;
768 }
769 }
771 return -ESRCH;
772 }
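/*
 * The guest MSR area allocated below is installed as both the VM-exit
 * MSR-store list and the VM-entry MSR-load list, so every MSR added via
 * vmx_add_guest_msr() is saved on VM exit and restored on VM entry.
 */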
774 int vmx_add_guest_msr(u32 msr)
775 {
776 struct vcpu *curr = current;
777 unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
778 struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
780 if ( msr_area == NULL )
781 {
782 if ( (msr_area = alloc_xenheap_page()) == NULL )
783 return -ENOMEM;
784 curr->arch.hvm_vmx.msr_area = msr_area;
785 __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
786 __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
787 }
789 for ( i = 0; i < msr_count; i++ )
790 if ( msr_area[i].index == msr )
791 return 0;
793 if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
794 return -ENOSPC;
796 msr_area[msr_count].index = msr;
797 msr_area[msr_count].mbz = 0;
798 msr_area[msr_count].data = 0;
799 curr->arch.hvm_vmx.msr_count = ++msr_count;
800 __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
801 __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
803 return 0;
804 }
806 int vmx_add_host_load_msr(u32 msr)
807 {
808 struct vcpu *curr = current;
809 unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
810 struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
812 if ( msr_area == NULL )
813 {
814 if ( (msr_area = alloc_xenheap_page()) == NULL )
815 return -ENOMEM;
816 curr->arch.hvm_vmx.host_msr_area = msr_area;
817 __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
818 }
820 for ( i = 0; i < msr_count; i++ )
821 if ( msr_area[i].index == msr )
822 return 0;
824 if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
825 return -ENOSPC;
827 msr_area[msr_count].index = msr;
828 msr_area[msr_count].mbz = 0;
829 rdmsrl(msr, msr_area[msr_count].data);
830 curr->arch.hvm_vmx.host_msr_count = ++msr_count;
831 __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
833 return 0;
834 }
836 int vmx_create_vmcs(struct vcpu *v)
837 {
838 struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
839 int rc;
841 if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
842 return -ENOMEM;
844 INIT_LIST_HEAD(&arch_vmx->active_list);
845 __vmpclear(virt_to_maddr(arch_vmx->vmcs));
846 arch_vmx->active_cpu = -1;
847 arch_vmx->launched = 0;
849 if ( (rc = construct_vmcs(v)) != 0 )
850 {
851 vmx_free_vmcs(arch_vmx->vmcs);
852 return rc;
853 }
855 return 0;
856 }
858 void vmx_destroy_vmcs(struct vcpu *v)
859 {
860 struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
862 vmx_clear_vmcs(v);
864 vmx_free_vmcs(arch_vmx->vmcs);
866 free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
867 free_xenheap_page(v->arch.hvm_vmx.msr_area);
868 free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
869 }
871 void vm_launch_fail(void)
872 {
873 unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
874 printk("<vm_launch_fail> error code %lx\n", error);
875 domain_crash_synchronous();
876 }
878 void vm_resume_fail(void)
879 {
880 unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
881 printk("<vm_resume_fail> error code %lx\n", error);
882 domain_crash_synchronous();
883 }
885 static void wbinvd_ipi(void *info)
886 {
887 wbinvd();
888 }
890 void vmx_do_resume(struct vcpu *v)
891 {
892 bool_t debug_state;
894 if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
895 {
896 if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
897 vmx_load_vmcs(v);
898 }
899 else
900 {
901 /*
902 * For a pass-through domain, the guest's PCI-E device driver may use
903 * "Non-Snoop" I/O and explicitly issue WBINVD or CLFLUSH against RAM.
904 * Since migration may occur before WBINVD or CLFLUSH, we need to
905 * maintain data consistency either by:
906 * 1: flushing cache (wbinvd) when the guest is scheduled out if
907 * there is no wbinvd exit, or
908 * 2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
909 */
910 if ( has_arch_pdevs(v->domain) && !cpu_has_wbinvd_exiting )
911 {
912 int cpu = v->arch.hvm_vmx.active_cpu;
913 if ( cpu != -1 )
914 on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1);
915 }
917 vmx_clear_vmcs(v);
918 vmx_load_vmcs(v);
919 hvm_migrate_timers(v);
920 vmx_set_host_env(v);
921 vpid_sync_vcpu_all(v);
922 }
924 debug_state = v->domain->debugger_attached;
925 if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
926 {
927 v->arch.hvm_vcpu.debug_state_latch = debug_state;
928 vmx_update_debug_state(v);
929 }
931 hvm_do_resume(v);
932 reset_stack_and_jump(vmx_asm_do_vmentry);
933 }
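/* Read a VMCS field with __vmread_safe(), returning 0 if the read fails. */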
935 static unsigned long vmr(unsigned long field)
936 {
937 int rc;
938 unsigned long val;
939 val = __vmread_safe(field, &rc);
940 return rc ? 0 : val;
941 }
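/*
 * The dump helpers below rely on the guest segment VMCS fields
 * (SELECTOR, AR_BYTES, LIMIT, BASE) being spaced identically for every
 * segment register, so offsets computed from the ES encodings apply to
 * CS, SS, DS, FS, GS, LDTR and TR as well.
 */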
943 static void vmx_dump_sel(char *name, uint32_t selector)
944 {
945 uint32_t sel, attr, limit;
946 uint64_t base;
947 sel = vmr(selector);
948 attr = vmr(selector + (GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR));
949 limit = vmr(selector + (GUEST_ES_LIMIT - GUEST_ES_SELECTOR));
950 base = vmr(selector + (GUEST_ES_BASE - GUEST_ES_SELECTOR));
951 printk("%s: sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016"PRIx64"\n",
952 name, sel, attr, limit, base);
953 }
955 static void vmx_dump_sel2(char *name, uint32_t lim)
956 {
957 uint32_t limit;
958 uint64_t base;
959 limit = vmr(lim);
960 base = vmr(lim + (GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
961 printk("%s: limit=0x%08x, base=0x%016"PRIx64"\n",
962 name, limit, base);
963 }
965 void vmcs_dump_vcpu(struct vcpu *v)
966 {
967 struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;
968 unsigned long long x;
970 if ( v == current )
971 regs = guest_cpu_user_regs();
973 vmx_vmcs_enter(v);
975 printk("*** Guest State ***\n");
976 printk("CR0: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
977 (unsigned long long)vmr(GUEST_CR0),
978 (unsigned long long)vmr(CR0_READ_SHADOW),
979 (unsigned long long)vmr(CR0_GUEST_HOST_MASK));
980 printk("CR4: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
981 (unsigned long long)vmr(GUEST_CR4),
982 (unsigned long long)vmr(CR4_READ_SHADOW),
983 (unsigned long long)vmr(CR4_GUEST_HOST_MASK));
984 printk("CR3: actual=0x%016llx, target_count=%d\n",
985 (unsigned long long)vmr(GUEST_CR3),
986 (int)vmr(CR3_TARGET_COUNT));
987 printk(" target0=%016llx, target1=%016llx\n",
988 (unsigned long long)vmr(CR3_TARGET_VALUE0),
989 (unsigned long long)vmr(CR3_TARGET_VALUE1));
990 printk(" target2=%016llx, target3=%016llx\n",
991 (unsigned long long)vmr(CR3_TARGET_VALUE2),
992 (unsigned long long)vmr(CR3_TARGET_VALUE3));
993 printk("RSP = 0x%016llx (0x%016llx) RIP = 0x%016llx (0x%016llx)\n",
994 (unsigned long long)vmr(GUEST_RSP),
995 (unsigned long long)regs->esp,
996 (unsigned long long)vmr(GUEST_RIP),
997 (unsigned long long)regs->eip);
998 printk("RFLAGS=0x%016llx (0x%016llx) DR7 = 0x%016llx\n",
999 (unsigned long long)vmr(GUEST_RFLAGS),
1000 (unsigned long long)regs->eflags,
1001 (unsigned long long)vmr(GUEST_DR7));
1002 printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
1003 (unsigned long long)vmr(GUEST_SYSENTER_ESP),
1004 (int)vmr(GUEST_SYSENTER_CS),
1005 (unsigned long long)vmr(GUEST_SYSENTER_EIP));
1006 vmx_dump_sel("CS", GUEST_CS_SELECTOR);
1007 vmx_dump_sel("DS", GUEST_DS_SELECTOR);
1008 vmx_dump_sel("SS", GUEST_SS_SELECTOR);
1009 vmx_dump_sel("ES", GUEST_ES_SELECTOR);
1010 vmx_dump_sel("FS", GUEST_FS_SELECTOR);
1011 vmx_dump_sel("GS", GUEST_GS_SELECTOR);
1012 vmx_dump_sel2("GDTR", GUEST_GDTR_LIMIT);
1013 vmx_dump_sel("LDTR", GUEST_LDTR_SELECTOR);
1014 vmx_dump_sel2("IDTR", GUEST_IDTR_LIMIT);
1015 vmx_dump_sel("TR", GUEST_TR_SELECTOR);
1016 printk("Guest PAT = 0x%08x%08x\n",
1017 (uint32_t)vmr(GUEST_PAT_HIGH), (uint32_t)vmr(GUEST_PAT));
1018 x = (unsigned long long)vmr(TSC_OFFSET_HIGH) << 32;
1019 x |= (uint32_t)vmr(TSC_OFFSET);
1020 printk("TSC Offset = %016llx\n", x);
1021 x = (unsigned long long)vmr(GUEST_IA32_DEBUGCTL_HIGH) << 32;
1022 x |= (uint32_t)vmr(GUEST_IA32_DEBUGCTL);
1023 printk("DebugCtl=%016llx DebugExceptions=%016llx\n", x,
1024 (unsigned long long)vmr(GUEST_PENDING_DBG_EXCEPTIONS));
1025 printk("Interruptibility=%04x ActivityState=%04x\n",
1026 (int)vmr(GUEST_INTERRUPTIBILITY_INFO),
1027 (int)vmr(GUEST_ACTIVITY_STATE));
1029 printk("*** Host State ***\n");
1030 printk("RSP = 0x%016llx RIP = 0x%016llx\n",
1031 (unsigned long long)vmr(HOST_RSP),
1032 (unsigned long long)vmr(HOST_RIP));
1033 printk("CS=%04x DS=%04x ES=%04x FS=%04x GS=%04x SS=%04x TR=%04x\n",
1034 (uint16_t)vmr(HOST_CS_SELECTOR),
1035 (uint16_t)vmr(HOST_DS_SELECTOR),
1036 (uint16_t)vmr(HOST_ES_SELECTOR),
1037 (uint16_t)vmr(HOST_FS_SELECTOR),
1038 (uint16_t)vmr(HOST_GS_SELECTOR),
1039 (uint16_t)vmr(HOST_SS_SELECTOR),
1040 (uint16_t)vmr(HOST_TR_SELECTOR));
1041 printk("FSBase=%016llx GSBase=%016llx TRBase=%016llx\n",
1042 (unsigned long long)vmr(HOST_FS_BASE),
1043 (unsigned long long)vmr(HOST_GS_BASE),
1044 (unsigned long long)vmr(HOST_TR_BASE));
1045 printk("GDTBase=%016llx IDTBase=%016llx\n",
1046 (unsigned long long)vmr(HOST_GDTR_BASE),
1047 (unsigned long long)vmr(HOST_IDTR_BASE));
1048 printk("CR0=%016llx CR3=%016llx CR4=%016llx\n",
1049 (unsigned long long)vmr(HOST_CR0),
1050 (unsigned long long)vmr(HOST_CR3),
1051 (unsigned long long)vmr(HOST_CR4));
1052 printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
1053 (unsigned long long)vmr(HOST_SYSENTER_ESP),
1054 (int)vmr(HOST_SYSENTER_CS),
1055 (unsigned long long)vmr(HOST_SYSENTER_EIP));
1056 printk("Host PAT = 0x%08x%08x\n",
1057 (uint32_t)vmr(HOST_PAT_HIGH), (uint32_t)vmr(HOST_PAT));
1059 printk("*** Control State ***\n");
1060 printk("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
1061 (uint32_t)vmr(PIN_BASED_VM_EXEC_CONTROL),
1062 (uint32_t)vmr(CPU_BASED_VM_EXEC_CONTROL),
1063 (uint32_t)vmr(SECONDARY_VM_EXEC_CONTROL));
1064 printk("EntryControls=%08x ExitControls=%08x\n",
1065 (uint32_t)vmr(VM_ENTRY_CONTROLS),
1066 (uint32_t)vmr(VM_EXIT_CONTROLS));
1067 printk("ExceptionBitmap=%08x\n",
1068 (uint32_t)vmr(EXCEPTION_BITMAP));
1069 printk("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
1070 (uint32_t)vmr(VM_ENTRY_INTR_INFO),
1071 (uint32_t)vmr(VM_ENTRY_EXCEPTION_ERROR_CODE),
1072 (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN));
1073 printk("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
1074 (uint32_t)vmr(VM_EXIT_INTR_INFO),
1075 (uint32_t)vmr(VM_EXIT_INTR_ERROR_CODE),
1076 (uint32_t)vmr(VM_ENTRY_INSTRUCTION_LEN));
1077 printk(" reason=%08x qualification=%08x\n",
1078 (uint32_t)vmr(VM_EXIT_REASON),
1079 (uint32_t)vmr(EXIT_QUALIFICATION));
1080 printk("IDTVectoring: info=%08x errcode=%08x\n",
1081 (uint32_t)vmr(IDT_VECTORING_INFO),
1082 (uint32_t)vmr(IDT_VECTORING_ERROR_CODE));
1083 printk("TPR Threshold = 0x%02x\n",
1084 (uint32_t)vmr(TPR_THRESHOLD));
1085 printk("EPT pointer = 0x%08x%08x\n",
1086 (uint32_t)vmr(EPT_POINTER_HIGH), (uint32_t)vmr(EPT_POINTER));
1087 printk("Virtual processor ID = 0x%04x\n",
1088 (uint32_t)vmr(VIRTUAL_PROCESSOR_ID));
1090 vmx_vmcs_exit(v);
1091 }
1093 static void vmcs_dump(unsigned char ch)
1094 {
1095 struct domain *d;
1096 struct vcpu *v;
1098 printk("*********** VMCS Areas **************\n");
1100 rcu_read_lock(&domlist_read_lock);
1102 for_each_domain ( d )
1103 {
1104 if ( !is_hvm_domain(d) )
1105 continue;
1106 printk("\n>>> Domain %d <<<\n", d->domain_id);
1107 for_each_vcpu ( d, v )
1108 {
1109 printk("\tVCPU %d\n", v->vcpu_id);
1110 vmcs_dump_vcpu(v);
1111 }
1112 }
1114 rcu_read_unlock(&domlist_read_lock);
1116 printk("**************************************\n");
1117 }
1119 void setup_vmcs_dump(void)
1120 {
1121 register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
1122 }
1125 /*
1126 * Local variables:
1127 * mode: C
1128 * c-set-style: "BSD"
1129 * c-basic-offset: 4
1130 * tab-width: 4
1131 * indent-tabs-mode: nil
1132 * End:
1133 */