direct-io.hg: view of xen/arch/x86/hvm/vmx/vmcs.c @ 10974:415614d3a1ee

[hvm/qemu] Flip the device model over to using the new Xen event-channel
support.

Signed-off-by: Steven Smith <ssmith@xensource.com>
author    sos22@douglas.cl.cam.ac.uk
date      Tue Aug 08 11:19:29 2006 +0100 (2006-08-08)
parents   16b4abe0f925
children  d20e1835c24b
/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <asm/shadow.h>
#include <xen/keyhandler.h>

#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif

static int vmcs_size;
static int vmcs_order;
static u32 vmcs_revision_id;
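
/*
 * IA32_VMX_BASIC MSR layout (Intel SDM): bits 30:0 of the low dword hold
 * the VMCS revision identifier; bits 12:0 of the high dword hold the size
 * of the VMCS region in bytes (hence the 0x1fff mask below).
 */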
void vmx_init_vmcs_config(void)
{
    u32 vmx_msr_low, vmx_msr_high;

    if ( vmcs_size )
        return;

    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);

    vmcs_revision_id = vmx_msr_low;

    vmcs_size  = vmx_msr_high & 0x1fff;
    vmcs_order = get_order_from_bytes(vmcs_size);
}
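
/*
 * Software must write the processor's VMCS revision identifier into the
 * first 32 bits of a VMCS region before passing it to VMPTRLD, or the
 * load fails (Intel SDM).
 */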
static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_pages(vmcs_order)) == NULL )
    {
        DPRINTK("Failed to allocate VMCS.\n");
        return NULL;
    }

    memset(vmcs, 0, vmcs_size); /* don't remove this */
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}

static void vmx_free_vmcs(struct vmcs_struct *vmcs)
{
    free_xenheap_pages(vmcs, vmcs_order);
}
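
/*
 * VMCLEAR flushes the VMCS's cached working state back to memory and
 * marks it inactive and not-current.  It must execute on the CPU on
 * which the VMCS was last loaded, which is why vmx_clear_vmcs() below
 * may have to IPI the owning CPU.
 */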
static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;

    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));

    v->arch.hvm_vmx.active_cpu = -1;
    v->arch.hvm_vmx.launched   = 0;
}

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    if ( cpu == -1 )
        return;

    if ( cpu == smp_processor_id() )
        return __vmx_clear_vmcs(v);

    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
}
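
/* VMPTRLD makes the VMCS active and current on the executing CPU. */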
static void vmx_load_vmcs(struct vcpu *v)
{
    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    v->arch.hvm_vmx.active_cpu = smp_processor_id();
}

void vmx_vmcs_enter(struct vcpu *v)
{
    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions. This leads to some XXX TODOs XXX:
     *  1. Move construct_vmcs() much earlier, to domain creation or
     *     context initialisation.
     *  2. VMPTRLD as soon as we context-switch to a HVM VCPU.
     *  3. VMCS destruction needs to happen later (from domain_destroy()).
     * We can relax this a bit if a paused VCPU always commits its
     * architectural state to a software structure.
     */
    if ( v == current )
        return;

    vcpu_pause(v);
    spin_lock(&v->arch.hvm_vmx.vmcs_lock);

    vmx_clear_vmcs(v);
    vmx_load_vmcs(v);
}

void vmx_vmcs_exit(struct vcpu *v)
{
    if ( v == current )
        return;

    /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
    vmx_clear_vmcs(v);
    if ( hvm_guest(current) )
        vmx_load_vmcs(current);

    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
    vcpu_unpause(v);
}

struct vmcs_struct *vmx_alloc_host_vmcs(void)
{
    return vmx_alloc_vmcs();
}

void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
{
    vmx_free_vmcs(vmcs);
}
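
/*
 * The two I/O bitmaps are 4K pages covering ports 0x0000-0x7fff
 * (bitmap A) and 0x8000-0xffff (bitmap B); a set bit forces a VM exit
 * on access to the corresponding port (Intel SDM).
 */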
static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
{
    int error = 0;

    error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                       MONITOR_PIN_BASED_EXEC_CONTROLS);

    error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);

    error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);

    error |= __vmwrite(IO_BITMAP_A, virt_to_maddr(arch_vmx->io_bitmap_a));
    error |= __vmwrite(IO_BITMAP_B, virt_to_maddr(arch_vmx->io_bitmap_b));

#ifdef CONFIG_X86_PAE
    /* On PAE the bitmaps may in future be above 4GB, so write the high words. */
    error |= __vmwrite(IO_BITMAP_A_HIGH,
                       (paddr_t)virt_to_maddr(arch_vmx->io_bitmap_a) >> 32);
    error |= __vmwrite(IO_BITMAP_B_HIGH,
                       (paddr_t)virt_to_maddr(arch_vmx->io_bitmap_b) >> 32);
#endif

    return error;
}
#define GUEST_LAUNCH_DS         0x08
#define GUEST_LAUNCH_CS         0x10
#define GUEST_SEGMENT_LIMIT     0xffffffff
#define HOST_SEGMENT_LIMIT      0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
};
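
/*
 * Host-state VMCS fields are restored by the processor on every VM exit.
 * IDTR, GDTR, TR and the hypervisor stack are all per physical CPU, so
 * these fields must be rewritten whenever the VCPU moves to a different
 * CPU (see arch_vmx_do_resume()).
 */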
static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int tr, cpu, error = 0;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;

    cpu = smp_processor_id();
    __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
    host_env.idtr_limit = desc.size;
    host_env.idtr_base = desc.address;
    error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);

    __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;
    error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];
    error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
    error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
}
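
/*
 * The guest physically runs with the host's paging configuration (shadow
 * paging), while the read shadows written below make CR0.PG and
 * CR4.PAE/PGE/VMXE appear clear to the guest, approximating the state a
 * real CPU has at reset.
 */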
static void vmx_do_launch(struct vcpu *v)
{
    /* Update CR3, GDT, LDT, TR */
    unsigned int error = 0;
    unsigned long cr0, cr4;

    if ( v->vcpu_id == 0 )
        hvm_setup_platform(v->domain);

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );

    error |= __vmwrite(GUEST_CR0, cr0);
    cr0 &= ~X86_CR0_PG;
    error |= __vmwrite(CR0_READ_SHADOW, cr0);
    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                       MONITOR_CPU_BASED_EXEC_CONTROLS);
    v->arch.hvm_vcpu.u.vmx.exec_control = MONITOR_CPU_BASED_EXEC_CONTROLS;

    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );

    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);

    error |= __vmwrite(CR4_READ_SHADOW, cr4);

    vmx_stts();

    if ( hvm_apic_support(v->domain) )
        vlapic_init(v);

    vmx_set_host_env(v);
    init_timer(&v->arch.hvm_vmx.hlt_timer, hlt_timer_fn, v, v->processor);

    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    error |= __vmwrite(GUEST_LDTR_BASE, 0);
    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);

    error |= __vmwrite(GUEST_TR_BASE, 0);
    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);

    __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
    __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));

    v->arch.schedule_tail = arch_vmx_do_resume;

    /* init guest tsc to start from 0 */
    set_guest_time(v, 0);
}
/*
 * Initially set the same environment as the host.
 */
static inline int construct_init_vmcs_guest(cpu_user_regs_t *regs)
{
    int error = 0;
    union vmcs_arbytes arbytes;
    unsigned long dr7;
    unsigned long eflags;

    /* MSR */
    error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);

    error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);

    /* interrupt */
    error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);

    /* mask */
    error |= __vmwrite(CR0_GUEST_HOST_MASK, -1UL);
    error |= __vmwrite(CR4_GUEST_HOST_MASK, -1UL);
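
    /*
     * A guest/host mask of all ones means every CR0/CR4 bit is owned by
     * the host: guest reads of those bits return the read shadow, and
     * guest writes to them cause VM exits (Intel SDM).
     */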
    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    /* CR3 targets */
    error |= __vmwrite(CR3_TARGET_COUNT, 0);

    /* Guest selectors */
    error |= __vmwrite(GUEST_ES_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_SS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_DS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_FS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_GS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_CS_SELECTOR, GUEST_LAUNCH_CS);

    /* Guest segment bases */
    error |= __vmwrite(GUEST_ES_BASE, 0);
    error |= __vmwrite(GUEST_SS_BASE, 0);
    error |= __vmwrite(GUEST_DS_BASE, 0);
    error |= __vmwrite(GUEST_FS_BASE, 0);
    error |= __vmwrite(GUEST_GS_BASE, 0);
    error |= __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits */
    error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);

    /* Guest segment AR bytes */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type = 3: read/write data, accessed */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */

    error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* type = 0xb: execute/read code, accessed */
    error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    /* Guest GDT */
    error |= __vmwrite(GUEST_GDTR_BASE, 0);
    error |= __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest IDT */
    error |= __vmwrite(GUEST_IDTR_BASE, 0);
    error |= __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest LDT & TSS */
    arbytes.fields.s = 0;                   /* not a code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);

    /* CR3 is set in vmx_final_setup_guest */
    error |= __vmwrite(GUEST_RSP, 0);
    error |= __vmwrite(GUEST_RIP, regs->eip);

    /* Guest EFLAGS */
    eflags = regs->eflags & ~HVM_EFLAGS_RESERVED_0; /* clear 0s */
    eflags |= HVM_EFLAGS_RESERVED_1;                /* set 1s */
    error |= __vmwrite(GUEST_RFLAGS, eflags);

    error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
    error |= __vmwrite(GUEST_DR7, dr7);
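
    /*
     * A VMCS link pointer of all ones means "no linked VMCS"; any other
     * value makes VM entry fail on hardware of this era (Intel SDM).
     */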
    error |= __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
    error |= __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif

    return error;
}
static inline int construct_vmcs_host(void)
{
    int error = 0;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
    unsigned long crn;

    /* Host selectors */
    error |= __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
#if defined(__i386__)
    error |= __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_FS_BASE, 0);
    error |= __vmwrite(HOST_GS_BASE, 0);
#else
    rdmsrl(MSR_FS_BASE, fs_base);
    rdmsrl(MSR_GS_BASE, gs_base);
    error |= __vmwrite(HOST_FS_BASE, fs_base);
    error |= __vmwrite(HOST_GS_BASE, gs_base);
#endif
    error |= __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
    error |= __vmwrite(HOST_CR0, crn); /* same CR0 */

    /* CR3 is set in vmx_final_setup_hostos */
    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : );
    error |= __vmwrite(HOST_CR4, crn);

    error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
#ifdef __x86_64__
    /* TBD: support cr8 for 64-bit guest */
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
    __vmwrite(TPR_THRESHOLD, 0);
    __vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);
#endif

    return error;
}
/*
 * The working VMCS pointer must have been loaded (VMPTRLD) just before
 * entering this function.
 */
static int construct_vmcs(struct vcpu *v,
                          cpu_user_regs_t *regs)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
    int error;

    if ( (error = construct_vmcs_controls(arch_vmx)) ) {
        printk("construct_vmcs: construct_vmcs_controls failed.\n");
        return error;
    }

    /* host selectors */
    if ( (error = construct_vmcs_host()) ) {
        printk("construct_vmcs: construct_vmcs_host failed.\n");
        return error;
    }

    /* guest selectors */
    if ( (error = construct_init_vmcs_guest(regs)) ) {
        printk("construct_vmcs: construct_init_vmcs_guest failed.\n");
        return error;
    }

    if ( (error = __vmwrite(EXCEPTION_BITMAP,
                            MONITOR_DEFAULT_EXCEPTION_BITMAP)) ) {
        printk("construct_vmcs: setting exception bitmap failed.\n");
        return error;
    }

    if ( regs->eflags & EF_TF )
        error = __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
    else
        error = __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);

    return error;
}
int vmx_create_vmcs(struct vcpu *v)
{
    if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
        return -ENOMEM;
    __vmx_clear_vmcs(v);
    return 0;
}

void vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    if ( arch_vmx->vmcs == NULL )
        return;

    vmx_clear_vmcs(v);

    free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER);
    free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER);

    arch_vmx->io_bitmap_a = NULL;
    arch_vmx->io_bitmap_b = NULL;

    vmx_free_vmcs(arch_vmx->vmcs);
    arch_vmx->vmcs = NULL;
}
void vm_launch_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_launch_fail> error code %lx\n", error);
    __hvm_bug(guest_cpu_user_regs());
}

void vm_resume_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_resume_fail> error code %lx\n", error);
    __hvm_bug(guest_cpu_user_regs());
}
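
/*
 * Fast path: if the VMCS is still active on this CPU it can simply be
 * reloaded with VMPTRLD.  Otherwise the VCPU has migrated: the VMCS must
 * be VMCLEARed on its previous CPU, loaded here, and the per-CPU host
 * state and timers moved across with it.
 */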
void arch_vmx_do_resume(struct vcpu *v)
{
    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        vmx_load_vmcs(v);
    }
    else
    {
        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        vmx_migrate_timers(v);
        vmx_set_host_env(v);
    }

    vmx_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
void arch_vmx_do_launch(struct vcpu *v)
{
    cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;

    vmx_load_vmcs(v);

    if ( construct_vmcs(v, regs) < 0 )
    {
        if ( v->vcpu_id == 0 ) {
            printk("Failed to construct VMCS for BSP.\n");
        } else {
            printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id);
        }
        domain_crash_synchronous();
    }

    vmx_do_launch(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
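
/*
 * A VMCS field encoding carries the field's width in bits 14:13:
 * 0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width (Intel SDM).
 * print_section() uses this to choose a print format.
 */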
/* Dump a section of the VMCS. */
static void print_section(char *header, uint32_t start,
                          uint32_t end, int incr)
{
    uint32_t addr, j;
    unsigned long val;
    int code;
    char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
    char *err[4] = {"------ ", "------------------ ",
                    "---------- ", "------------------ "};

    /* Find the width of the field (encoded in bits 14:13 of the address). */
    code = (start >> 13) & 3;

    if ( header )
        printk("\t %s", header);

    for ( addr = start, j = 0; addr <= end; addr += incr, j++ )
    {
        if ( !(j & 3) )
            printk("\n\t\t0x%08x: ", addr);

        if ( !__vmread(addr, &val) )
            printk(fmt[code], val);
        else
            printk("%s", err[code]);
    }

    printk("\n");
}
/* Dump the current VMCS. */
void vmcs_dump_vcpu(void)
{
    print_section("16-bit Guest-State Fields", 0x800, 0x80e, 2);
    print_section("16-bit Host-State Fields", 0xc00, 0xc0c, 2);
    print_section("64-bit Control Fields", 0x2000, 0x2013, 1);
    print_section("64-bit Guest-State Fields", 0x2800, 0x2803, 1);
    print_section("32-bit Control Fields", 0x4000, 0x401c, 2);
    print_section("32-bit RO Data Fields", 0x4400, 0x440e, 2);
    print_section("32-bit Guest-State Fields", 0x4800, 0x482a, 2);
    print_section("32-bit Host-State Fields", 0x4c00, 0x4c00, 2);
    print_section("Natural 64-bit Control Fields", 0x6000, 0x600e, 2);
    print_section("64-bit RO Data Fields", 0x6400, 0x640A, 2);
    print_section("Natural 64-bit Guest-State Fields", 0x6800, 0x6826, 2);
    print_section("Natural 64-bit Host-State Fields", 0x6c00, 0x6c16, 2);
}
static void vmcs_dump(unsigned char ch)
{
    struct domain *d;
    struct vcpu *v;

    printk("*********** VMCS Areas **************\n");
    for_each_domain(d) {
        printk("\n>>> Domain %d <<<\n", d->domain_id);
        for_each_vcpu(d, v) {
            /*
             * Presumably, if a domain is not an HVM guest,
             * its very first VCPU will not pass this test.
             */
            if ( !hvm_guest(v) ) {
                printk("\t\tNot HVM guest\n");
                break;
            }
            printk("\tVCPU %d\n", v->vcpu_id);

            vmx_vmcs_enter(v);
            vmcs_dump_vcpu();
            vmx_vmcs_exit(v);
        }
    }

    printk("**************************************\n");
}
static int __init setup_vmcs_dump(void)
{
    register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
    return 0;
}

__initcall(setup_vmcs_dump);
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */