xen/arch/x86/hvm/vmx/vmcs.c @ 10634:82f481bda1c7 (direct-io.hg)

[HVM][VMX] Move vmcs and I/O bitmap allocation into
vmx_initialise_guest_resources().

Signed-off-by: Xin B Li <xin.b.li@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>

Author: kfraser@localhost.localdomain
Date:   Wed Jul 05 11:21:19 2006 +0100

/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <asm/shadow.h>
#include <xen/keyhandler.h>

#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif

static int vmcs_size;
static int vmcs_order;
static u32 vmcs_revision_id;
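
/*
 * Discover the VMCS geometry once at boot: per the Intel SDM, the low half
 * of IA32_VMX_BASIC holds the VMCS revision identifier, and bits 44:32 hold
 * the size in bytes of the VMCS region. Both are needed before any VMCS
 * can be allocated.
 */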
void vmx_init_vmcs_config(void)
{
    u32 vmx_msr_low, vmx_msr_high;

    if ( vmcs_size )
        return;

    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);

    vmcs_revision_id = vmx_msr_low;

    vmcs_size = vmx_msr_high & 0x1fff;
    vmcs_order = get_order_from_bytes(vmcs_size);
}
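
/*
 * Per the SDM, software must zero the VMCS region and write the revision
 * identifier into its first word before the region is ever handed to
 * VMPTRLD; hence the memset below must stay.
 */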
static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_pages(vmcs_order)) == NULL )
    {
        DPRINTK("Failed to allocate VMCS.\n");
        return NULL;
    }

    memset(vmcs, 0, vmcs_size); /* don't remove this */
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}

static void vmx_free_vmcs(struct vmcs_struct *vmcs)
{
    free_xenheap_pages(vmcs, vmcs_order);
}
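
/*
 * VMCLEAR must execute on the CPU where the VMCS was last active:
 * __vmx_clear_vmcs() is either called directly (when that CPU is the
 * current one) or shipped to the right CPU as an IPI callback by
 * vmx_clear_vmcs().
 */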
static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;

    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));

    v->arch.hvm_vmx.active_cpu = -1;
    v->arch.hvm_vmx.launched = 0;
}

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    if ( cpu == -1 )
        return;

    if ( cpu == smp_processor_id() )
        return __vmx_clear_vmcs(v);

    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
}

static void vmx_load_vmcs(struct vcpu *v)
{
    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    v->arch.hvm_vmx.active_cpu = smp_processor_id();
}

void vmx_vmcs_enter(struct vcpu *v)
{
    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions. This leads to some XXX TODOs XXX:
     *  1. Move construct_vmcs() much earlier, to domain creation or
     *     context initialisation.
     *  2. VMPTRLD as soon as we context-switch to a HVM VCPU.
     *  3. VMCS destruction needs to happen later (from domain_destroy()).
     * We can relax this a bit if a paused VCPU always commits its
     * architectural state to a software structure.
     */
    if ( v == current )
        return;

    vcpu_pause(v);
    spin_lock(&v->arch.hvm_vmx.vmcs_lock);

    vmx_clear_vmcs(v);
    vmx_load_vmcs(v);
}

void vmx_vmcs_exit(struct vcpu *v)
{
    if ( v == current )
        return;

    /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
    vmx_clear_vmcs(v);
    if ( hvm_guest(current) )
        vmx_load_vmcs(current);

    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
    vcpu_unpause(v);
}
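
/*
 * Typical use of the enter/exit pair (a sketch; vmcs_dump() below does
 * exactly this to inspect a remote VCPU):
 *
 *     vmx_vmcs_enter(v);            // pause @v, make its VMCS current
 *     __vmread(GUEST_RIP, &rip);    // VMREAD/VMWRITE now target @v's VMCS
 *     vmx_vmcs_exit(v);             // restore current's VMCS, unpause @v
 */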

struct vmcs_struct *vmx_alloc_host_vmcs(void)
{
    return vmx_alloc_vmcs();
}

void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
{
    vmx_free_vmcs(vmcs);
}
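
/*
 * The two I/O bitmaps together cover the full 16-bit port space (per the
 * SDM, bitmap A covers ports 0x0000-0x7fff and bitmap B covers
 * 0x8000-0xffff); a set bit forces a VM exit when the guest accesses
 * that port.
 */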
static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
{
    int error = 0;

    error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                       MONITOR_PIN_BASED_EXEC_CONTROLS);

    error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);

    error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);

    error |= __vmwrite(IO_BITMAP_A, (u64)virt_to_maddr(arch_vmx->io_bitmap_a));
    error |= __vmwrite(IO_BITMAP_B, (u64)virt_to_maddr(arch_vmx->io_bitmap_b));

    return error;
}

#define GUEST_LAUNCH_DS     0x08
#define GUEST_LAUNCH_CS     0x10
#define GUEST_SEGMENT_LIMIT 0xffffffff
#define HOST_SEGMENT_LIMIT  0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
};
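
/*
 * Record the per-CPU host state (IDTR, GDTR, TR, stack pointer) that the
 * processor reloads on every VM exit. This must be refreshed whenever the
 * VCPU migrates to a different physical CPU; see arch_vmx_do_resume().
 */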
static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int tr, cpu, error = 0;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;

    cpu = smp_processor_id();
    __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
    host_env.idtr_limit = desc.size;
    host_env.idtr_base = desc.address;
    error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);

    __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;
    error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];
    error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
    error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
}

static void vmx_do_launch(struct vcpu *v)
{
    /* Update CR3, GDT, LDT, TR */
    unsigned int error = 0;
    unsigned long cr0, cr4;

    if ( v->vcpu_id == 0 )
        hvm_setup_platform(v->domain);

    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
    {
        printk("VMX domain bind port %d to vcpu %d failed!\n",
               iopacket_port(v), v->vcpu_id);
        domain_crash_synchronous();
    }

    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));

    clear_bit(iopacket_port(v),
              &v->domain->shared_info->evtchn_mask[0]);

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );

    error |= __vmwrite(GUEST_CR0, cr0);
    cr0 &= ~X86_CR0_PG;
    error |= __vmwrite(CR0_READ_SHADOW, cr0);
    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                       MONITOR_CPU_BASED_EXEC_CONTROLS);
    v->arch.hvm_vcpu.u.vmx.exec_control = MONITOR_CPU_BASED_EXEC_CONTROLS;

    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );

    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);

    error |= __vmwrite(CR4_READ_SHADOW, cr4);

    vmx_stts();

    if ( hvm_apic_support(v->domain) )
        vlapic_init(v);

    vmx_set_host_env(v);
    init_timer(&v->arch.hvm_vmx.hlt_timer, hlt_timer_fn, v, v->processor);

    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    error |= __vmwrite(GUEST_LDTR_BASE, 0);
    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);

    error |= __vmwrite(GUEST_TR_BASE, 0);
    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);

    __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
    __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));

    v->arch.schedule_tail = arch_vmx_do_resume;

    /* init guest tsc to start from 0 */
    set_guest_time(v, 0);
}

/*
 * Initially set the same environment as host.
 */
static inline int construct_init_vmcs_guest(cpu_user_regs_t *regs)
{
    int error = 0;
    union vmcs_arbytes arbytes;
    unsigned long dr7;
    unsigned long eflags;

    /* MSR */
    error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);

    error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
    /* interrupt */
    error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
    /* mask */
    error |= __vmwrite(CR0_GUEST_HOST_MASK, -1UL);
    error |= __vmwrite(CR4_GUEST_HOST_MASK, -1UL);

    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    /* CR3 target count */
    error |= __vmwrite(CR3_TARGET_COUNT, 0);

    /* Guest Selectors */
    error |= __vmwrite(GUEST_ES_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_SS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_DS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_FS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_GS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_CS_SELECTOR, GUEST_LAUNCH_CS);

    /* Guest segment bases */
    error |= __vmwrite(GUEST_ES_BASE, 0);
    error |= __vmwrite(GUEST_SS_BASE, 0);
    error |= __vmwrite(GUEST_DS_BASE, 0);
    error |= __vmwrite(GUEST_FS_BASE, 0);
    error |= __vmwrite(GUEST_GS_BASE, 0);
    error |= __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits */
    error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);

    /* Guest segment AR bytes */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type = 3: read/write data, accessed */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */

    error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* type = 0xb: execute/read code, accessed */
    error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    /* Guest GDT */
    error |= __vmwrite(GUEST_GDTR_BASE, 0);
    error |= __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest IDT */
    error |= __vmwrite(GUEST_IDTR_BASE, 0);
    error |= __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest LDT & TSS */
    arbytes.fields.s = 0;                   /* not code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
    /* CR3 is set in vmx_final_setup_guest */

    error |= __vmwrite(GUEST_RSP, 0);
    error |= __vmwrite(GUEST_RIP, regs->eip);

    /* Guest EFLAGS */
    eflags = regs->eflags & ~HVM_EFLAGS_RESERVED_0; /* clear 0s */
    eflags |= HVM_EFLAGS_RESERVED_1;                /* set 1s */
    error |= __vmwrite(GUEST_RFLAGS, eflags);

    error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
    error |= __vmwrite(GUEST_DR7, dr7);
    error |= __vmwrite(VMCS_LINK_POINTER, 0xffffffff);
    error |= __vmwrite(VMCS_LINK_POINTER_HIGH, 0xffffffff);

    return error;
}
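
/*
 * Fill in the host-state area: the selectors, bases and control registers
 * the CPU loads on every VM exit, plus the hypervisor entry point
 * (HOST_RIP) and, on x86-64, the FS/GS bases taken from the MSRs.
 */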
static inline int construct_vmcs_host(void)
{
    int error = 0;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
    unsigned long crn;

    /* Host Selectors */
    error |= __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
#if defined (__i386__)
    error |= __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_FS_BASE, 0);
    error |= __vmwrite(HOST_GS_BASE, 0);
#else
    rdmsrl(MSR_FS_BASE, fs_base);
    rdmsrl(MSR_GS_BASE, gs_base);
    error |= __vmwrite(HOST_FS_BASE, fs_base);
    error |= __vmwrite(HOST_GS_BASE, gs_base);
#endif
    error |= __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
    error |= __vmwrite(HOST_CR0, crn); /* same CR0 */

    /* CR3 is set in vmx_final_setup_hostos */
    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : );
    error |= __vmwrite(HOST_CR4, crn);

    error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
#ifdef __x86_64__
    /* TBD: support cr8 for 64-bit guest */
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
    __vmwrite(TPR_THRESHOLD, 0);
    __vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);
#endif

    return error;
}

/*
 * The working VMCS pointer must have been loaded (VMPTRLD) just before
 * this function is entered.
 */
static int construct_vmcs(struct vcpu *v,
                          cpu_user_regs_t *regs)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
    int error;

    if ( (error = construct_vmcs_controls(arch_vmx)) ) {
        printk("construct_vmcs: construct_vmcs_controls failed.\n");
        return error;
    }

    /* host selectors */
    if ( (error = construct_vmcs_host()) ) {
        printk("construct_vmcs: construct_vmcs_host failed.\n");
        return error;
    }

    /* guest selectors */
    if ( (error = construct_init_vmcs_guest(regs)) ) {
        printk("construct_vmcs: construct_init_vmcs_guest failed.\n");
        return error;
    }

    if ( (error = __vmwrite(EXCEPTION_BITMAP,
                            MONITOR_DEFAULT_EXCEPTION_BITMAP)) ) {
        printk("construct_vmcs: setting exception bitmap failed.\n");
        return error;
    }

    if ( regs->eflags & EF_TF )
        error = __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
    else
        error = __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);

    return error;
}
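
/*
 * Note that vmx_create_vmcs() only allocates and VMCLEARs the region; the
 * VMCS contents are filled in by construct_vmcs() on the VCPU's first
 * launch (see arch_vmx_do_launch() below).
 */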
int vmx_create_vmcs(struct vcpu *v)
{
    if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
        return -ENOMEM;
    __vmx_clear_vmcs(v);
    return 0;
}

void vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    if ( arch_vmx->vmcs == NULL )
        return;

    vmx_clear_vmcs(v);

    free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER);
    free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER);

    arch_vmx->io_bitmap_a = NULL;
    arch_vmx->io_bitmap_b = NULL;

    vmx_free_vmcs(arch_vmx->vmcs);
    arch_vmx->vmcs = NULL;
}
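
/*
 * On a failed VMLAUNCH/VMRESUME the CPU reports the cause in the
 * VM-instruction error field; per the SDM that field is only meaningful
 * when the instruction failed with a valid working-VMCS pointer.
 */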
void vm_launch_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_launch_fail> error code %lx\n", error);
    __hvm_bug(guest_cpu_user_regs());
}

void vm_resume_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_resume_fail> error code %lx\n", error);
    __hvm_bug(guest_cpu_user_regs());
}
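
/*
 * Schedule tail for a VCPU that has run before: if we are still on the CPU
 * where its VMCS was last loaded, a VMPTRLD suffices; otherwise VMCLEAR it
 * on the old CPU, load it here, and refresh the per-CPU host state and
 * timers for the new CPU.
 */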
void arch_vmx_do_resume(struct vcpu *v)
{
    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        vmx_load_vmcs(v);
    }
    else
    {
        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        vmx_migrate_timers(v);
        vmx_set_host_env(v);
    }

    vmx_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}

void arch_vmx_do_launch(struct vcpu *v)
{
    cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;

    vmx_load_vmcs(v);

    if ( construct_vmcs(v, regs) < 0 )
    {
        if ( v->vcpu_id == 0 ) {
            printk("Failed to construct VMCS for BSP.\n");
        } else {
            printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id);
        }
        domain_crash_synchronous();
    }

    vmx_do_launch(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
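
/*
 * VMCS field encodings carry their width in bits 14:13 (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width); that width is what indexes
 * the fmt[]/err[] tables in print_section() below.
 */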

/* Dump a section of VMCS */
static void print_section(char *header, uint32_t start,
                          uint32_t end, int incr)
{
    uint32_t addr, j;
    unsigned long val;
    int code;
    char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
    char *err[4] = {"------ ", "------------------ ",
                    "---------- ", "------------------ "};

    /* Find width of the field (encoded in bits 14:13 of address) */
    code = (start >> 13) & 3;

    if ( header )
        printk("\t %s", header);

    for ( addr = start, j = 0; addr <= end; addr += incr, j++ ) {

        if ( !(j & 3) )
            printk("\n\t\t0x%08x: ", addr);

        if ( !__vmread(addr, &val) )
            printk(fmt[code], val);
        else
            printk("%s", err[code]);
    }

    printk("\n");
}

/* Dump current VMCS */
void vmcs_dump_vcpu(void)
{
    print_section("16-bit Guest-State Fields", 0x800, 0x80e, 2);
    print_section("16-bit Host-State Fields", 0xc00, 0xc0c, 2);
    print_section("64-bit Control Fields", 0x2000, 0x2013, 1);
    print_section("64-bit Guest-State Fields", 0x2800, 0x2803, 1);
    print_section("32-bit Control Fields", 0x4000, 0x401c, 2);
    print_section("32-bit RO Data Fields", 0x4400, 0x440e, 2);
    print_section("32-bit Guest-State Fields", 0x4800, 0x482a, 2);
    print_section("32-bit Host-State Fields", 0x4c00, 0x4c00, 2);
    print_section("Natural 64-bit Control Fields", 0x6000, 0x600e, 2);
    print_section("64-bit RO Data Fields", 0x6400, 0x640A, 2);
    print_section("Natural 64-bit Guest-State Fields", 0x6800, 0x6826, 2);
    print_section("Natural 64-bit Host-State Fields", 0x6c00, 0x6c16, 2);
}

static void vmcs_dump(unsigned char ch)
{
    struct domain *d;
    struct vcpu *v;

    printk("*********** VMCS Areas **************\n");
    for_each_domain ( d ) {
        printk("\n>>> Domain %d <<<\n", d->domain_id);
        for_each_vcpu ( d, v ) {

            /*
             * Presumably, if a domain is not an HVM guest,
             * the very first CPU will not pass this test.
             */
            if ( !hvm_guest(v) ) {
                printk("\t\tNot HVM guest\n");
                break;
            }
            printk("\tVCPU %d\n", v->vcpu_id);

            vmx_vmcs_enter(v);
            vmcs_dump_vcpu();
            vmx_vmcs_exit(v);
        }
    }

    printk("**************************************\n");
}

static int __init setup_vmcs_dump(void)
{
    register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
    return 0;
}

__initcall(setup_vmcs_dump);

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */