
view xen/arch/x86/hvm/vmx/vmcs.c @ 10666:16b4abe0f925

[HVM][VMX] Clean up some writes to 64-bit VMCS fields in 32-bit Xen.
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Jul 10 17:47:28 2006 +0100 (2006-07-10)
parents 82f481bda1c7
children 415614d3a1ee
/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <asm/shadow.h>
#include <xen/keyhandler.h>

#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
static int vmcs_size;
static int vmcs_order;
static u32 vmcs_revision_id;
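
/*
 * The low 32 bits of MSR_IA32_VMX_BASIC hold the VMCS revision identifier;
 * bits 44:32 (the low 13 bits of the high word, hence the 0x1fff mask
 * below) give the size in bytes of the VMCS region.
 */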
void vmx_init_vmcs_config(void)
{
    u32 vmx_msr_low, vmx_msr_high;

    if ( vmcs_size )
        return;

    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);

    vmcs_revision_id = vmx_msr_low;

    vmcs_size = vmx_msr_high & 0x1fff;
    vmcs_order = get_order_from_bytes(vmcs_size);
}

static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_pages(vmcs_order)) == NULL )
    {
        DPRINTK("Failed to allocate VMCS.\n");
        return NULL;
    }

    memset(vmcs, 0, vmcs_size); /* don't remove this */
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}

static void vmx_free_vmcs(struct vmcs_struct *vmcs)
{
    free_xenheap_pages(vmcs, vmcs_order);
}
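
/*
 * VMCLEAR flushes the given VMCS from the processor and marks it inactive
 * and not-launched. Afterwards the VMCS is not current on any CPU, so
 * active_cpu is reset and the next VM entry must use VMLAUNCH rather than
 * VMRESUME.
 */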
static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;

    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));

    v->arch.hvm_vmx.active_cpu = -1;
    v->arch.hvm_vmx.launched = 0;
}

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    if ( cpu == -1 )
        return;

    if ( cpu == smp_processor_id() )
        return __vmx_clear_vmcs(v);

    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
}
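
/*
 * VMPTRLD makes the VMCS current on this processor: subsequent VMREAD,
 * VMWRITE and VM entries operate on it. Record the CPU so the VMCS can
 * later be cleared remotely if the vCPU migrates.
 */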
static void vmx_load_vmcs(struct vcpu *v)
{
    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    v->arch.hvm_vmx.active_cpu = smp_processor_id();
}

void vmx_vmcs_enter(struct vcpu *v)
{
    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions. This leads to some XXX TODOs XXX:
     *  1. Move construct_vmcs() much earlier, to domain creation or
     *     context initialisation.
     *  2. VMPTRLD as soon as we context-switch to a HVM VCPU.
     *  3. VMCS destruction needs to happen later (from domain_destroy()).
     * We can relax this a bit if a paused VCPU always commits its
     * architectural state to a software structure.
     */
    if ( v == current )
        return;

    vcpu_pause(v);
    spin_lock(&v->arch.hvm_vmx.vmcs_lock);

    vmx_clear_vmcs(v);
    vmx_load_vmcs(v);
}

void vmx_vmcs_exit(struct vcpu *v)
{
    if ( v == current )
        return;

    /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
    vmx_clear_vmcs(v);
    if ( hvm_guest(current) )
        vmx_load_vmcs(current);

    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
    vcpu_unpause(v);
}

struct vmcs_struct *vmx_alloc_host_vmcs(void)
{
    return vmx_alloc_vmcs();
}

void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
{
    vmx_free_vmcs(vmcs);
}
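
/*
 * The I/O bitmaps are two 4-kbyte pages: bitmap A covers ports
 * 0x0000-0x7fff, bitmap B covers ports 0x8000-0xffff. A set bit forces a
 * VM exit when the guest accesses the corresponding port.
 */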
static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
{
    int error = 0;

    error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                       MONITOR_PIN_BASED_EXEC_CONTROLS);

    error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);

    error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);

    error |= __vmwrite(IO_BITMAP_A, virt_to_maddr(arch_vmx->io_bitmap_a));
    error |= __vmwrite(IO_BITMAP_B, virt_to_maddr(arch_vmx->io_bitmap_b));

#ifdef CONFIG_X86_PAE
    /* On PAE, the bitmaps may in future be above 4GB. Write the high words. */
    error |= __vmwrite(IO_BITMAP_A_HIGH,
                       (paddr_t)virt_to_maddr(arch_vmx->io_bitmap_a) >> 32);
    error |= __vmwrite(IO_BITMAP_B_HIGH,
                       (paddr_t)virt_to_maddr(arch_vmx->io_bitmap_b) >> 32);
#endif

    return error;
}

#define GUEST_LAUNCH_DS     0x08
#define GUEST_LAUNCH_CS     0x10
#define GUEST_SEGMENT_LIMIT 0xffffffff
#define HOST_SEGMENT_LIMIT  0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
};
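
/*
 * Host-state fields are loaded by the processor on every VM exit. The IDT,
 * GDT, TR and stack are per physical CPU, so this must be called again
 * whenever the vCPU is resumed on a different CPU (see arch_vmx_do_resume).
 */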
static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int tr, cpu, error = 0;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;

    cpu = smp_processor_id();
    __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
    host_env.idtr_limit = desc.size;
    host_env.idtr_base = desc.address;
    error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);

    __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;
    error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];
    error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
    error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
}

static void vmx_do_launch(struct vcpu *v)
{
    /* Update CR3, GDT, LDT, TR */
    unsigned int error = 0;
    unsigned long cr0, cr4;

    if ( v->vcpu_id == 0 )
        hvm_setup_platform(v->domain);

    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
    {
        printk("VMX domain bind port %d to vcpu %d failed!\n",
               iopacket_port(v), v->vcpu_id);
        domain_crash_synchronous();
    }

    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));

    clear_bit(iopacket_port(v),
              &v->domain->shared_info->evtchn_mask[0]);

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );

    error |= __vmwrite(GUEST_CR0, cr0);
    cr0 &= ~X86_CR0_PG;
    error |= __vmwrite(CR0_READ_SHADOW, cr0);
    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                       MONITOR_CPU_BASED_EXEC_CONTROLS);
    v->arch.hvm_vcpu.u.vmx.exec_control = MONITOR_CPU_BASED_EXEC_CONTROLS;

    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );

    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);

    error |= __vmwrite(CR4_READ_SHADOW, cr4);

    vmx_stts();

    if ( hvm_apic_support(v->domain) )
        vlapic_init(v);

    vmx_set_host_env(v);
    init_timer(&v->arch.hvm_vmx.hlt_timer, hlt_timer_fn, v, v->processor);

    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    error |= __vmwrite(GUEST_LDTR_BASE, 0);
    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);

    error |= __vmwrite(GUEST_TR_BASE, 0);
    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);

    __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
    __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));

    v->arch.schedule_tail = arch_vmx_do_resume;

    /* init guest tsc to start from 0 */
    set_guest_time(v, 0);
}

/*
 * Initially set the same environment as the host.
 */
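/*
 * The guest is launched in flat 32-bit protected mode: 4GB segments at
 * DPL 0, with RIP taken from the caller-supplied register state. The
 * CR0/CR4 guest/host masks are set to all-ones, so every bit of those
 * registers is host-owned: guest reads see the read shadows, and guest
 * writes that would change any bit cause a VM exit.
 */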
static inline int construct_init_vmcs_guest(cpu_user_regs_t *regs)
{
    int error = 0;
    union vmcs_arbytes arbytes;
    unsigned long dr7;
    unsigned long eflags;

    /* MSR */
    error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);

    error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);

    /* interrupt */
    error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);

    /* mask */
    error |= __vmwrite(CR0_GUEST_HOST_MASK, -1UL);
    error |= __vmwrite(CR4_GUEST_HOST_MASK, -1UL);

    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    /* CR3 target count */
    error |= __vmwrite(CR3_TARGET_COUNT, 0);

    /* Guest Selectors */
    error |= __vmwrite(GUEST_ES_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_SS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_DS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_FS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_GS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_CS_SELECTOR, GUEST_LAUNCH_CS);

    /* Guest segment bases */
    error |= __vmwrite(GUEST_ES_BASE, 0);
    error |= __vmwrite(GUEST_SS_BASE, 0);
    error |= __vmwrite(GUEST_DS_BASE, 0);
    error |= __vmwrite(GUEST_FS_BASE, 0);
    error |= __vmwrite(GUEST_GS_BASE, 0);
    error |= __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits */
    error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);

    /* Guest segment AR bytes */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type = 3 (read/write, accessed) */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */

    error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* type = 0xb (execute/read, accessed) */
    error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    /* Guest GDT */
    error |= __vmwrite(GUEST_GDTR_BASE, 0);
    error |= __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest IDT */
    error |= __vmwrite(GUEST_IDTR_BASE, 0);
    error |= __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest LDT & TSS */
    arbytes.fields.s = 0;                   /* not a code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
    /* CR3 is set in vmx_final_setup_guest */

    error |= __vmwrite(GUEST_RSP, 0);
    error |= __vmwrite(GUEST_RIP, regs->eip);

    /* Guest EFLAGS */
    eflags = regs->eflags & ~HVM_EFLAGS_RESERVED_0; /* clear 0s */
    eflags |= HVM_EFLAGS_RESERVED_1;                /* set 1s */
    error |= __vmwrite(GUEST_RFLAGS, eflags);

    error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
    error |= __vmwrite(GUEST_DR7, dr7);
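    /*
     * An all-ones VMCS link pointer means "no linked VMCS"; any other
     * value here causes VM entry to fail. On 32-bit builds this 64-bit
     * field is written as two 32-bit halves.
     */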
    error |= __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
    error |= __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif

    return error;
}

static inline int construct_vmcs_host(void)
{
    int error = 0;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
    unsigned long crn;

    /* Host Selectors */
    error |= __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
#if defined(__i386__)
    error |= __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_FS_BASE, 0);
    error |= __vmwrite(HOST_GS_BASE, 0);
#else
    rdmsrl(MSR_FS_BASE, fs_base);
    rdmsrl(MSR_GS_BASE, gs_base);
    error |= __vmwrite(HOST_FS_BASE, fs_base);
    error |= __vmwrite(HOST_GS_BASE, gs_base);
#endif
    error |= __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
    error |= __vmwrite(HOST_CR0, crn); /* same CR0 */

    /* CR3 is set in vmx_final_setup_hostos */
    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : );
    error |= __vmwrite(HOST_CR4, crn);

    error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
#ifdef __x86_64__
    /* TBD: support cr8 for 64-bit guest */
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
    __vmwrite(TPR_THRESHOLD, 0);
    __vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);
#endif

    return error;
}

/*
 * The working VMCS pointer must have been set properly
 * just before entering this function.
 */
static int construct_vmcs(struct vcpu *v,
                          cpu_user_regs_t *regs)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
    int error;

    if ( (error = construct_vmcs_controls(arch_vmx)) ) {
        printk("construct_vmcs: construct_vmcs_controls failed.\n");
        return error;
    }

    /* host selectors */
    if ( (error = construct_vmcs_host()) ) {
        printk("construct_vmcs: construct_vmcs_host failed.\n");
        return error;
    }

    /* guest selectors */
    if ( (error = construct_init_vmcs_guest(regs)) ) {
        printk("construct_vmcs: construct_init_vmcs_guest failed.\n");
        return error;
    }

    if ( (error = __vmwrite(EXCEPTION_BITMAP,
                            MONITOR_DEFAULT_EXCEPTION_BITMAP)) ) {
        printk("construct_vmcs: setting exception bitmap failed.\n");
        return error;
    }

    /* Intercept #DB while single-stepping (EFLAGS.TF set). */
    if ( regs->eflags & EF_TF )
        error = __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
    else
        error = __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);

    return error;
}

int vmx_create_vmcs(struct vcpu *v)
{
    if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
        return -ENOMEM;
    __vmx_clear_vmcs(v);
    return 0;
}

void vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    if ( arch_vmx->vmcs == NULL )
        return;

    vmx_clear_vmcs(v);

    free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER);
    free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER);

    arch_vmx->io_bitmap_a = NULL;
    arch_vmx->io_bitmap_b = NULL;

    vmx_free_vmcs(arch_vmx->vmcs);
    arch_vmx->vmcs = NULL;
}

void vm_launch_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_launch_fail> error code %lx\n", error);
    __hvm_bug(guest_cpu_user_regs());
}

void vm_resume_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_resume_fail> error code %lx\n", error);
    __hvm_bug(guest_cpu_user_regs());
}
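
/*
 * If the vCPU last ran on this CPU, its VMCS may still be current and can
 * simply be reloaded. Otherwise the VMCS must first be cleared on the CPU
 * where it was last active, then loaded here, and the per-CPU host state
 * (IDT/GDT/TR bases, stack) and timers rebound to this CPU.
 */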
void arch_vmx_do_resume(struct vcpu *v)
{
    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        vmx_load_vmcs(v);
    }
    else
    {
        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        vmx_migrate_timers(v);
        vmx_set_host_env(v);
    }

    vmx_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}

void arch_vmx_do_launch(struct vcpu *v)
{
    cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;

    vmx_load_vmcs(v);

    if ( construct_vmcs(v, regs) < 0 )
    {
        if ( v->vcpu_id == 0 ) {
            printk("Failed to construct VMCS for BSP.\n");
        } else {
            printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id);
        }
        domain_crash_synchronous();
    }

    vmx_do_launch(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
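
/*
 * Bits 14:13 of a VMCS field encoding give the field's width (16-bit,
 * 64-bit, 32-bit or natural-width, in that order), which selects the
 * print format used below.
 */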
/* Dump a section of VMCS */
static void print_section(char *header, uint32_t start,
                          uint32_t end, int incr)
{
    uint32_t addr, j;
    unsigned long val;
    int code;
    char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
    char *err[4] = {"------ ", "------------------ ",
                    "---------- ", "------------------ "};

    /* Find the width of the field (encoded in bits 14:13 of the address). */
    code = (start >> 13) & 3;

    if ( header )
        printk("\t %s", header);

    for ( addr = start, j = 0; addr <= end; addr += incr, j++ ) {

        if ( !(j & 3) )
            printk("\n\t\t0x%08x: ", addr);

        if ( !__vmread(addr, &val) )
            printk(fmt[code], val);
        else
            printk("%s", err[code]);
    }

    printk("\n");
}

/* Dump current VMCS */
void vmcs_dump_vcpu(void)
{
    print_section("16-bit Guest-State Fields", 0x800, 0x80e, 2);
    print_section("16-bit Host-State Fields", 0xc00, 0xc0c, 2);
    print_section("64-bit Control Fields", 0x2000, 0x2013, 1);
    print_section("64-bit Guest-State Fields", 0x2800, 0x2803, 1);
    print_section("32-bit Control Fields", 0x4000, 0x401c, 2);
    print_section("32-bit RO Data Fields", 0x4400, 0x440e, 2);
    print_section("32-bit Guest-State Fields", 0x4800, 0x482a, 2);
    print_section("32-bit Host-State Fields", 0x4c00, 0x4c00, 2);
    print_section("Natural 64-bit Control Fields", 0x6000, 0x600e, 2);
    print_section("64-bit RO Data Fields", 0x6400, 0x640a, 2);
    print_section("Natural 64-bit Guest-State Fields", 0x6800, 0x6826, 2);
    print_section("Natural 64-bit Host-State Fields", 0x6c00, 0x6c16, 2);
}

static void vmcs_dump(unsigned char ch)
{
    struct domain *d;
    struct vcpu *v;

    printk("*********** VMCS Areas **************\n");
    for_each_domain ( d ) {
        printk("\n>>> Domain %d <<<\n", d->domain_id);
        for_each_vcpu ( d, v ) {

            /*
             * Presumably, if a domain is not an HVM guest,
             * its very first VCPU will not pass this test.
             */
            if ( !hvm_guest(v) ) {
                printk("\t\tNot HVM guest\n");
                break;
            }
            printk("\tVCPU %d\n", v->vcpu_id);

            vmx_vmcs_enter(v);
            vmcs_dump_vcpu();
            vmx_vmcs_exit(v);
        }
    }

    printk("**************************************\n");
}
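
/*
 * Register the dump on the 'v' debug key: pressing 'v' on the Xen console
 * prints the VMCS of every HVM VCPU in the system.
 */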
static int __init setup_vmcs_dump(void)
{
    register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
    return 0;
}

__initcall(setup_vmcs_dump);

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */