ia64/xen-unstable: xen/arch/x86/hvm/vmx/vmcs.c @ 14635:5c52e5ca8459

hvm: Clean up handling of exception intercepts.
Only intercept #DB/#BP if a debugger is attached.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    Keir Fraser <keir@xensource.com>
date      Wed Mar 28 18:47:17 2007 +0100
parents   97826d77bd4d
children  ea55ead0fd47

/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <xen/keyhandler.h>
#include <asm/shadow.h>

/* Basic flags for Pin-based VM-execution controls. */
#define MONITOR_PIN_BASED_EXEC_CONTROLS                 \
    ( PIN_BASED_EXT_INTR_MASK |                         \
      PIN_BASED_NMI_EXITING )

/* Basic flags for CPU-based VM-execution controls. */
#ifdef __x86_64__
#define MONITOR_CPU_BASED_EXEC_CONTROLS_SUBARCH         \
    ( CPU_BASED_CR8_LOAD_EXITING |                      \
      CPU_BASED_CR8_STORE_EXITING )
#else
#define MONITOR_CPU_BASED_EXEC_CONTROLS_SUBARCH 0
#endif
#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    ( MONITOR_CPU_BASED_EXEC_CONTROLS_SUBARCH |         \
      CPU_BASED_HLT_EXITING |                           \
      CPU_BASED_INVDPG_EXITING |                        \
      CPU_BASED_MWAIT_EXITING |                         \
      CPU_BASED_MOV_DR_EXITING |                        \
      CPU_BASED_ACTIVATE_IO_BITMAP |                    \
      CPU_BASED_USE_TSC_OFFSETING )

/* Basic flags for VM-Exit controls. */
#ifdef __x86_64__
#define MONITOR_VM_EXIT_CONTROLS_SUBARCH VM_EXIT_IA32E_MODE
#else
#define MONITOR_VM_EXIT_CONTROLS_SUBARCH 0
#endif
#define MONITOR_VM_EXIT_CONTROLS                        \
    ( MONITOR_VM_EXIT_CONTROLS_SUBARCH |                \
      VM_EXIT_ACK_INTR_ON_EXIT )

/* Basic flags for VM-Entry controls. */
#define MONITOR_VM_ENTRY_CONTROLS 0x00000000

/* Dynamic (run-time adjusted) execution control flags. */
static u32 vmx_pin_based_exec_control;
static u32 vmx_cpu_based_exec_control;
static u32 vmx_vmexit_control;
static u32 vmx_vmentry_control;

static u32 vmcs_revision_id;
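
/*
 * Each IA32_VMX_*_CTLS capability MSR describes the allowed settings for
 * the corresponding control field: bits set in the low 32 bits must be 1,
 * bits clear in the high 32 bits must be 0.  adjust_vmx_controls() forces
 * the mandatory bits on and BUG()s if a requested bit is unsupported.
 */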
static u32 adjust_vmx_controls(u32 ctrls, u32 msr)
{
    u32 vmx_msr_low, vmx_msr_high;

    rdmsr(msr, vmx_msr_low, vmx_msr_high);

    /* Bit == 0 means must be zero. */
    BUG_ON(ctrls & ~vmx_msr_high);

    /* Bit == 1 means must be one. */
    ctrls |= vmx_msr_low;

    return ctrls;
}
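
/*
 * Probe the VMX capability MSRs on every CPU.  The boot CPU records the
 * resulting control settings and VMCS revision; secondary CPUs merely
 * verify that they derive the same values, i.e. that all CPUs in the
 * system expose identical VMX capabilities.
 */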
void vmx_init_vmcs_config(void)
{
    u32 vmx_msr_low, vmx_msr_high;
    u32 _vmx_pin_based_exec_control;
    u32 _vmx_cpu_based_exec_control;
    u32 _vmx_vmexit_control;
    u32 _vmx_vmentry_control;

    _vmx_pin_based_exec_control =
        adjust_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
                            MSR_IA32_VMX_PINBASED_CTLS_MSR);
    _vmx_cpu_based_exec_control =
        adjust_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
                            MSR_IA32_VMX_PROCBASED_CTLS_MSR);
    _vmx_vmexit_control =
        adjust_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
                            MSR_IA32_VMX_EXIT_CTLS_MSR);
    _vmx_vmentry_control =
        adjust_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
                            MSR_IA32_VMX_ENTRY_CTLS_MSR);

    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);

    if ( smp_processor_id() == 0 )
    {
        vmcs_revision_id = vmx_msr_low;
        vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
        vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
        vmx_vmexit_control = _vmx_vmexit_control;
        vmx_vmentry_control = _vmx_vmentry_control;
    }
    else
    {
        BUG_ON(vmcs_revision_id != vmx_msr_low);
        BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
        BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
        BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
        BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
    }

    /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
    BUG_ON((vmx_msr_high & 0x1fff) > PAGE_SIZE);
}
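
/*
 * The VMCS region is allocated as a single zeroed page whose first 32-bit
 * word must hold the revision identifier reported by MSR_IA32_VMX_BASIC_MSR
 * (cached above in vmcs_revision_id); the remainder of the region is
 * accessed only through VMREAD/VMWRITE.
 */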
static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_page()) == NULL )
    {
        gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
        return NULL;
    }

    memset(vmcs, 0, PAGE_SIZE);
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}

static void vmx_free_vmcs(struct vmcs_struct *vmcs)
{
    free_xenheap_page(vmcs);
}
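
/*
 * VMCLEAR must be executed on the CPU on which the VMCS is currently
 * active, so vmx_clear_vmcs() either clears it locally or sends an IPI to
 * the owning CPU via on_selected_cpus().  active_cpu == -1 means the VMCS
 * is not loaded on any CPU.
 */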
static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;

    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));

    v->arch.hvm_vmx.active_cpu = -1;
    v->arch.hvm_vmx.launched = 0;
}

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    if ( cpu == -1 )
        return;

    if ( cpu == smp_processor_id() )
        return __vmx_clear_vmcs(v);

    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
}

static void vmx_load_vmcs(struct vcpu *v)
{
    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    v->arch.hvm_vmx.active_cpu = smp_processor_id();
}

void vmx_vmcs_enter(struct vcpu *v)
{
    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions.
     */
    if ( v == current )
        return;

    vcpu_pause(v);
    spin_lock(&v->arch.hvm_vmx.vmcs_lock);

    vmx_clear_vmcs(v);
    vmx_load_vmcs(v);
}

void vmx_vmcs_exit(struct vcpu *v)
{
    if ( v == current )
        return;

    /* Don't confuse vmx_do_resume (for @v or @current!) */
    vmx_clear_vmcs(v);
    if ( is_hvm_vcpu(current) )
        vmx_load_vmcs(current);

    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
    vcpu_unpause(v);
}

struct vmcs_struct *vmx_alloc_host_vmcs(void)
{
    return vmx_alloc_vmcs();
}

void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
{
    vmx_free_vmcs(vmcs);
}

#define GUEST_SEGMENT_LIMIT 0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
};
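
/*
 * The host state loaded on VM exit (IDTR, GDTR, TR and the stack pointer)
 * is per-physical-CPU, so these VMCS fields must be rewritten whenever the
 * VCPU is resumed on a different CPU -- see the call from vmx_do_resume().
 */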
static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int tr, cpu;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;

    cpu = smp_processor_id();
    __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
    host_env.idtr_limit = desc.size;
    host_env.idtr_base = desc.address;
    __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);

    __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;
    __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];
    __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    __vmwrite(HOST_TR_BASE, host_env.tr_base);

    /*
     * Skip end of cpu_user_regs when entering the hypervisor because the
     * CPU does not save context onto the stack. SS,RSP,CS,RIP,RFLAGS,etc
     * all get saved into the VMCS instead.
     */
    __vmwrite(HOST_RSP,
              (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
}

static void construct_vmcs(struct vcpu *v)
{
    unsigned long cr0, cr4;
    union vmcs_arbytes arbytes;

    vmx_vmcs_enter(v);

    v->arch.hvm_vmx.cpu_cr2 = 0;
    v->arch.hvm_vmx.cpu_cr3 = 0;
    memset(&v->arch.hvm_vmx.msr_state, 0, sizeof(v->arch.hvm_vmx.msr_state));
    v->arch.hvm_vmx.vmxassist_enabled = 0;

    /* VMCS controls. */
    __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
    __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;

    /* I/O access bitmap. */
    __vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap));
    __vmwrite(IO_BITMAP_B, virt_to_maddr(hvm_io_bitmap + PAGE_SIZE));

    /* Host data selectors. */
    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
#if defined(__i386__)
    __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_FS_BASE, 0);
    __vmwrite(HOST_GS_BASE, 0);
#elif defined(__x86_64__)
    {
        unsigned long msr;
        rdmsrl(MSR_FS_BASE, msr); __vmwrite(HOST_FS_BASE, msr);
        rdmsrl(MSR_GS_BASE, msr); __vmwrite(HOST_GS_BASE, msr);
    }
#endif

    /* Host control registers. */
    __vmwrite(HOST_CR0, read_cr0());
    __vmwrite(HOST_CR4, read_cr4());

    /* Host CS:RIP. */
    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);

    /* MSR intercepts. */
    __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
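
    /*
     * With an all-ones guest/host mask, all CR0/CR4 bits are host-owned:
     * guest reads of these registers are satisfied from the read shadows
     * programmed below, and guest attempts to change any masked bit
     * relative to its shadow value cause a VM exit.
     */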
    __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
    __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);

    __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    __vmwrite(CR3_TARGET_COUNT, 0);

    __vmwrite(GUEST_ACTIVITY_STATE, 0);

    /* Guest segment bases. */
    __vmwrite(GUEST_ES_BASE, 0);
    __vmwrite(GUEST_SS_BASE, 0);
    __vmwrite(GUEST_DS_BASE, 0);
    __vmwrite(GUEST_FS_BASE, 0);
    __vmwrite(GUEST_GS_BASE, 0);
    __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits. */
    __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);

    /* Guest segment AR bytes. */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type = 3 */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */
    __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
    arbytes.fields.seg_type = 0xb;          /* type = 0xb */
    __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    /* Guest GDT. */
    __vmwrite(GUEST_GDTR_BASE, 0);
    __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest IDT. */
    __vmwrite(GUEST_IDTR_BASE, 0);
    __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest LDT and TSS. */
    arbytes.fields.s = 0;                   /* not code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);

    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __vmwrite(GUEST_DR7, 0);
    __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
    __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif
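
    /*
     * Only #PF is intercepted by default; per this changeset, #DB and #BP
     * are added to the exception bitmap dynamically in vmx_do_resume(),
     * and only while a debugger is attached to the domain.
     */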
    __vmwrite(EXCEPTION_BITMAP, 1U << TRAP_page_fault);

    /* Guest CR0. */
    cr0 = read_cr0();
    v->arch.hvm_vmx.cpu_cr0 = cr0;
    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);

    /* Guest CR4. */
    cr4 = read_cr4();
    __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    v->arch.hvm_vmx.cpu_shadow_cr4 =
        cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);

#ifdef __x86_64__
    /* VLAPIC TPR optimisation. */
    v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
    v->arch.hvm_vcpu.u.vmx.exec_control &=
        ~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vcpu.u.vmx.exec_control);
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
              page_to_maddr(vcpu_vlapic(v)->regs_page));
    __vmwrite(TPR_THRESHOLD, 0);
#endif

    __vmwrite(GUEST_LDTR_SELECTOR, 0);
    __vmwrite(GUEST_LDTR_BASE, 0);
    __vmwrite(GUEST_LDTR_LIMIT, 0);

    __vmwrite(GUEST_TR_BASE, 0);
    __vmwrite(GUEST_TR_LIMIT, 0xff);

    vmx_vmcs_exit(v);

    paging_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */
}

int vmx_create_vmcs(struct vcpu *v)
{
    if ( v->arch.hvm_vmx.vmcs == NULL )
    {
        if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
            return -ENOMEM;

        __vmx_clear_vmcs(v);
    }

    construct_vmcs(v);

    return 0;
}

void vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    if ( arch_vmx->vmcs == NULL )
        return;

    vmx_clear_vmcs(v);

    vmx_free_vmcs(arch_vmx->vmcs);
    arch_vmx->vmcs = NULL;
}

void vm_launch_fail(unsigned long eflags)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_launch_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

void vm_resume_fail(unsigned long eflags)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_resume_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        vmx_load_vmcs(v);
    }
    else
    {
        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
    }
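
    /*
     * Resync the #DB/#BP exception intercepts with the domain's debugger
     * state.  debug_state_latch caches the last value applied, so the
     * EXCEPTION_BITMAP is only re-read and re-written when the debugger
     * attaches or detaches.
     */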
    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
        unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            intercepts |= mask;
        else
            intercepts &= ~mask;
        __vmwrite(EXCEPTION_BITMAP, intercepts);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
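
/*
 * Bits 14:13 of a VMCS field encoding give the field's width (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width); print_section() uses this to
 * pick a print format, and prints a run of dashes when __vmread_safe()
 * fails to read a field.
 */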
/* Dump a section of VMCS */
static void print_section(char *header, uint32_t start,
                          uint32_t end, int incr)
{
    uint32_t addr, j;
    unsigned long val;
    int code, rc;
    char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
    char *err[4] = {"------ ", "------------------ ",
                    "---------- ", "------------------ "};

    /* Find width of the field (encoded in bits 14:13 of address) */
    code = (start>>13)&3;

    if (header)
        printk("\t %s", header);

    for (addr=start, j=0; addr<=end; addr+=incr, j++) {

        if (!(j&3))
            printk("\n\t\t0x%08x: ", addr);

        val = __vmread_safe(addr, &rc);
        if (rc == 0)
            printk(fmt[code], val);
        else
            printk("%s", err[code]);
    }

    printk("\n");
}

/* Dump current VMCS */
void vmcs_dump_vcpu(void)
{
    print_section("16-bit Guest-State Fields", 0x800, 0x80e, 2);
    print_section("16-bit Host-State Fields", 0xc00, 0xc0c, 2);
    print_section("64-bit Control Fields", 0x2000, 0x2013, 1);
    print_section("64-bit Guest-State Fields", 0x2800, 0x2803, 1);
    print_section("32-bit Control Fields", 0x4000, 0x401c, 2);
    print_section("32-bit RO Data Fields", 0x4400, 0x440e, 2);
    print_section("32-bit Guest-State Fields", 0x4800, 0x482a, 2);
    print_section("32-bit Host-State Fields", 0x4c00, 0x4c00, 2);
    print_section("Natural 64-bit Control Fields", 0x6000, 0x600e, 2);
    print_section("64-bit RO Data Fields", 0x6400, 0x640a, 2);
    print_section("Natural 64-bit Guest-State Fields", 0x6800, 0x6826, 2);
    print_section("Natural 64-bit Host-State Fields", 0x6c00, 0x6c16, 2);
}
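
/*
 * 'v' debug-key handler: walk every HVM domain and dump the VMCS of each
 * of its VCPUs, using vmx_vmcs_enter/exit to make the remote VCPU's VMCS
 * current on this CPU while it is dumped.
 */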
static void vmcs_dump(unsigned char ch)
{
    struct domain *d;
    struct vcpu *v;

    printk("*********** VMCS Areas **************\n");

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        if ( !is_hvm_domain(d) )
            continue;
        printk("\n>>> Domain %d <<<\n", d->domain_id);
        for_each_vcpu ( d, v )
        {
            printk("\tVCPU %d\n", v->vcpu_id);
            vmx_vmcs_enter(v);
            vmcs_dump_vcpu();
            vmx_vmcs_exit(v);
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    printk("**************************************\n");
}

void setup_vmcs_dump(void)
{
    register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */