direct-io.hg

view xen/arch/x86/hvm/vmx/vmcs.c @ 15534:da4c76340184

NativeDom 1:1 support for x86_64, 32bitbios reloc bug fix
author Guy Zana <guy@neocleus.com>
date Wed Sep 19 10:51:46 2007 +0200 (2007-09-19)
parents 57398acc1480

/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <xen/keyhandler.h>
#include <asm/shadow.h>

/* Dynamic (run-time adjusted) execution control flags. */
u32 vmx_pin_based_exec_control __read_mostly;
u32 vmx_cpu_based_exec_control __read_mostly;
u32 vmx_secondary_exec_control __read_mostly;
u32 vmx_vmexit_control __read_mostly;
u32 vmx_vmentry_control __read_mostly;
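/* Set if VM exits report INS/OUTS instruction info (IA32_VMX_BASIC[54]). */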
bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;

static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);

static u32 vmcs_revision_id __read_mostly;
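
/*
 * Combine the required (ctl_min) and optional (ctl_opt) controls, constrained
 * by the given VMX capability MSR: the low word gives bits that must be 1,
 * the high word gives bits that may be 1.
 */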
static u32 adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr)
{
    u32 vmx_msr_low, vmx_msr_high, ctl = ctl_min | ctl_opt;

    rdmsr(msr, vmx_msr_low, vmx_msr_high);

    ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
    ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

    /* Ensure minimum (required) set of control bits are supported. */
    BUG_ON(ctl_min & ~ctl);

    return ctl;
}

void vmx_init_vmcs_config(void)
{
    u32 vmx_msr_low, vmx_msr_high, min, opt;
    u32 _vmx_pin_based_exec_control;
    u32 _vmx_cpu_based_exec_control;
    u32 _vmx_secondary_exec_control = 0;
    u32 _vmx_vmexit_control;
    u32 _vmx_vmentry_control;

    min = (PIN_BASED_EXT_INTR_MASK |
           PIN_BASED_NMI_EXITING);
    opt = PIN_BASED_VIRTUAL_NMIS;
    _vmx_pin_based_exec_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_PINBASED_CTLS);

    min = (CPU_BASED_HLT_EXITING |
           CPU_BASED_INVLPG_EXITING |
           CPU_BASED_MWAIT_EXITING |
           CPU_BASED_MOV_DR_EXITING |
           CPU_BASED_ACTIVATE_IO_BITMAP |
           CPU_BASED_USE_TSC_OFFSETING);
    opt = CPU_BASED_ACTIVATE_MSR_BITMAP;
    opt |= CPU_BASED_TPR_SHADOW;
    opt |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
    _vmx_cpu_based_exec_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
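    /*
     * Without a TPR shadow on x86_64 we must exit on CR8 accesses so that
     * the virtual TPR can still be maintained by Xen.
     */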
#ifdef __x86_64__
    if ( !(_vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) )
    {
        min |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
        _vmx_cpu_based_exec_control = adjust_vmx_controls(
            min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
    }
#endif

    if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
    {
        min = 0;
        opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
        _vmx_secondary_exec_control = adjust_vmx_controls(
            min, opt, MSR_IA32_VMX_PROCBASED_CTLS2);
    }

#if defined(__i386__)
    /* If we can't virtualise APIC accesses, the TPR shadow is pointless. */
    if ( !(_vmx_secondary_exec_control &
           SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) )
        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

    min = VM_EXIT_ACK_INTR_ON_EXIT;
    opt = 0;
#ifdef __x86_64__
    min |= VM_EXIT_IA32E_MODE;
#endif
    _vmx_vmexit_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_EXIT_CTLS);

    min = opt = 0;
    _vmx_vmentry_control = adjust_vmx_controls(
        min, opt, MSR_IA32_VMX_ENTRY_CTLS);

    rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

    if ( smp_processor_id() == 0 )
    {
        vmcs_revision_id = vmx_msr_low;
        vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
        vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
        vmx_secondary_exec_control = _vmx_secondary_exec_control;
        vmx_vmexit_control = _vmx_vmexit_control;
        vmx_vmentry_control = _vmx_vmentry_control;
        cpu_has_vmx_ins_outs_instr_info = !!(vmx_msr_high & (1U<<22));
    }
    else
    {
        BUG_ON(vmcs_revision_id != vmx_msr_low);
        BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
        BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
        BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
        BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
        BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
        BUG_ON(cpu_has_vmx_ins_outs_instr_info != !!(vmx_msr_high & (1U<<22)));
    }

    /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
    BUG_ON((vmx_msr_high & 0x1fff) > PAGE_SIZE);
}

static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_page()) == NULL )
    {
        gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
        return NULL;
    }

    clear_page(vmcs);
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}

static void vmx_free_vmcs(struct vmcs_struct *vmcs)
{
    free_xenheap_page(vmcs);
}
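
/*
 * A VMCS must be VMCLEARed on the CPU where it was last loaded before it can
 * be used elsewhere, so __vmx_clear_vmcs() either runs locally or is sent to
 * the owning CPU as an IPI by vmx_clear_vmcs().
 */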
static void __vmx_clear_vmcs(void *info)
{
    struct vcpu *v = info;

    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));

    v->arch.hvm_vmx.active_cpu = -1;
    v->arch.hvm_vmx.launched = 0;

    if ( v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs) )
        this_cpu(current_vmcs) = NULL;
}

static void vmx_clear_vmcs(struct vcpu *v)
{
    int cpu = v->arch.hvm_vmx.active_cpu;

    if ( cpu == -1 )
        return;

    if ( cpu == smp_processor_id() )
        return __vmx_clear_vmcs(v);

    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
}

static void vmx_load_vmcs(struct vcpu *v)
{
    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    v->arch.hvm_vmx.active_cpu = smp_processor_id();
    this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;
}

void vmx_vmcs_enter(struct vcpu *v)
{
    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions.
     */
    if ( v == current )
        return;

    vcpu_pause(v);
    spin_lock(&v->arch.hvm_vmx.vmcs_lock);

    vmx_clear_vmcs(v);
    vmx_load_vmcs(v);
}

void vmx_vmcs_exit(struct vcpu *v)
{
    if ( v == current )
        return;

    /* Don't confuse vmx_do_resume (for @v or @current!) */
    vmx_clear_vmcs(v);
    if ( is_hvm_vcpu(current) )
        vmx_load_vmcs(current);

    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
    vcpu_unpause(v);
}
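
/*
 * Illustrative usage (not code from this file): to access the VMCS of a vcpu
 * other than current, bracket the accesses with vmx_vmcs_enter()/exit(), e.g.
 *
 *     vmx_vmcs_enter(v);
 *     ... __vmread()/__vmwrite() on v's VMCS ...
 *     vmx_vmcs_exit(v);
 */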

struct vmcs_struct *vmx_alloc_host_vmcs(void)
{
    return vmx_alloc_vmcs();
}

void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
{
    vmx_free_vmcs(vmcs);
}

#define GUEST_SEGMENT_LIMIT 0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
};

static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int tr, cpu;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;

    cpu = smp_processor_id();
    __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
    host_env.idtr_limit = desc.size;
    host_env.idtr_base = desc.address;
    __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);

    __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;
    __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];
    __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    __vmwrite(HOST_TR_BASE, host_env.tr_base);

    /*
     * Skip end of cpu_user_regs when entering the hypervisor because the
     * CPU does not save context onto the stack. SS,RSP,CS,RIP,RFLAGS,etc
     * all get saved into the VMCS instead.
     */
    __vmwrite(HOST_RSP,
              (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
}

static void construct_vmcs(struct vcpu *v)
{
    unsigned long cr0, cr4;
    union vmcs_arbytes arbytes;

    vmx_vmcs_enter(v);

    /* VMCS controls. */
    __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
    __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
    v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
    if ( vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
        __vmwrite(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control);

    if ( cpu_has_vmx_msr_bitmap )
        __vmwrite(MSR_BITMAP, virt_to_maddr(vmx_msr_bitmap));

    /* I/O access bitmap. */
    __vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap));
    __vmwrite(IO_BITMAP_B, virt_to_maddr(hvm_io_bitmap + PAGE_SIZE));

    /* Host data selectors. */
    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
#if defined(__i386__)
    __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    __vmwrite(HOST_FS_BASE, 0);
    __vmwrite(HOST_GS_BASE, 0);
#elif defined(__x86_64__)
    {
        unsigned long msr;
        rdmsrl(MSR_FS_BASE, msr); __vmwrite(HOST_FS_BASE, msr);
        rdmsrl(MSR_GS_BASE, msr); __vmwrite(HOST_GS_BASE, msr);
    }
#endif

    /* Host control registers. */
    __vmwrite(HOST_CR0, read_cr0() | X86_CR0_TS);
    __vmwrite(HOST_CR4, read_cr4());

    /* Host CS:RIP. */
    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);

    /* MSR intercepts. */
    __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);

    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
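
    /*
     * Every CR0/CR4 bit is host-owned: guest reads of masked bits return the
     * read shadows set up below, and guest writes that differ from the
     * shadows cause a VM exit.
     */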
    __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
    __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);

    __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    __vmwrite(CR3_TARGET_COUNT, 0);

    __vmwrite(GUEST_ACTIVITY_STATE, 0);

    /* Guest segment bases. */
    __vmwrite(GUEST_ES_BASE, 0);
    __vmwrite(GUEST_SS_BASE, 0);
    __vmwrite(GUEST_DS_BASE, 0);
    __vmwrite(GUEST_FS_BASE, 0);
    __vmwrite(GUEST_GS_BASE, 0);
    __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits. */
    __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
    __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);

    /* Guest segment AR bytes. */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type = 3: read/write, accessed */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */
    __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
    arbytes.fields.seg_type = 0xb;          /* type = 0xb: execute/read, accessed */
    __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    /* Guest GDT. */
    __vmwrite(GUEST_GDTR_BASE, 0);
    __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest IDT. */
    __vmwrite(GUEST_IDTR_BASE, 0);
    __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest LDT and TSS. */
    arbytes.fields.s = 0;                   /* not code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);

    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __vmwrite(GUEST_DR7, 0);
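    /* VM entry requires the VMCS link pointer to be all ones. */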
    __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
    __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif

    __vmwrite(EXCEPTION_BITMAP, HVM_TRAP_MASK | (1U << TRAP_page_fault));

    /* Guest CR0. */
    cr0 = read_cr0();
    v->arch.hvm_vmx.cpu_cr0 = cr0;
    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);

    /* Guest CR4. */
    cr4 = read_cr4();
    __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    v->arch.hvm_vmx.cpu_shadow_cr4 =
        cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);

    if ( cpu_has_vmx_tpr_shadow )
    {
        paddr_t virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
        __vmwrite(VIRTUAL_APIC_PAGE_ADDR, virt_page_ma);
#if defined (CONFIG_X86_PAE)
        __vmwrite(VIRTUAL_APIC_PAGE_ADDR_HIGH, virt_page_ma >> 32);
#endif
        __vmwrite(TPR_THRESHOLD, 0);
    }

    __vmwrite(GUEST_LDTR_SELECTOR, 0);
    __vmwrite(GUEST_LDTR_BASE, 0);
    __vmwrite(GUEST_LDTR_LIMIT, 0);

    __vmwrite(GUEST_TR_BASE, 0);
    __vmwrite(GUEST_TR_LIMIT, 0xff);

    vmx_vmcs_exit(v);

    paging_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */

    vmx_vlapic_msr_changed(v);
}

int vmx_create_vmcs(struct vcpu *v)
{
    if ( v->arch.hvm_vmx.vmcs == NULL )
    {
        if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
            return -ENOMEM;

        __vmx_clear_vmcs(v);
    }

    construct_vmcs(v);

    return 0;
}

void vmx_destroy_vmcs(struct vcpu *v)
{
    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;

    if ( arch_vmx->vmcs == NULL )
        return;

    vmx_clear_vmcs(v);

    vmx_free_vmcs(arch_vmx->vmcs);
    arch_vmx->vmcs = NULL;
}

void vm_launch_fail(unsigned long eflags)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_launch_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

void vm_resume_fail(unsigned long eflags)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_resume_fail> error code %lx\n", error);
    domain_crash_synchronous();
}

void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
            vmx_load_vmcs(v);
    }
    else
    {
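        /*
         * The VMCS is not active on this CPU (the vcpu migrated or has not
         * run here yet): clear it wherever it is still loaded, load it on
         * this CPU, and refresh the per-CPU host state.
         */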
        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
    }

    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
        unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            intercepts |= mask;
        else
            intercepts &= ~mask;
        __vmwrite(EXCEPTION_BITMAP, intercepts);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}

/* Dump a section of VMCS */
static void print_section(char *header, uint32_t start,
                          uint32_t end, int incr)
{
    uint32_t addr, j;
    unsigned long val;
    int code, rc;
    char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
    char *err[4] = {"------ ", "------------------ ",
                    "---------- ", "------------------ "};
    /* Find width of the field (encoded in bits 14:13 of address) */
    code = (start>>13)&3;

    if (header)
        printk("\t %s", header);

    for (addr=start, j=0; addr<=end; addr+=incr, j++) {

        if (!(j&3))
            printk("\n\t\t0x%08x: ", addr);

        val = __vmread_safe(addr, &rc);
        if (rc == 0)
            printk(fmt[code], val);
        else
            printk("%s", err[code]);
    }

    printk("\n");
}

/* Dump current VMCS */
void vmcs_dump_vcpu(void)
{
    print_section("16-bit Guest-State Fields", 0x800, 0x80e, 2);
    print_section("16-bit Host-State Fields", 0xc00, 0xc0c, 2);
    print_section("64-bit Control Fields", 0x2000, 0x2013, 1);
    print_section("64-bit Guest-State Fields", 0x2800, 0x2803, 1);
    print_section("32-bit Control Fields", 0x4000, 0x401c, 2);
    print_section("32-bit RO Data Fields", 0x4400, 0x440e, 2);
    print_section("32-bit Guest-State Fields", 0x4800, 0x482a, 2);
    print_section("32-bit Host-State Fields", 0x4c00, 0x4c00, 2);
    print_section("Natural 64-bit Control Fields", 0x6000, 0x600e, 2);
    print_section("64-bit RO Data Fields", 0x6400, 0x640A, 2);
    print_section("Natural 64-bit Guest-State Fields", 0x6800, 0x6826, 2);
    print_section("Natural 64-bit Host-State Fields", 0x6c00, 0x6c16, 2);
}

static void vmcs_dump(unsigned char ch)
{
    struct domain *d;
    struct vcpu *v;

    printk("*********** VMCS Areas **************\n");

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        if ( !is_hvm_domain(d) )
            continue;
        printk("\n>>> Domain %d <<<\n", d->domain_id);
        for_each_vcpu ( d, v )
        {
            printk("\tVCPU %d\n", v->vcpu_id);
            vmx_vmcs_enter(v);
            vmcs_dump_vcpu();
            vmx_vmcs_exit(v);
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    printk("**************************************\n");
}

void setup_vmcs_dump(void)
{
    register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */