ia64/xen-unstable

xen/arch/x86/hvm/vmx/vmcs.c @ 9334:56a775219c88

This patch fixes an HVM/VMX time resolution issue that causes IA32E guests
to occasionally complain of "lost ticks", and an APIC timer calibration issue.

Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Mar 19 18:52:20 2006 +0100 (2006-03-19)
parents d4e433d615b0
children f0e14b4e535c

/*
 * vmcs.c: VMCS management
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <asm/shadow.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif

int vmcs_size;
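
/*
 * The VMCS region size and revision identifier are reported by the
 * IA32_VMX_BASIC MSR: the low 32 bits give the revision identifier that
 * software must write into the first dword of every VMCS region, and
 * bits 44:32 give the region size in bytes (hence the 0x1fff mask on the
 * high dword below).
 */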
struct vmcs_struct *alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;
    u32 vmx_msr_low, vmx_msr_high;

    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
    vmcs_size = vmx_msr_high & 0x1fff;
    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
    memset((char *)vmcs, 0, vmcs_size); /* don't remove this */

    vmcs->vmcs_revision_id = vmx_msr_low;
    return vmcs;
}

static void free_vmcs(struct vmcs_struct *vmcs)
{
    int order;

    order = get_order_from_bytes(vmcs_size);
    free_xenheap_pages(vmcs, order);
}

static int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
{
    int error;

    if ((error = __vmptrld(phys_ptr))) {
        clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
        return error;
    }
    set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
    return 0;
}
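
/*
 * A VMCS may only safely be VMCLEARed on the CPU where it was last
 * launched: VMCLEAR flushes any processor-cached VMCS state back to the
 * in-memory region and makes it safe to load on another CPU.  Hence the
 * cross-CPU call below when the current CPU is not the launch CPU.
 */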
static void vmx_smp_clear_vmcs(void *info)
{
    struct vcpu *v = (struct vcpu *)info;

    ASSERT(hvm_guest(v));

    if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
        __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
}

void vmx_request_clear_vmcs(struct vcpu *v)
{
    ASSERT(hvm_guest(v));

    if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
        __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    else
        smp_call_function(vmx_smp_clear_vmcs, v, 1, 1);
}

#if 0
static int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
{
    /* take the current VMCS */
    __vmptrst(phys_ptr);
    clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
    return 0;
}
#endif
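
/*
 * The two 4KB I/O bitmaps control which guest port accesses cause VM
 * exits: bitmap A covers ports 0x0000-0x7fff and bitmap B covers
 * 0x8000-0xffff, one bit per port.  Filling both with 0xff intercepts all
 * guest port I/O, except for the debug port bit cleared below.
 */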
static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
{
    int error = 0;
    void *io_bitmap_a;
    void *io_bitmap_b;

    error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                       MONITOR_PIN_BASED_EXEC_CONTROLS);

    error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);

    error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);

    /* need to use 0x1000 instead of PAGE_SIZE */
    io_bitmap_a = (void *)alloc_xenheap_pages(get_order_from_bytes(0x1000));
    io_bitmap_b = (void *)alloc_xenheap_pages(get_order_from_bytes(0x1000));
    memset(io_bitmap_a, 0xff, 0x1000);
    /* don't intercept debug-port accesses */
    clear_bit(PC_DEBUG_PORT, io_bitmap_a);
    memset(io_bitmap_b, 0xff, 0x1000);

    error |= __vmwrite(IO_BITMAP_A, (u64)virt_to_maddr(io_bitmap_a));
    error |= __vmwrite(IO_BITMAP_B, (u64)virt_to_maddr(io_bitmap_b));

    arch_vmx->io_bitmap_a = io_bitmap_a;
    arch_vmx->io_bitmap_b = io_bitmap_b;

    return error;
}

#define GUEST_LAUNCH_DS         0x08
#define GUEST_LAUNCH_CS         0x10
#define GUEST_SEGMENT_LIMIT     0xffffffff
#define HOST_SEGMENT_LIMIT      0xffffffff

struct host_execution_env {
    /* selectors */
    unsigned short ldtr_selector;
    unsigned short tr_selector;
    unsigned short ds_selector;
    unsigned short cs_selector;
    /* limits */
    unsigned short gdtr_limit;
    unsigned short ldtr_limit;
    unsigned short idtr_limit;
    unsigned short tr_limit;
    /* base */
    unsigned long gdtr_base;
    unsigned long ldtr_base;
    unsigned long idtr_base;
    unsigned long tr_base;
    unsigned long ds_base;
    unsigned long cs_base;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
};
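
/*
 * Record the per-CPU host state that the processor reloads on every VM
 * exit: the IDT and GDT bases, this CPU's TR selector and TSS base, and
 * the stack pointer the exit handler will run on.
 */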
static void vmx_set_host_env(struct vcpu *v)
{
    unsigned int tr, cpu, error = 0;
    struct host_execution_env host_env;
    struct Xgt_desc_struct desc;

    cpu = smp_processor_id();
    __asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
    host_env.idtr_limit = desc.size;
    host_env.idtr_base = desc.address;
    error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);

    __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
    host_env.gdtr_limit = desc.size;
    host_env.gdtr_base = desc.address;
    error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);

    __asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
    host_env.tr_selector = tr;
    host_env.tr_limit = sizeof(struct tss_struct);
    host_env.tr_base = (unsigned long) &init_tss[cpu];
    error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
    error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
}

static void vmx_do_launch(struct vcpu *v)
{
    /* Update CR3, GDT, LDT, TR */
    unsigned int error = 0;
    unsigned long cr0, cr4;

    if (v->vcpu_id == 0)
        hvm_setup_platform(v->domain);

    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
    {
        printk("VMX domain bind port %d to vcpu %d failed!\n",
               iopacket_port(v), v->vcpu_id);
        domain_crash_synchronous();
    }

    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));

    clear_bit(iopacket_port(v),
              &v->domain->shared_info->evtchn_mask[0]);

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );

    error |= __vmwrite(GUEST_CR0, cr0);
    cr0 &= ~X86_CR0_PG;
    error |= __vmwrite(CR0_READ_SHADOW, cr0);
    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                       MONITOR_CPU_BASED_EXEC_CONTROLS);
    v->arch.hvm_vcpu.u.vmx.exec_control = MONITOR_CPU_BASED_EXEC_CONTROLS;

    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );

    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);

    error |= __vmwrite(CR4_READ_SHADOW, cr4);

    vmx_stts();

    if (hvm_apic_support(v->domain))
        vlapic_init(v);

    vmx_set_host_env(v);
    init_timer(&v->arch.hvm_vmx.hlt_timer, hlt_timer_fn, v, v->processor);

    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
    error |= __vmwrite(GUEST_LDTR_BASE, 0);
    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);

    error |= __vmwrite(GUEST_TR_BASE, 0);
    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);

    __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
    __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));

    v->arch.schedule_tail = arch_vmx_do_resume;
    v->arch.hvm_vmx.launch_cpu = smp_processor_id();

    /* init guest tsc to start from 0 */
    set_guest_time(v, 0);
}

/*
 * Initially set the same environment as host.
 */
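/*
 * Note on the CR0/CR4 guest/host masks written below: with a mask of all
 * ones, every mask bit is host-owned, so guest reads of CR0/CR4 return
 * the corresponding read shadow and guest writes cause VM exits, leaving
 * the VMM in full control of what the guest observes in those registers.
 */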
static inline int construct_init_vmcs_guest(cpu_user_regs_t *regs)
{
    int error = 0;
    union vmcs_arbytes arbytes;
    unsigned long dr7;
    unsigned long eflags;

    /* MSR */
    error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);

    error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
    /* interrupt */
    error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
    /* mask */
    error |= __vmwrite(CR0_GUEST_HOST_MASK, -1UL);
    error |= __vmwrite(CR4_GUEST_HOST_MASK, -1UL);

    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);

    /* no CR3-target values */
    error |= __vmwrite(CR3_TARGET_COUNT, 0);

    /* Guest Selectors */
    error |= __vmwrite(GUEST_ES_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_SS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_DS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_FS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_GS_SELECTOR, GUEST_LAUNCH_DS);
    error |= __vmwrite(GUEST_CS_SELECTOR, GUEST_LAUNCH_CS);

    /* Guest segment bases */
    error |= __vmwrite(GUEST_ES_BASE, 0);
    error |= __vmwrite(GUEST_SS_BASE, 0);
    error |= __vmwrite(GUEST_DS_BASE, 0);
    error |= __vmwrite(GUEST_FS_BASE, 0);
    error |= __vmwrite(GUEST_GS_BASE, 0);
    error |= __vmwrite(GUEST_CS_BASE, 0);

    /* Guest segment limits */
    error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
    error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);

    /* Guest segment AR bytes */
    arbytes.bytes = 0;
    arbytes.fields.seg_type = 0x3;          /* type = 3: read/write data */
    arbytes.fields.s = 1;                   /* code or data, i.e. not system */
    arbytes.fields.dpl = 0;                 /* DPL = 0 */
    arbytes.fields.p = 1;                   /* segment present */
    arbytes.fields.default_ops_size = 1;    /* 32-bit */
    arbytes.fields.g = 1;
    arbytes.fields.null_bit = 0;            /* not null */

    error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
    error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* type = 0xb: execute/read code */
    error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);

    /* Guest GDT */
    error |= __vmwrite(GUEST_GDTR_BASE, 0);
    error |= __vmwrite(GUEST_GDTR_LIMIT, 0);

    /* Guest IDT */
    error |= __vmwrite(GUEST_IDTR_BASE, 0);
    error |= __vmwrite(GUEST_IDTR_LIMIT, 0);

    /* Guest LDT & TSS */
    arbytes.fields.s = 0;                   /* not code or data segment */
    arbytes.fields.seg_type = 0x2;          /* LDT */
    arbytes.fields.default_ops_size = 0;    /* 16-bit */
    arbytes.fields.g = 0;
    error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);

    arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
    /* CR3 is set in vmx_final_setup_guest */

    error |= __vmwrite(GUEST_RSP, 0);
    error |= __vmwrite(GUEST_RIP, regs->eip);

    /* Guest EFLAGS */
    eflags = regs->eflags & ~HVM_EFLAGS_RESERVED_0; /* clear 0s */
    eflags |= HVM_EFLAGS_RESERVED_1;                /* set 1s */
    error |= __vmwrite(GUEST_RFLAGS, eflags);

    error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
    error |= __vmwrite(GUEST_DR7, dr7);
    error |= __vmwrite(VMCS_LINK_POINTER, 0xffffffff);
    error |= __vmwrite(VMCS_LINK_POINTER_HIGH, 0xffffffff);

    return error;
}
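
/*
 * Host-state fields: the selector, base and control-register values
 * written below are what the processor reloads on VM exit, so they must
 * match the hypervisor's own execution environment.
 */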
static inline int construct_vmcs_host(void)
{
    int error = 0;
#ifdef __x86_64__
    unsigned long fs_base;
    unsigned long gs_base;
#endif
    unsigned long crn;

    /* Host Selectors */
    error |= __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
#if defined (__i386__)
    error |= __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    error |= __vmwrite(HOST_FS_BASE, 0);
    error |= __vmwrite(HOST_GS_BASE, 0);

#else
    rdmsrl(MSR_FS_BASE, fs_base);
    rdmsrl(MSR_GS_BASE, gs_base);
    error |= __vmwrite(HOST_FS_BASE, fs_base);
    error |= __vmwrite(HOST_GS_BASE, gs_base);

#endif
    error |= __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);

    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
    error |= __vmwrite(HOST_CR0, crn); /* same CR0 */

    /* CR3 is set in vmx_final_setup_hostos */
    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : );
    error |= __vmwrite(HOST_CR4, crn);

    error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
#ifdef __x86_64__
    /* TBD: support cr8 for 64-bit guest */
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
    __vmwrite(TPR_THRESHOLD, 0);
    __vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);
#endif

    return error;
}

/*
 * Need to extend to support full virtualization.
 */
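/*
 * VMWRITE only operates on the current VMCS, so the sequence below must
 * first VMCLEAR the new region (initialising its launch state) and then
 * VMPTRLD it before any of the field constructors run.
 */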
static int construct_vmcs(struct arch_vmx_struct *arch_vmx,
                          cpu_user_regs_t *regs)
{
    int error;
    long rc;
    u64 vmcs_phys_ptr;

    memset(arch_vmx, 0, sizeof(struct arch_vmx_struct));

    /*
     * Create a new VMCS
     */
    if (!(arch_vmx->vmcs = alloc_vmcs())) {
        printk("Failed to create a new VMCS\n");
        rc = -ENOMEM;
        goto err_out;
    }
    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);

    if ((error = __vmpclear(vmcs_phys_ptr))) {
        printk("construct_vmcs: VMCLEAR failed\n");
        rc = -EINVAL;
        goto err_out;
    }
    if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
        printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n",
               (unsigned long) vmcs_phys_ptr);
        rc = -EINVAL;
        goto err_out;
    }
    if ((error = construct_vmcs_controls(arch_vmx))) {
        printk("construct_vmcs: construct_vmcs_controls failed\n");
        rc = -EINVAL;
        goto err_out;
    }
    /* host selectors */
    if ((error = construct_vmcs_host())) {
        printk("construct_vmcs: construct_vmcs_host failed\n");
        rc = -EINVAL;
        goto err_out;
    }
    /* guest selectors */
    if ((error = construct_init_vmcs_guest(regs))) {
        printk("construct_vmcs: construct_init_vmcs_guest failed\n");
        rc = -EINVAL;
        goto err_out;
    }
    if ((error = __vmwrite(EXCEPTION_BITMAP,
                           MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
        printk("construct_vmcs: setting exception bitmap failed\n");
        rc = -EINVAL;
        goto err_out;
    }

    if (regs->eflags & EF_TF)
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
    else
        __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);

    return 0;

err_out:
    destroy_vmcs(arch_vmx);
    return rc;
}

void destroy_vmcs(struct arch_vmx_struct *arch_vmx)
{
    free_vmcs(arch_vmx->vmcs);
    arch_vmx->vmcs = NULL;

    free_xenheap_pages(arch_vmx->io_bitmap_a, get_order_from_bytes(0x1000));
    arch_vmx->io_bitmap_a = NULL;

    free_xenheap_pages(arch_vmx->io_bitmap_b, get_order_from_bytes(0x1000));
    arch_vmx->io_bitmap_b = NULL;
}

/*
 * Modify guest EFLAGS and exception bitmap for gdb.
 */
int modify_vmcs(struct arch_vmx_struct *arch_vmx,
                struct cpu_user_regs *regs)
{
    int error;
    u64 vmcs_phys_ptr, old, old_phys_ptr;
    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);

    old_phys_ptr = virt_to_maddr(&old);
    __vmptrst(old_phys_ptr);
    if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
        printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n",
               (unsigned long) vmcs_phys_ptr);
        return -EINVAL;
    }

    /* XXX VMX change modify_vmcs arg to v */
    hvm_load_cpu_guest_regs(current, regs);

    __vmptrld(old_phys_ptr);

    return 0;
}

void vm_launch_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_launch_fail> error code %lx\n", error);
    __hvm_bug(guest_cpu_user_regs());
}

void vm_resume_fail(unsigned long eflags)
{
    unsigned long error;
    __vmread(VM_INSTRUCTION_ERROR, &error);
    printk("<vm_resume_fail> error code %lx\n", error);
    __hvm_bug(guest_cpu_user_regs());
}
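
/*
 * Fast path: resuming on the CPU that last launched this VMCS needs only
 * a VMPTRLD.  If the vcpu has migrated, the VMCS must first be VMCLEARed
 * on its old CPU, then loaded here with the per-CPU host state (GDT, IDT,
 * TR, RSP) rewritten for the new CPU, and a full VMLAUNCH is required.
 */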
void arch_vmx_do_resume(struct vcpu *v)
{
    if ( v->arch.hvm_vmx.launch_cpu == smp_processor_id() )
    {
        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
        vmx_do_resume(v);
        reset_stack_and_jump(vmx_asm_do_resume);
    }
    else
    {
        vmx_request_clear_vmcs(v);
        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
        vmx_migrate_timers(v);
        vmx_set_host_env(v);
        vmx_do_resume(v);
        v->arch.hvm_vmx.launch_cpu = smp_processor_id();
        reset_stack_and_jump(vmx_asm_do_relaunch);
    }
}

void arch_vmx_do_launch(struct vcpu *v)
{
    int error;
    cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;

    error = construct_vmcs(&v->arch.hvm_vmx, regs);
    if ( error < 0 )
    {
        if (v->vcpu_id == 0) {
            printk("Failed to construct a new VMCS for BSP.\n");
        } else {
            printk("Failed to construct a new VMCS for AP %d\n", v->vcpu_id);
        }
        domain_crash_synchronous();
    }
    vmx_do_launch(v);
    reset_stack_and_jump(vmx_asm_do_launch);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */