ia64/xen-unstable
changeset 9016:cf1c1bb9f6d2
Bring up AP of VMX domain.
1) add INIT-SIPI-SIPI IPI sequence handling code to HVM virtual lapic
code.
2) add a new interface init_ap_context to hvm_funcs, and implement the
VMX side.
3) add a hvm generic function hvm_bringup_ap, which in turn calls
init_ap_context.
Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
1) add INIT-SIPI-SIPI IPI sequence handling code to HVM virtual lapic
code.
2) add a new interface init_ap_context to hvm_funcs, and implement the
VMX side.
3) add a hvm generic function hvm_bringup_ap, which in turn calls
init_ap_context.
Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Fri Feb 24 17:32:58 2006 +0100 (2006-02-24) |
parents | 71f2d19cd3a5 |
children | ec92f986411e |
files | xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmx/io.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/vcpu.h xen/include/asm-x86/hvm/vlapic.h |
line diff
1.1 --- a/xen/arch/x86/hvm/hvm.c Fri Feb 24 17:32:19 2006 +0100 1.2 +++ b/xen/arch/x86/hvm/hvm.c Fri Feb 24 17:32:58 2006 +0100 1.3 @@ -25,6 +25,7 @@ 1.4 #include <xen/sched.h> 1.5 #include <xen/irq.h> 1.6 #include <xen/softirq.h> 1.7 +#include <xen/domain.h> 1.8 #include <xen/domain_page.h> 1.9 #include <asm/current.h> 1.10 #include <asm/io.h> 1.11 @@ -59,9 +60,9 @@ static void hvm_zap_mmio_range( 1.12 1.13 for ( i = 0; i < nr_pfn; i++ ) 1.14 { 1.15 - if ( pfn + i >= 0xfffff ) 1.16 + if ( pfn + i >= 0xfffff ) 1.17 break; 1.18 - 1.19 + 1.20 __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof (val)); 1.21 } 1.22 } 1.23 @@ -217,7 +218,7 @@ void hvm_pic_assist(struct vcpu *v) 1.24 global_iodata_t *spg; 1.25 u16 *virq_line, irqs; 1.26 struct hvm_virpic *pic = &v->domain->arch.hvm_domain.vpic; 1.27 - 1.28 + 1.29 spg = &get_sp(v->domain)->sp_global; 1.30 virq_line = &spg->pic_clear_irr; 1.31 if ( *virq_line ) { 1.32 @@ -312,6 +313,52 @@ void hvm_print_line(struct vcpu *v, cons 1.33 } 1.34 1.35 /* 1.36 + * only called in HVM domain BSP context 1.37 + * when booting, vcpuid is always equal to apic_id 1.38 + */ 1.39 +int hvm_bringup_ap(int vcpuid, int trampoline_vector) 1.40 +{ 1.41 + struct vcpu *bsp = current, *v; 1.42 + struct domain *d = bsp->domain; 1.43 + struct vcpu_guest_context *ctxt; 1.44 + int rc = 0; 1.45 + 1.46 + /* current must be HVM domain BSP */ 1.47 + if ( !(HVM_DOMAIN(bsp) && bsp->vcpu_id == 0) ) { 1.48 + printk("Not calling hvm_bringup_ap from BSP context.\n"); 1.49 + domain_crash_synchronous(); 1.50 + } 1.51 + 1.52 + if ( (v = d->vcpu[vcpuid]) == NULL ) 1.53 + return -ENOENT; 1.54 + 1.55 + if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL ) { 1.56 + printk("Failed to allocate memory in hvm_bringup_ap.\n"); 1.57 + return -ENOMEM; 1.58 + } 1.59 + 1.60 + hvm_init_ap_context(ctxt, vcpuid, trampoline_vector); 1.61 + 1.62 + LOCK_BIGLOCK(d); 1.63 + rc = -EEXIST; 1.64 + if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 1.65 + rc = 
boot_vcpu(d, vcpuid, ctxt); 1.66 + UNLOCK_BIGLOCK(d); 1.67 + 1.68 + if ( rc != 0 ) 1.69 + printk("AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc); 1.70 + else { 1.71 + if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) ) 1.72 + vcpu_wake(d->vcpu[vcpuid]); 1.73 + printk("AP %d bringup suceeded.\n", vcpuid); 1.74 + } 1.75 + 1.76 + xfree(ctxt); 1.77 + 1.78 + return rc; 1.79 +} 1.80 + 1.81 +/* 1.82 * Local variables: 1.83 * mode: C 1.84 * c-set-style: "BSD"
2.1 --- a/xen/arch/x86/hvm/vlapic.c Fri Feb 24 17:32:19 2006 +0100 2.2 +++ b/xen/arch/x86/hvm/vlapic.c Fri Feb 24 17:32:58 2006 +0100 2.3 @@ -225,27 +225,35 @@ static int vlapic_accept_irq(struct vcpu 2.4 break; 2.5 2.6 case VLAPIC_DELIV_MODE_INIT: 2.7 - if (!level && trig_mode == 1) { //Deassert 2.8 + if ( !level && trig_mode == 1 ) { //Deassert 2.9 printk("This hvm_vlapic is for P4, no work for De-assert init\n"); 2.10 } else { 2.11 /* FIXME How to check the situation after vcpu reset? */ 2.12 - vlapic->init_sipi_sipi_state = VLAPIC_INIT_SIPI_SIPI_STATE_WAIT_SIPI; 2.13 - if (vlapic->vcpu) { 2.14 - vcpu_pause(vlapic->vcpu); 2.15 + if ( test_and_clear_bit(_VCPUF_initialised, &v->vcpu_flags) ) { 2.16 + printk("Reset hvm vcpu not supported yet\n"); 2.17 + domain_crash_synchronous(); 2.18 } 2.19 + v->arch.hvm_vcpu.init_sipi_sipi_state = 2.20 + HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI; 2.21 + result = 1; 2.22 } 2.23 break; 2.24 2.25 case VLAPIC_DELIV_MODE_STARTUP: 2.26 - if (vlapic->init_sipi_sipi_state != VLAPIC_INIT_SIPI_SIPI_STATE_WAIT_SIPI) 2.27 + if ( v->arch.hvm_vcpu.init_sipi_sipi_state == 2.28 + HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM ) 2.29 break; 2.30 - vlapic->init_sipi_sipi_state = VLAPIC_INIT_SIPI_SIPI_STATE_NORM; 2.31 - if (!vlapic->vcpu) { 2.32 - /* XXX Call hvm_bringup_ap here */ 2.33 - result = 0; 2.34 - }else{ 2.35 - //hvm_vcpu_reset(vlapic->vcpu); 2.36 + 2.37 + v->arch.hvm_vcpu.init_sipi_sipi_state = 2.38 + HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM; 2.39 + 2.40 + if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) ) { 2.41 + printk("SIPI for initialized vcpu vcpuid %x\n", v->vcpu_id); 2.42 + domain_crash_synchronous(); 2.43 } 2.44 + 2.45 + if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 ) 2.46 + result = 0; 2.47 break; 2.48 2.49 default:
3.1 --- a/xen/arch/x86/hvm/vmx/io.c Fri Feb 24 17:32:19 2006 +0100 3.2 +++ b/xen/arch/x86/hvm/vmx/io.c Fri Feb 24 17:32:58 2006 +0100 3.3 @@ -113,13 +113,15 @@ asmlinkage void vmx_intr_assist(void) 3.4 struct hvm_virpit *vpit = &plat->vpit; 3.5 struct hvm_virpic *pic= &plat->vpic; 3.6 3.7 - hvm_pic_assist(v); 3.8 - __vmread_vcpu(v, CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control); 3.9 - if ( vpit->pending_intr_nr ) { 3.10 + if ( v->vcpu_id == 0 ) 3.11 + hvm_pic_assist(v); 3.12 + 3.13 + if ( (v->vcpu_id == 0) && vpit->pending_intr_nr ) { 3.14 pic_set_irq(pic, 0, 0); 3.15 pic_set_irq(pic, 0, 1); 3.16 } 3.17 3.18 + __vmread_vcpu(v, CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control); 3.19 __vmread(VM_ENTRY_INTR_INFO_FIELD, &intr_fields); 3.20 3.21 if (intr_fields & INTR_INFO_VALID_MASK) {
4.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Fri Feb 24 17:32:19 2006 +0100 4.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Fri Feb 24 17:32:58 2006 +0100 4.3 @@ -448,6 +448,37 @@ unsigned long vmx_get_ctrl_reg(struct vc 4.4 return 0; /* dummy */ 4.5 } 4.6 4.7 +/* SMP VMX guest support */ 4.8 +void vmx_init_ap_context(struct vcpu_guest_context *ctxt, 4.9 + int vcpuid, int trampoline_vector) 4.10 +{ 4.11 + int i; 4.12 + 4.13 + memset(ctxt, 0, sizeof(*ctxt)); 4.14 + 4.15 + /* 4.16 + * Initial register values: 4.17 + */ 4.18 + ctxt->user_regs.eip = VMXASSIST_BASE; 4.19 + ctxt->user_regs.edx = vcpuid; 4.20 + ctxt->user_regs.ebx = trampoline_vector; 4.21 + 4.22 + ctxt->flags = VGCF_HVM_GUEST; 4.23 + 4.24 + /* Virtual IDT is empty at start-of-day. */ 4.25 + for ( i = 0; i < 256; i++ ) 4.26 + { 4.27 + ctxt->trap_ctxt[i].vector = i; 4.28 + ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS; 4.29 + } 4.30 + 4.31 + /* No callback handlers. */ 4.32 +#if defined(__i386__) 4.33 + ctxt->event_callback_cs = FLAT_KERNEL_CS; 4.34 + ctxt->failsafe_callback_cs = FLAT_KERNEL_CS; 4.35 +#endif 4.36 +} 4.37 + 4.38 void do_nmi(struct cpu_user_regs *); 4.39 4.40 static int check_vmx_controls(ctrls, msr) 4.41 @@ -545,6 +576,8 @@ int start_vmx(void) 4.42 hvm_funcs.instruction_length = vmx_instruction_length; 4.43 hvm_funcs.get_guest_ctrl_reg = vmx_get_ctrl_reg; 4.44 4.45 + hvm_funcs.init_ap_context = vmx_init_ap_context; 4.46 + 4.47 hvm_enabled = 1; 4.48 4.49 return 1;
5.1 --- a/xen/include/asm-x86/hvm/hvm.h Fri Feb 24 17:32:19 2006 +0100 5.2 +++ b/xen/include/asm-x86/hvm/hvm.h Fri Feb 24 17:32:58 2006 +0100 5.3 @@ -67,6 +67,9 @@ struct hvm_function_table { 5.4 int (*paging_enabled)(struct vcpu *v); 5.5 int (*instruction_length)(struct vcpu *v); 5.6 unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num); 5.7 + 5.8 + void (*init_ap_context)(struct vcpu_guest_context *ctxt, 5.9 + int vcpuid, int trampoline_vector); 5.10 }; 5.11 5.12 extern struct hvm_function_table hvm_funcs; 5.13 @@ -173,4 +176,14 @@ hvm_get_guest_ctrl_reg(struct vcpu *v, u 5.14 return hvm_funcs.get_guest_ctrl_reg(v, num); 5.15 return 0; /* force to fail */ 5.16 } 5.17 + 5.18 +static inline void 5.19 +hvm_init_ap_context(struct vcpu_guest_context *ctxt, 5.20 + int vcpuid, int trampoline_vector) 5.21 +{ 5.22 + return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector); 5.23 +} 5.24 + 5.25 +extern int hvm_bringup_ap(int vcpuid, int trampoline_vector); 5.26 + 5.27 #endif /* __ASM_X86_HVM_HVM_H__ */
6.1 --- a/xen/include/asm-x86/hvm/vcpu.h Fri Feb 24 17:32:19 2006 +0100 6.2 +++ b/xen/include/asm-x86/hvm/vcpu.h Fri Feb 24 17:32:58 2006 +0100 6.3 @@ -25,10 +25,15 @@ 6.4 #include <asm/hvm/vmx/vmcs.h> 6.5 #include <asm/hvm/svm/vmcb.h> 6.6 6.7 +#define HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM 0 6.8 +#define HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI 1 6.9 + 6.10 struct hvm_vcpu { 6.11 - unsigned long ioflags; 6.12 - struct mmio_op mmio_op; 6.13 - struct vlapic *vlapic; 6.14 + unsigned long ioflags; 6.15 + struct mmio_op mmio_op; 6.16 + struct vlapic *vlapic; 6.17 + /* For AP startup */ 6.18 + unsigned long init_sipi_sipi_state; 6.19 6.20 union { 6.21 struct arch_vmx_struct vmx;
7.1 --- a/xen/include/asm-x86/hvm/vlapic.h Fri Feb 24 17:32:19 2006 +0100 7.2 +++ b/xen/include/asm-x86/hvm/vlapic.h Fri Feb 24 17:32:58 2006 +0100 7.3 @@ -159,9 +159,6 @@ typedef struct direct_intr_info { 7.4 int source[6]; 7.5 } direct_intr_info_t; 7.6 7.7 -#define VLAPIC_INIT_SIPI_SIPI_STATE_NORM 0 7.8 -#define VLAPIC_INIT_SIPI_SIPI_STATE_WAIT_SIPI 1 7.9 - 7.10 struct vlapic 7.11 { 7.12 //FIXME check what would be 64 bit on EM64T 7.13 @@ -197,7 +194,6 @@ struct vlapic 7.14 unsigned long init_ticks; 7.15 uint32_t err_write_count; 7.16 uint64_t apic_base_msr; 7.17 - uint32_t init_sipi_sipi_state; 7.18 struct vcpu *vcpu; 7.19 struct domain *domain; 7.20 };