ia64/xen-unstable
changeset 16559:98e9485d8fcf
hvm: Clean up AP initialisation. This allows AP bringup into emulated
real mode when running on VMX, as well as removing 100 LOC.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
| author   | Keir Fraser <keir.fraser@citrix.com> |
|----------|--------------------------------------|
| date     | Fri Dec 07 14:28:07 2007 +0000 (2007-12-07) |
| parents  | c0f7ba3aa9b2 |
| children | 822d4ec5cfb1 |
| files    | xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h |
line diff
--- a/xen/arch/x86/hvm/hvm.c	Fri Dec 07 12:48:36 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Fri Dec 07 14:28:07 2007 +0000
@@ -1590,57 +1590,90 @@ void hvm_hypercall_page_initialise(struc
     hvm_funcs.init_hypercall_page(d, hypercall_page);
 }
 
-
-/*
- * only called in HVM domain BSP context
- * when booting, vcpuid is always equal to apic_id
- */
 int hvm_bringup_ap(int vcpuid, int trampoline_vector)
 {
+    struct domain *d = current->domain;
     struct vcpu *v;
-    struct domain *d = current->domain;
     struct vcpu_guest_context *ctxt;
-    int rc = 0;
+    struct segment_register reg;
 
-    BUG_ON(!is_hvm_domain(d));
+    ASSERT(is_hvm_domain(d));
 
     if ( (v = d->vcpu[vcpuid]) == NULL )
         return -ENOENT;
 
-    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
+    v->fpu_initialised = 0;
+    v->arch.flags |= TF_kernel_mode;
+    v->is_initialised = 1;
+
+    ctxt = &v->arch.guest_context;
+    memset(ctxt, 0, sizeof(*ctxt));
+    ctxt->flags = VGCF_online;
+    ctxt->user_regs.eflags = 2;
+
+#ifdef VMXASSIST
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
     {
-        gdprintk(XENLOG_ERR,
-                 "Failed to allocate memory in hvm_bringup_ap.\n");
-        return -ENOMEM;
+        ctxt->user_regs.eip = VMXASSIST_BASE;
+        ctxt->user_regs.edx = vcpuid;
+        ctxt->user_regs.ebx = trampoline_vector;
+        goto done;
     }
+#endif
 
-    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);
+    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
+    hvm_update_guest_cr(v, 0);
+
+    v->arch.hvm_vcpu.guest_cr[2] = 0;
+    hvm_update_guest_cr(v, 2);
+
+    v->arch.hvm_vcpu.guest_cr[3] = 0;
+    hvm_update_guest_cr(v, 3);
+
+    v->arch.hvm_vcpu.guest_cr[4] = 0;
+    hvm_update_guest_cr(v, 4);
+
+    v->arch.hvm_vcpu.guest_efer = 0;
+    hvm_update_guest_efer(v);
+
+    reg.sel = trampoline_vector << 8;
+    reg.base = (uint32_t)reg.sel << 4;
+    reg.limit = 0xffff;
+    reg.attr.bytes = 0x89b;
+    hvm_set_segment_register(v, x86_seg_cs, &reg);
+
+    reg.sel = reg.base = 0;
+    reg.limit = 0xffff;
+    reg.attr.bytes = 0x893;
+    hvm_set_segment_register(v, x86_seg_ds, &reg);
+    hvm_set_segment_register(v, x86_seg_es, &reg);
+    hvm_set_segment_register(v, x86_seg_fs, &reg);
+    hvm_set_segment_register(v, x86_seg_gs, &reg);
+    hvm_set_segment_register(v, x86_seg_ss, &reg);
+
+    reg.attr.bytes = 0x82; /* LDT */
+    hvm_set_segment_register(v, x86_seg_ldtr, &reg);
+
+    reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
+    hvm_set_segment_register(v, x86_seg_tr, &reg);
+
+    reg.attr.bytes = 0;
+    hvm_set_segment_register(v, x86_seg_gdtr, &reg);
+    hvm_set_segment_register(v, x86_seg_idtr, &reg);
+
+#ifdef VMXASSIST
+ done:
+#endif
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm_vcpu.cache_tsc_offset =
         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 
-    LOCK_BIGLOCK(d);
-    rc = -EEXIST;
-    if ( !v->is_initialised )
-        rc = boot_vcpu(d, vcpuid, ctxt);
-    UNLOCK_BIGLOCK(d);
-
-    if ( rc != 0 )
-    {
-        gdprintk(XENLOG_ERR,
-                 "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
-        goto out;
-    }
-
     if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
         vcpu_wake(v);
-    gdprintk(XENLOG_INFO, "AP %d bringup suceeded.\n", vcpuid);
 
- out:
-    xfree(ctxt);
-    return rc;
+    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);
+    return 0;
 }
 
 static int hvmop_set_pci_intx_level(
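Note: the new generic path sets CS exactly as the INIT-SIPI protocol implies. The startup vector names a 4KiB page, so `reg.sel = trampoline_vector << 8` and `reg.base = (uint32_t)reg.sel << 4` place the AP at physical address `vector * 0x1000` with EIP 0. A minimal standalone sketch of that arithmetic (the helper name and the example vector are illustrative, not part of the changeset):

```c
#include <stdint.h>
#include <stdio.h>

/* Real-mode start point implied by a SIPI startup vector, mirroring the
 * hvm_bringup_ap() hunk above: sel = vector << 8, base = sel << 4, EIP = 0. */
static uint32_t trampoline_start(uint8_t vector)
{
    uint16_t cs_sel  = (uint16_t)vector << 8;   /* e.g. 0x9f -> 0x9f00 */
    uint32_t cs_base = (uint32_t)cs_sel << 4;   /* 0x9f00:0000 -> 0x9f000 */
    return cs_base;                             /* execution begins at base + 0 */
}

int main(void)
{
    /* Vector 0x9f places the AP at physical 0x9f000, i.e. vector * 4KiB. */
    printf("%#x\n", (unsigned)trampoline_start(0x9f));
    return 0;
}
```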
--- a/xen/arch/x86/hvm/svm/svm.c	Fri Dec 07 12:48:36 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri Dec 07 14:28:07 2007 +0000
@@ -62,8 +62,6 @@ int inst_copy_from_guest(unsigned char *
                          int inst_len);
 asmlinkage void do_IRQ(struct cpu_user_regs *);
 
-static int svm_reset_to_realmode(
-    struct vcpu *v, struct cpu_user_regs *regs);
 static void svm_update_guest_cr(struct vcpu *v, unsigned int cr);
 static void svm_update_guest_efer(struct vcpu *v);
 static void svm_inject_exception(
@@ -617,8 +615,24 @@ static void svm_set_segment_register(str
                                      struct segment_register *reg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    int sync = 0;
 
-    ASSERT(v == current);
+    ASSERT((v == current) || !vcpu_runnable(v));
+
+    switch ( seg )
+    {
+    case x86_seg_fs:
+    case x86_seg_gs:
+    case x86_seg_tr:
+    case x86_seg_ldtr:
+        sync = (v == current);
+        break;
+    default:
+        break;
+    }
+
+    if ( sync )
+        svm_sync_vmcb(v);
 
     switch ( seg )
     {
@@ -632,23 +646,17 @@ static void svm_set_segment_register(str
         memcpy(&vmcb->es, reg, sizeof(*reg));
         break;
     case x86_seg_fs:
-        svm_sync_vmcb(v);
         memcpy(&vmcb->fs, reg, sizeof(*reg));
-        svm_vmload(vmcb);
         break;
     case x86_seg_gs:
-        svm_sync_vmcb(v);
         memcpy(&vmcb->gs, reg, sizeof(*reg));
-        svm_vmload(vmcb);
         break;
     case x86_seg_ss:
         memcpy(&vmcb->ss, reg, sizeof(*reg));
         vmcb->cpl = vmcb->ss.attr.fields.dpl;
         break;
     case x86_seg_tr:
-        svm_sync_vmcb(v);
         memcpy(&vmcb->tr, reg, sizeof(*reg));
-        svm_vmload(vmcb);
         break;
     case x86_seg_gdtr:
         memcpy(&vmcb->gdtr, reg, sizeof(*reg));
@@ -657,13 +665,14 @@ static void svm_set_segment_register(str
         memcpy(&vmcb->idtr, reg, sizeof(*reg));
         break;
     case x86_seg_ldtr:
-        svm_sync_vmcb(v);
         memcpy(&vmcb->ldtr, reg, sizeof(*reg));
-        svm_vmload(vmcb);
         break;
     default:
         BUG();
     }
+
+    if ( sync )
+        svm_vmload(vmcb);
 }
 
 /* Make sure that xen intercepts any FP accesses from current */
@@ -684,47 +693,11 @@ static void svm_stts(struct vcpu *v)
     }
 }
 
-
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     v->arch.hvm_svm.vmcb->tsc_offset = offset;
 }
 
-
-static void svm_init_ap_context(
-    struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
-{
-    struct vcpu *v;
-    struct vmcb_struct *vmcb;
-    cpu_user_regs_t *regs;
-    u16 cs_sel;
-
-    /* We know this is safe because hvm_bringup_ap() does it */
-    v = current->domain->vcpu[vcpuid];
-    vmcb = v->arch.hvm_svm.vmcb;
-    regs = &v->arch.guest_context.user_regs;
-
-    memset(ctxt, 0, sizeof(*ctxt));
-
-    /*
-     * We execute the trampoline code in real mode. The trampoline vector
-     * passed to us is page alligned and is the physical frame number for
-     * the code. We will execute this code in real mode.
-     */
-    cs_sel = trampoline_vector << 8;
-    ctxt->user_regs.eip = 0x0;
-    ctxt->user_regs.cs = cs_sel;
-
-    /*
-     * This is the launch of an AP; set state so that we begin executing
-     * the trampoline code in real-mode.
-     */
-    svm_reset_to_realmode(v, regs);
-    /* Adjust the vmcb's hidden register state. */
-    vmcb->cs.sel = cs_sel;
-    vmcb->cs.base = (cs_sel << 4);
-}
-
 static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
 {
     char *p;
@@ -916,7 +889,6 @@ static struct hvm_function_table svm_fun
     .stts = svm_stts,
     .set_tsc_offset = svm_set_tsc_offset,
     .inject_exception = svm_inject_exception,
-    .init_ap_context = svm_init_ap_context,
     .init_hypercall_page = svm_init_hypercall_page,
     .event_pending = svm_event_pending
 };
@@ -2037,90 +2009,6 @@ void svm_handle_invlpg(const short invlp
     domain_crash(v->domain);
 }
 
-
-/*
- * Reset to realmode causes execution to start at 0xF000:0xFFF0 in
- * 16-bit realmode. Basically, this mimics a processor reset.
- *
- * returns 0 on success, non-zero otherwise
- */
-static int svm_reset_to_realmode(struct vcpu *v,
-                                 struct cpu_user_regs *regs)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    memset(regs, 0, sizeof(struct cpu_user_regs));
-
-    regs->eflags = 2;
-
-    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
-    svm_update_guest_cr(v, 0);
-
-    v->arch.hvm_vcpu.guest_cr[2] = 0;
-    svm_update_guest_cr(v, 2);
-
-    v->arch.hvm_vcpu.guest_cr[4] = 0;
-    svm_update_guest_cr(v, 4);
-
-    vmcb->efer = EFER_SVME;
-
-    /* This will jump to ROMBIOS */
-    regs->eip = 0xFFF0;
-
-    /* Set up the segment registers and all their hidden states. */
-    vmcb->cs.sel = 0xF000;
-    vmcb->cs.attr.bytes = 0x089b;
-    vmcb->cs.limit = 0xffff;
-    vmcb->cs.base = 0x000F0000;
-
-    vmcb->ss.sel = 0x00;
-    vmcb->ss.attr.bytes = 0x0893;
-    vmcb->ss.limit = 0xffff;
-    vmcb->ss.base = 0x00;
-
-    vmcb->ds.sel = 0x00;
-    vmcb->ds.attr.bytes = 0x0893;
-    vmcb->ds.limit = 0xffff;
-    vmcb->ds.base = 0x00;
-
-    vmcb->es.sel = 0x00;
-    vmcb->es.attr.bytes = 0x0893;
-    vmcb->es.limit = 0xffff;
-    vmcb->es.base = 0x00;
-
-    vmcb->fs.sel = 0x00;
-    vmcb->fs.attr.bytes = 0x0893;
-    vmcb->fs.limit = 0xffff;
-    vmcb->fs.base = 0x00;
-
-    vmcb->gs.sel = 0x00;
-    vmcb->gs.attr.bytes = 0x0893;
-    vmcb->gs.limit = 0xffff;
-    vmcb->gs.base = 0x00;
-
-    vmcb->ldtr.sel = 0x00;
-    vmcb->ldtr.attr.bytes = 0x0000;
-    vmcb->ldtr.limit = 0x0;
-    vmcb->ldtr.base = 0x00;
-
-    vmcb->gdtr.sel = 0x00;
-    vmcb->gdtr.attr.bytes = 0x0000;
-    vmcb->gdtr.limit = 0x0;
-    vmcb->gdtr.base = 0x00;
-
-    vmcb->tr.sel = 0;
-    vmcb->tr.attr.bytes = 0;
-    vmcb->tr.limit = 0x0;
-    vmcb->tr.base = 0;
-
-    vmcb->idtr.sel = 0x00;
-    vmcb->idtr.attr.bytes = 0x0000;
-    vmcb->idtr.limit = 0x3ff;
-    vmcb->idtr.base = 0x00;
-
-    return 0;
-}
-
 asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
 {
     unsigned int exit_reason;
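Note: the `svm_set_segment_register()` hunks hoist the repeated `svm_sync_vmcb()`/`svm_vmload()` pairs into a single bracket. FS, GS, TR and LDTR belong to the register set handled by VMLOAD/VMSAVE, so their VMCB copies are stale only while the vCPU is actually running (`v == current`); for a descheduled vCPU, as in the new AP-bringup path, the VMCB copy is already authoritative. A toy model of the hoisted bracket (stub names standing in for the Xen functions, not code from this changeset):

```c
#include <stdio.h>

/* Toy model of the sync bracket in svm_set_segment_register(): decide
 * once whether the hardware-held copy must be spilled and reloaded,
 * instead of repeating the pair inside every switch case. */
enum seg { SEG_CS, SEG_FS, SEG_GS, SEG_TR, SEG_LDTR };

static void sync_vmcb(void) { puts("VMSAVE: spill live regs to VMCB"); }
static void vmload(void)    { puts("VMLOAD: reload live regs from VMCB"); }

static void set_segment(enum seg s, int vcpu_is_current)
{
    int sync = 0;

    switch ( s )
    {
    case SEG_FS: case SEG_GS: case SEG_TR: case SEG_LDTR:
        sync = vcpu_is_current;   /* stale in the VMCB only while running */
        break;
    default:
        break;
    }

    if ( sync ) sync_vmcb();
    puts("write new descriptor into VMCB");
    if ( sync ) vmload();
}

int main(void)
{
    set_segment(SEG_FS, 1);   /* running vCPU: write is bracketed */
    set_segment(SEG_CS, 1);   /* CS lives in the VMCB proper: no bracket */
    return 0;
}
```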
--- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Dec 07 12:48:36 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Dec 07 14:28:07 2007 +0000
@@ -863,7 +863,7 @@ static void vmx_set_segment_register(str
 {
     uint32_t attr;
 
-    ASSERT(v == current);
+    ASSERT((v == current) || !vcpu_runnable(v));
 
     attr = reg->attr.bytes;
     attr = ((attr & 0xf00) << 4) | (attr & 0xff);
@@ -872,6 +872,8 @@ static void vmx_set_segment_register(str
     if ( !reg->attr.fields.p )
         attr |= (1u << 16);
 
+    vmx_vmcs_enter(v);
+
     switch ( seg )
     {
     case x86_seg_cs:
@@ -933,6 +935,8 @@ static void vmx_set_segment_register(str
     default:
         BUG();
     }
+
+    vmx_vmcs_exit(v);
 }
 
 /* Make sure that xen intercepts any FP accesses from current */
@@ -965,17 +969,6 @@ static void vmx_set_tsc_offset(struct vc
     vmx_vmcs_exit(v);
 }
 
-static void vmx_init_ap_context(
-    struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
-{
-    memset(ctxt, 0, sizeof(*ctxt));
-#ifdef VMXASSIST
-    ctxt->user_regs.eip = VMXASSIST_BASE;
-    ctxt->user_regs.edx = vcpuid;
-    ctxt->user_regs.ebx = trampoline_vector;
-#endif
-}
-
 void do_nmi(struct cpu_user_regs *);
 
 static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
@@ -1159,7 +1152,6 @@ static struct hvm_function_table vmx_fun
     .stts = vmx_stts,
     .set_tsc_offset = vmx_set_tsc_offset,
     .inject_exception = vmx_inject_exception,
-    .init_ap_context = vmx_init_ap_context,
     .init_hypercall_page = vmx_init_hypercall_page,
     .event_pending = vmx_event_pending,
     .cpu_up = vmx_cpu_up,
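Note: on the VMX side the relaxed assertion works because `__vmwrite()` always targets the VMCS currently loaded on the CPU, so writes on behalf of a non-running vCPU must be bracketed by `vmx_vmcs_enter()`/`vmx_vmcs_exit()`, which arrange for `v`'s VMCS to be the loaded one and restore the previous state afterwards. A reduced sketch of the pattern as a hypothetical Xen-internal helper (illustrative, not code from this changeset):

```c
/* Hypothetical helper: update one VMCS field of a vCPU that may not be
 * running. vmx_vmcs_enter() pauses v if needed and loads its VMCS on
 * this CPU; vmx_vmcs_exit() undoes that. */
static void set_guest_cs_base(struct vcpu *v, unsigned long base)
{
    ASSERT((v == current) || !vcpu_runnable(v));

    vmx_vmcs_enter(v);
    __vmwrite(GUEST_CS_BASE, base);   /* now guaranteed to hit v's VMCS */
    vmx_vmcs_exit(v);
}
```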
--- a/xen/include/asm-x86/hvm/hvm.h	Fri Dec 07 12:48:36 2007 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h	Fri Dec 07 14:28:07 2007 +0000
@@ -121,9 +121,6 @@ struct hvm_function_table {
     void (*inject_exception)(unsigned int trapnr, int errcode,
                              unsigned long cr2);
 
-    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
-                            int vcpuid, int trampoline_vector);
-
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
     int (*event_pending)(struct vcpu *v);
@@ -239,13 +236,6 @@ void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
 
 static inline void
-hvm_init_ap_context(struct vcpu_guest_context *ctxt,
-                    int vcpuid, int trampoline_vector)
-{
-    return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
-}
-
-static inline void
 hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
 {
     hvm_funcs.inject_exception(trapnr, errcode, cr2);