ia64/xen-unstable
changeset 9089:9bde2889635c
Add SVM 32-bit MSR support (combining the 32-bit and 64-bit functions)
and enable the vlapic for 64-bit.
Signed-off-by: Tom Woller <thomas.woller@amd.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Wed Mar 01 23:39:43 2006 +0100 (2006-03-01) |
parents | 072d51860554 |
children | 6734682d2fd0 |
files | xen/arch/x86/hvm/svm/svm.c |
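The `#ifdef __x86_64__` blocks added around the HVM_DBG_LOG calls in the diff below split the format string because the 64-bit `msr_content` presumably needs a different printf length modifier on each build: `%lx` where `long` is 64 bits wide (x86_64) and `%llx` on a 32-bit build. A minimal standalone sketch of the idea, using a hypothetical `log_msr_content()` helper that is not part of Xen:

```c
/* Sketch only, not from the patch: illustrates the %lx vs %llx split that
 * the new #ifdef __x86_64__ blocks introduce.  A 64-bit value is printed
 * with %lx where long is 64 bits (x86_64) and with %llx on a 32-bit build;
 * a mismatched length modifier yields garbage output. */
#include <stdint.h>
#include <stdio.h>

static void log_msr_content(uint64_t msr_content)   /* hypothetical helper */
{
#ifdef __x86_64__
    printf("msr_content: %lx\n", (unsigned long)msr_content);
#else
    printf("msr_content: %llx\n", (unsigned long long)msr_content);
#endif
}

int main(void)
{
    log_msr_content(0xdeadbeefcafef00dULL);
    return 0;
}
```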
line diff
--- a/xen/arch/x86/hvm/svm/svm.c	Wed Mar 01 23:38:18 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Wed Mar 01 23:39:43 2006 +0100
@@ -247,6 +247,7 @@ void svm_load_msrs(void)
 void svm_restore_msrs(struct vcpu *v)
 {
 }
+#endif
 
 #define IS_CANO_ADDRESS(add) 1
 
@@ -297,8 +298,13 @@ static inline int long_mode_do_msr_read(
         return 0;
     }
 
+#ifdef __x86_64__
     HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n",
             msr_content);
+#else
+    HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %llx\n",
+            msr_content);
+#endif
 
     regs->eax = msr_content & 0xffffffff;
     regs->edx = msr_content >> 32;
@@ -311,12 +317,18 @@ static inline int long_mode_do_msr_write
     struct vcpu *vc = current;
     struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
 
+#ifdef __x86_64__
     HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx msr_content %lx\n",
                 regs->ecx, msr_content);
+#else
+    HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %x msr_content %llx\n",
+                regs->ecx, msr_content);
+#endif
 
     switch (regs->ecx)
     {
     case MSR_EFER:
+#ifdef __x86_64__
         if ((msr_content & EFER_LME) ^ test_bit(SVM_CPU_STATE_LME_ENABLED,
                                                 &vc->arch.hvm_svm.cpu_state))
         {
@@ -337,6 +349,7 @@ static inline int long_mode_do_msr_write
         if ((msr_content ^ vmcb->efer) & EFER_LME)
             msr_content &= ~EFER_LME;
         /* No update for LME/LMA since it have no effect */
+#endif
         vmcb->efer = msr_content | EFER_SVME;
         break;
 
@@ -383,18 +396,6 @@ static inline int long_mode_do_msr_write
     return 1;
 }
 
-#else
-static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
-{
-    return 0;
-}
-
-static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
-{
-    return 0;
-}
-#endif
-
 void svm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -937,10 +938,8 @@ static void svm_vmexit_do_cpuid(struct v
 
     if (input == 1)
     {
-#ifndef __x86_64__
         if ( hvm_apic_support(v->domain) &&
             !vlapic_global_enabled((VLAPIC(v))) )
-#endif
             clear_bit(X86_FEATURE_APIC, &edx);
 
 #if CONFIG_PAGING_LEVELS < 3
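For reference, a minimal standalone sketch (not Xen code; `fake_regs` is a stand-in for Xen's `struct cpu_user_regs`) of how `long_mode_do_msr_read()` in the second hunk hands a 64-bit MSR value back to the guest through the EAX:EDX register pair:

```c
/* Sketch only, not Xen code: how the guest sees a 64-bit MSR value after
 * long_mode_do_msr_read() in the hunk above.  "fake_regs" is a stand-in
 * for Xen's struct cpu_user_regs. */
#include <stdint.h>
#include <stdio.h>

struct fake_regs {
    uint32_t eax, edx;
};

static void return_msr_to_guest(struct fake_regs *regs, uint64_t msr_content)
{
    regs->eax = (uint32_t)(msr_content & 0xffffffff); /* low 32 bits  */
    regs->edx = (uint32_t)(msr_content >> 32);        /* high 32 bits */
}

int main(void)
{
    struct fake_regs regs;
    return_msr_to_guest(&regs, 0x1122334455667788ULL);
    printf("eax=%08x edx=%08x\n", regs.eax, regs.edx); /* eax=55667788 edx=11223344 */
    return 0;
}
```

The final hunk drops the `#ifndef __x86_64__` guard around the vlapic check in `svm_vmexit_do_cpuid()`, so on 64-bit builds the guest's APIC feature bit is now cleared only when the domain's vlapic is not globally enabled, which is the "enable vlapic for 64bit" half of this changeset.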