ia64/xen-unstable

changeset 9329:5b56d1e1ac8b

Fix VMX EFER write logic. Also some cleanups.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author    kaf24@firebug.cl.cam.ac.uk
date      Sun Mar 19 12:48:51 2006 +0100 (2006-03-19)
parents   e3c7b1e97459
children  f513384a18ee
files     xen/arch/x86/hvm/vmx/vmx.c
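
The substantive fix below is in long_mode_do_msr_write(): the old code injected #GP for an invalid EFER write but then fell through and latched the value anyway, whereas the new code returns immediately, so a rejected write changes no guest state. As orientation, here is a minimal, self-contained sketch of the rules the handler now enforces. The struct and function names are illustrative stand-ins, not Xen code; the EFER bit positions are the architectural ones.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Architectural EFER bit positions (fixed by the x86-64 spec). */
    #define EFER_SCE (1ULL << 0)   /* syscall/sysret enable */
    #define EFER_LME (1ULL << 8)   /* long mode enable */
    #define EFER_LMA (1ULL << 10)  /* long mode active */
    #define EFER_NX  (1ULL << 11)  /* no-execute enable */

    /* Hypothetical stand-in for the per-vcpu state the hypervisor tracks. */
    struct guest_state {
        bool paging_enabled;   /* CR0.PG */
        bool pae_enabled;      /* CR4.PAE */
        bool lme_enabled;      /* latched EFER.LME */
        uint64_t efer;
    };

    /*
     * Returns true if the write is accepted; false means the caller
     * should inject #GP. Crucially, a rejected write changes nothing.
     */
    static bool efer_write_ok(struct guest_state *g, uint64_t new_efer)
    {
        /* Any bit outside LME|LMA|NX|SCE is reserved: reject. */
        if (new_efer & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE))
            return false;

        /* LME may only go 0 -> 1 while paging is off and PAE is on. */
        if ((new_efer & EFER_LME) && !g->lme_enabled) {
            if (g->paging_enabled || !g->pae_enabled)
                return false;
            g->lme_enabled = true;
        }

        g->efer = new_efer;
        return true;
    }

    int main(void)
    {
        struct guest_state g = { .pae_enabled = true };
        printf("set LME before paging: %d\n", efer_write_ok(&g, EFER_LME)); /* 1 */
        printf("set a reserved bit:    %d\n", efer_write_ok(&g, 1ULL << 5)); /* 0 */
        return 0;
    }

The same early-return pattern is applied at the other two rejection points in the patch: non-canonical FS/GS base writes and invalid LME transitions likewise bail out right after injecting #GP.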
line diff
--- a/xen/arch/x86/hvm/vmx/vmx.c	Sun Mar 19 12:41:28 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Sun Mar 19 12:48:51 2006 +0100
@@ -166,113 +166,139 @@ static void vmx_save_init_msrs(void)
 #define IS_CANO_ADDRESS(add) 1
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
-    u64     msr_content = 0;
-    struct vcpu *vc = current;
-    struct vmx_msr_state * msr = &vc->arch.hvm_vmx.msr_content;
-    switch(regs->ecx){
+    u64 msr_content = 0;
+    struct vcpu *v = current;
+    struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
+
+    switch ( regs->ecx ) {
     case MSR_EFER:
+        HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
         msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
-        HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %"PRIx64"\n", msr_content);
-        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &vc->arch.hvm_vmx.cpu_state))
-            msr_content |= 1 << _EFER_LME;

-        if (VMX_LONG_GUEST(vc))
-            msr_content |= 1 << _EFER_LMA;
+        /* the following code may be not needed */
+        if ( test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
+            msr_content |= EFER_LME;
+        else
+            msr_content &= ~EFER_LME;
+
+        if ( VMX_LONG_GUEST(v) )
+            msr_content |= EFER_LMA;
+        else
+            msr_content &= ~EFER_LMA;
         break;
+
     case MSR_FS_BASE:
-        if (!(VMX_LONG_GUEST(vc)))
+        if ( !(VMX_LONG_GUEST(v)) )
             /* XXX should it be GP fault */
             domain_crash_synchronous();
+
         __vmread(GUEST_FS_BASE, &msr_content);
         break;
+
     case MSR_GS_BASE:
-        if (!(VMX_LONG_GUEST(vc)))
+        if ( !(VMX_LONG_GUEST(v)) )
             domain_crash_synchronous();
+
         __vmread(GUEST_GS_BASE, &msr_content);
         break;
+
     case MSR_SHADOW_GS_BASE:
         msr_content = msr->shadow_gs;
         break;

-        CASE_READ_MSR(STAR);
-        CASE_READ_MSR(LSTAR);
-        CASE_READ_MSR(CSTAR);
-        CASE_READ_MSR(SYSCALL_MASK);
+    CASE_READ_MSR(STAR);
+    CASE_READ_MSR(LSTAR);
+    CASE_READ_MSR(CSTAR);
+    CASE_READ_MSR(SYSCALL_MASK);
+
     default:
         return 0;
     }
-    HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n",
-                msr_content);
+
+    HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: 0x%"PRIx64, msr_content);
+
     regs->eax = msr_content & 0xffffffff;
     regs->edx = msr_content >> 32;
+
     return 1;
 }

 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
-    u64     msr_content = regs->eax | ((u64)regs->edx << 32);
-    struct vcpu *vc = current;
-    struct vmx_msr_state * msr = &vc->arch.hvm_vmx.msr_content;
-    struct vmx_msr_state * host_state =
-        &percpu_msr[smp_processor_id()];
+    u64 msr_content = regs->eax | ((u64)regs->edx << 32);
+    struct vcpu *v = current;
+    struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
+    struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];

-    HVM_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx "
-                "msr_content %"PRIx64"\n",
+    HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n",
                 (unsigned long)regs->ecx, msr_content);

-    switch (regs->ecx){
+    switch ( regs->ecx ) {
     case MSR_EFER:
         /* offending reserved bit will cause #GP */
-        if ( msr_content &
-                ~( EFER_LME | EFER_LMA | EFER_NX | EFER_SCE ) )
-             vmx_inject_exception(vc, TRAP_gp_fault, 0);
+        if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+        {
+            printk("trying to set reserved bit in EFER\n");
+            vmx_inject_exception(v, TRAP_gp_fault, 0);
+            return 0;
+        }

-        if ((msr_content & EFER_LME) ^
-            test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &vc->arch.hvm_vmx.cpu_state)){
-            if ( vmx_paging_enabled(vc) ||
+        /* LME: 0 -> 1 */
+        if ( msr_content & EFER_LME &&
+             !test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
+        {
+            if ( vmx_paging_enabled(v) ||
                  !test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                           &vc->arch.hvm_vmx.cpu_state)) {
-                vmx_inject_exception(vc, TRAP_gp_fault, 0);
+                           &v->arch.hvm_vmx.cpu_state) )
+            {
+                printk("trying to set LME bit when "
+                       "in paging mode or PAE bit is not set\n");
+                vmx_inject_exception(v, TRAP_gp_fault, 0);
+                return 0;
             }
+
+            set_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state);
         }
-        if (msr_content & EFER_LME)
-            set_bit(VMX_CPU_STATE_LME_ENABLED,
-                    &vc->arch.hvm_vmx.cpu_state);

-        msr->msr_items[VMX_INDEX_MSR_EFER] =
-            msr_content;
+        msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
         break;

     case MSR_FS_BASE:
     case MSR_GS_BASE:
-        if (!(VMX_LONG_GUEST(vc)))
+        if ( !(VMX_LONG_GUEST(v)) )
             domain_crash_synchronous();
-        if (!IS_CANO_ADDRESS(msr_content)){
+
+        if ( !IS_CANO_ADDRESS(msr_content) )
+        {
             HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
-            vmx_inject_exception(vc, TRAP_gp_fault, 0);
+            vmx_inject_exception(v, TRAP_gp_fault, 0);
+            return 0;
         }
-        if (regs->ecx == MSR_FS_BASE)
+
+        if ( regs->ecx == MSR_FS_BASE )
             __vmwrite(GUEST_FS_BASE, msr_content);
         else
             __vmwrite(GUEST_GS_BASE, msr_content);
+
         break;

     case MSR_SHADOW_GS_BASE:
-        if (!(VMX_LONG_GUEST(vc)))
+        if ( !(VMX_LONG_GUEST(v)) )
             domain_crash_synchronous();
-        vc->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
+
+        v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
         wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
         break;

-        CASE_WRITE_MSR(STAR);
-        CASE_WRITE_MSR(LSTAR);
-        CASE_WRITE_MSR(CSTAR);
-        CASE_WRITE_MSR(SYSCALL_MASK);
+    CASE_WRITE_MSR(STAR);
+    CASE_WRITE_MSR(LSTAR);
+    CASE_WRITE_MSR(CSTAR);
+    CASE_WRITE_MSR(SYSCALL_MASK);
+
     default:
         return 0;
     }
+
     return 1;
 }
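
On the read side, the patch makes the EFER image handed back to the guest consistent in both directions: LME and LMA are now cleared as well as set from the tracked state (the in-diff comment notes the clearing may be redundant). Architecturally, EFER.LMA is not meaningfully software-writable; it reflects whether long mode is actually active, i.e. LME together with paging, which is what VMX_LONG_GUEST() tests. A sketch of that derivation, with hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LME (1ULL << 8)
    #define EFER_LMA (1ULL << 10)

    /*
     * Illustrative only: recompute the LME/LMA view a guest should see,
     * assuming lme_latched mirrors the guest's EFER.LME and paging_on
     * mirrors CR0.PG. LMA is derived, never stored independently.
     */
    static uint64_t efer_read_view(uint64_t stored_efer, bool lme_latched,
                                   bool paging_on)
    {
        uint64_t efer = stored_efer;

        if (lme_latched)
            efer |= EFER_LME;
        else
            efer &= ~EFER_LME;

        /* Long mode is active only when LME is set and paging is enabled. */
        if (lme_latched && paging_on)
            efer |= EFER_LMA;
        else
            efer &= ~EFER_LMA;

        return efer;
    }

    int main(void)
    {
        printf("LME set, paging on : 0x%llx\n",
               (unsigned long long)efer_read_view(0, true, true));   /* LME|LMA */
        printf("LME set, paging off: 0x%llx\n",
               (unsigned long long)efer_read_view(0, true, false));  /* LME only */
        return 0;
    }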