ia64/xen-unstable

changeset 11759:646a120334ef

[VMX] Remove vcpu->arch.hvm_vmx.cpu_state

The flags in hvm_vmx.cpu_state were shadows of shadows of bits in
guest CR0_READ_SHADOW, CR4_READ_SHADOW and EFER, and were not being
kept in sync with the other shadows if changes were made while
running under vmxassist. Just use the full shadows of those control
regs instead, and replace cpu_state with a single vmxassist flag.
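
For reference, the replacement predicates added in xen/include/asm-x86/hvm/vmx/vmx.h
(see the diff below) derive the guest's mode directly from the CR0/CR4 read shadows
and the EFER shadow instead of the old cpu_state bits; a condensed excerpt of the
patch, not the full change:

    static inline int vmx_paging_enabled(struct vcpu *v)
    {
        unsigned long cr0;
        /* CR0_READ_SHADOW holds the CR0 value the guest believes it has. */
        __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
        return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
    }

    static inline int vmx_pae_enabled(struct vcpu *v)
    {
        unsigned long cr4;
        /* Likewise, CR4_READ_SHADOW is the guest-visible CR4. */
        __vmread_vcpu(v, CR4_READ_SHADOW, &cr4);
        return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
    }

    static inline int vmx_long_mode_enabled(struct vcpu *v)
    {
        /* Long mode is active iff the guest's shadow EFER has LMA set. */
        u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
        return efer & EFER_LMA;
    }

The only state that is not recoverable from those shadows, whether vmxassist is
currently in control, becomes a single vmxassist_enabled bit in arch_vmx_struct.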

Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <tim.deegan@xensource.com>
date Thu Oct 05 16:21:39 2006 +0100 (2006-10-05)
parents b3cba293e61a
children 79b56c02b836
files xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/hvm/vmx/vmx.h
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Oct 05 16:05:12 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Oct 05 16:21:39 2006 +0100
     1.3 @@ -226,21 +226,10 @@ static inline int long_mode_do_msr_read(
     1.4      case MSR_EFER:
     1.5          HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
     1.6          msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
     1.7 -
     1.8 -        /* the following code may be not needed */
     1.9 -        if ( test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
    1.10 -            msr_content |= EFER_LME;
    1.11 -        else
    1.12 -            msr_content &= ~EFER_LME;
    1.13 -
    1.14 -        if ( VMX_LONG_GUEST(v) )
    1.15 -            msr_content |= EFER_LMA;
    1.16 -        else
    1.17 -            msr_content &= ~EFER_LMA;
    1.18          break;
    1.19  
    1.20      case MSR_FS_BASE:
    1.21 -        if ( !(VMX_LONG_GUEST(v)) )
    1.22 +        if ( !(vmx_long_mode_enabled(v)) )
    1.23              /* XXX should it be GP fault */
    1.24              domain_crash_synchronous();
    1.25  
    1.26 @@ -248,7 +237,7 @@ static inline int long_mode_do_msr_read(
    1.27          break;
    1.28  
    1.29      case MSR_GS_BASE:
    1.30 -        if ( !(VMX_LONG_GUEST(v)) )
    1.31 +        if ( !(vmx_long_mode_enabled(v)) )
    1.32              domain_crash_synchronous();
    1.33  
    1.34          __vmread(GUEST_GS_BASE, &msr_content);
    1.35 @@ -296,21 +285,25 @@ static inline int long_mode_do_msr_write
    1.36              return 0;
    1.37          }
    1.38  
    1.39 -        /* LME: 0 -> 1 */
    1.40 -        if ( msr_content & EFER_LME &&
    1.41 -             !test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
    1.42 +        if ( (msr_content & EFER_LME)
    1.43 +             &&  !(msr->msr_items[VMX_INDEX_MSR_EFER] & EFER_LME) )
    1.44          {
    1.45 -            if ( vmx_paging_enabled(v) ||
    1.46 -                 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
    1.47 -                           &v->arch.hvm_vmx.cpu_state) )
    1.48 +            if ( unlikely(vmx_paging_enabled(v)) )
    1.49              {
    1.50 -                printk("Trying to set LME bit when "
    1.51 -                       "in paging mode or PAE bit is not set\n");
    1.52 +                printk("Trying to set EFER.LME with paging enabled\n");
    1.53                  vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    1.54                  return 0;
    1.55              }
    1.56 -
    1.57 -            set_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state);
    1.58 +        }
    1.59 +        else if ( !(msr_content & EFER_LME)
    1.60 +                  && (msr->msr_items[VMX_INDEX_MSR_EFER] & EFER_LME) )
    1.61 +        {
    1.62 +            if ( unlikely(vmx_paging_enabled(v)) )
    1.63 +            {
    1.64 +                printk("Trying to clear EFER.LME with paging enabled\n");
    1.65 +                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    1.66 +                return 0;
    1.67 +            }
    1.68          }
    1.69  
    1.70          msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
    1.71 @@ -318,7 +311,7 @@ static inline int long_mode_do_msr_write
    1.72  
    1.73      case MSR_FS_BASE:
    1.74      case MSR_GS_BASE:
    1.75 -        if ( !(VMX_LONG_GUEST(v)) )
    1.76 +        if ( !(vmx_long_mode_enabled(v)) )
    1.77              domain_crash_synchronous();
    1.78  
    1.79          if ( !IS_CANO_ADDRESS(msr_content) )
    1.80 @@ -336,7 +329,7 @@ static inline int long_mode_do_msr_write
    1.81          break;
    1.82  
    1.83      case MSR_SHADOW_GS_BASE:
    1.84 -        if ( !(VMX_LONG_GUEST(v)) )
    1.85 +        if ( !(vmx_long_mode_enabled(v)) )
    1.86              domain_crash_synchronous();
    1.87  
    1.88          v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
    1.89 @@ -1307,7 +1300,6 @@ static int vmx_world_restore(struct vcpu
    1.90  
    1.91   skip_cr3:
    1.92  
    1.93 -    shadow_update_paging_modes(v);
    1.94      if (!vmx_paging_enabled(v))
    1.95          HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
    1.96      else
    1.97 @@ -1364,6 +1356,8 @@ static int vmx_world_restore(struct vcpu
    1.98      error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
    1.99      error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
   1.100  
   1.101 +    shadow_update_paging_modes(v);
   1.102 +
   1.103      return !error;
   1.104  }
   1.105  
   1.106 @@ -1408,6 +1402,7 @@ static int vmx_assist(struct vcpu *v, in
   1.107                  goto error;
   1.108              if (!vmx_world_restore(v, &c))
   1.109                  goto error;
   1.110 +            v->arch.hvm_vmx.vmxassist_enabled = 1;            
   1.111              return 1;
   1.112          }
   1.113          break;
   1.114 @@ -1425,6 +1420,7 @@ static int vmx_assist(struct vcpu *v, in
   1.115                  goto error;
   1.116              if (!vmx_world_restore(v, &c))
   1.117                  goto error;
   1.118 +            v->arch.hvm_vmx.vmxassist_enabled = 0;
   1.119              return 1;
   1.120          }
   1.121          break;
   1.122 @@ -1480,26 +1476,23 @@ static int vmx_set_cr0(unsigned long val
   1.123          }
   1.124  
   1.125  #if defined(__x86_64__)
   1.126 -        if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
   1.127 -                      &v->arch.hvm_vmx.cpu_state) &&
   1.128 -             !test_bit(VMX_CPU_STATE_PAE_ENABLED,
   1.129 -                       &v->arch.hvm_vmx.cpu_state) )
   1.130 +        if ( vmx_lme_is_set(v) )
   1.131          {
   1.132 -            HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enabled\n");
   1.133 -            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   1.134 -        }
   1.135 -
   1.136 -        if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
   1.137 -                     &v->arch.hvm_vmx.cpu_state) )
   1.138 -        {
   1.139 -            /* Here the PAE is should be opened */
   1.140 -            HVM_DBG_LOG(DBG_LEVEL_1, "Enable long mode\n");
   1.141 -            set_bit(VMX_CPU_STATE_LMA_ENABLED,
   1.142 -                    &v->arch.hvm_vmx.cpu_state);
   1.143 -
   1.144 -            __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
   1.145 -            vm_entry_value |= VM_ENTRY_IA32E_MODE;
   1.146 -            __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   1.147 +            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
   1.148 +            {
   1.149 +                HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
   1.150 +                            "with EFER.LME set but not CR4.PAE\n");
   1.151 +                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   1.152 +            }
   1.153 +            else 
   1.154 +            {
   1.155 +                HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
   1.156 +                v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
   1.157 +                    |= EFER_LMA;
   1.158 +                __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
   1.159 +                vm_entry_value |= VM_ENTRY_IA32E_MODE;
   1.160 +                __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   1.161 +            }
   1.162          }
   1.163  #endif
   1.164  
   1.165 @@ -1546,11 +1539,10 @@ static int vmx_set_cr0(unsigned long val
   1.166               * Disable paging here.
   1.167               * Same to PE == 1 && PG == 0
   1.168               */
   1.169 -            if ( test_bit(VMX_CPU_STATE_LMA_ENABLED,
   1.170 -                          &v->arch.hvm_vmx.cpu_state) )
   1.171 +            if ( vmx_long_mode_enabled(v) )
   1.172              {
   1.173 -                clear_bit(VMX_CPU_STATE_LMA_ENABLED,
   1.174 -                          &v->arch.hvm_vmx.cpu_state);
   1.175 +                v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
   1.176 +                    &= ~EFER_LMA;
   1.177                  __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
   1.178                  vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
   1.179                  __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   1.180 @@ -1559,22 +1551,19 @@ static int vmx_set_cr0(unsigned long val
   1.181  
   1.182          if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
   1.183          {
   1.184 -            set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.hvm_vmx.cpu_state);
   1.185              __vmread(GUEST_RIP, &eip);
   1.186              HVM_DBG_LOG(DBG_LEVEL_1,
   1.187                          "Transfering control to vmxassist %%eip 0x%lx\n", eip);
   1.188              return 0; /* do not update eip! */
   1.189          }
   1.190 -    } else if ( test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
   1.191 -                         &v->arch.hvm_vmx.cpu_state) )
   1.192 +    }
   1.193 +    else if ( v->arch.hvm_vmx.vmxassist_enabled )
   1.194      {
   1.195          __vmread(GUEST_RIP, &eip);
   1.196          HVM_DBG_LOG(DBG_LEVEL_1,
   1.197                      "Enabling CR0.PE at %%eip 0x%lx\n", eip);
   1.198          if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
   1.199          {
   1.200 -            clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
   1.201 -                      &v->arch.hvm_vmx.cpu_state);
   1.202              __vmread(GUEST_RIP, &eip);
   1.203              HVM_DBG_LOG(DBG_LEVEL_1,
   1.204                          "Restoring to %%eip 0x%lx\n", eip);
   1.205 @@ -1705,8 +1694,6 @@ static int mov_to_cr(int gp, int cr, str
   1.206  
   1.207          if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
   1.208          {
   1.209 -            set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
   1.210 -
   1.211              if ( vmx_pgbit_test(v) )
   1.212              {
   1.213                  /* The guest is a 32-bit PAE guest. */
   1.214 @@ -1745,14 +1732,14 @@ static int mov_to_cr(int gp, int cr, str
   1.215  #endif
   1.216              }
   1.217          }
   1.218 -        else if ( value & X86_CR4_PAE )
   1.219 -            set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
   1.220 -        else
   1.221 +        else if ( !(value & X86_CR4_PAE) )
   1.222          {
   1.223 -            if ( test_bit(VMX_CPU_STATE_LMA_ENABLED, &v->arch.hvm_vmx.cpu_state) )
   1.224 +            if ( unlikely(vmx_long_mode_enabled(v)) )
   1.225 +            {
   1.226 +                HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
   1.227 +                            "EFER.LMA is set\n");
   1.228                  vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   1.229 -
   1.230 -            clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
   1.231 +            }
   1.232          }
   1.233  
   1.234          __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
     2.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Oct 05 16:05:12 2006 +0100
     2.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Oct 05 16:21:39 2006 +0100
     2.3 @@ -29,19 +29,6 @@ extern void vmcs_dump_vcpu(void);
     2.4  extern void vmx_init_vmcs_config(void);
     2.5  extern void setup_vmcs_dump(void);
     2.6  
     2.7 -enum {
     2.8 -    VMX_CPU_STATE_PAE_ENABLED=0,
     2.9 -    VMX_CPU_STATE_LME_ENABLED,
    2.10 -    VMX_CPU_STATE_LMA_ENABLED,
    2.11 -    VMX_CPU_STATE_ASSIST_ENABLED,
    2.12 -};
    2.13 -
    2.14 -#define VMX_LONG_GUEST(ed)    \
    2.15 -  (test_bit(VMX_CPU_STATE_LMA_ENABLED, &ed->arch.hvm_vmx.cpu_state))
    2.16 -
    2.17 -#define VMX_PAE_GUEST(ed)       \
    2.18 -  (test_bit(VMX_CPU_STATE_PAE_ENABLED, &ed->arch.hvm_vmx.cpu_state))
    2.19 -
    2.20  struct vmcs_struct {
    2.21      u32 vmcs_revision_id;
    2.22      unsigned char data [0]; /* vmcs size is read from MSR */
    2.23 @@ -93,10 +80,10 @@ struct arch_vmx_struct {
    2.24      unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
    2.25      unsigned long        cpu_cr2; /* save CR2 */
    2.26      unsigned long        cpu_cr3;
    2.27 -    unsigned long        cpu_state;
    2.28      unsigned long        cpu_based_exec_control;
    2.29      struct vmx_msr_state msr_content;
    2.30      void                *io_bitmap_a, *io_bitmap_b;
    2.31 +    unsigned long        vmxassist_enabled:1; 
    2.32  };
    2.33  
    2.34  #define vmx_schedule_tail(next)         \
     3.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Oct 05 16:05:12 2006 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Oct 05 16:21:39 2006 +0100
     3.3 @@ -335,26 +335,30 @@ static inline int __vmxon (u64 addr)
     3.4      return rc;
     3.5  }
     3.6  
     3.7 -/* Works only for vcpu == current */
     3.8  static inline int vmx_paging_enabled(struct vcpu *v)
     3.9  {
    3.10      unsigned long cr0;
    3.11 -
    3.12      __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    3.13 -    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
    3.14 -}
    3.15 -
    3.16 -/* Works only for vcpu == current */
    3.17 -static inline int vmx_long_mode_enabled(struct vcpu *v)
    3.18 -{
    3.19 -    ASSERT(v == current);
    3.20 -    return VMX_LONG_GUEST(current);
    3.21 +    return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
    3.22  }
    3.23  
    3.24  static inline int vmx_pae_enabled(struct vcpu *v)
    3.25  {
    3.26 -    ASSERT(v == current);
    3.27 -    return VMX_PAE_GUEST(current);
    3.28 +    unsigned long cr4;
    3.29 +    __vmread_vcpu(v, CR4_READ_SHADOW, &cr4);
    3.30 +    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
    3.31 +}
    3.32 +
    3.33 +static inline int vmx_long_mode_enabled(struct vcpu *v)
    3.34 +{
    3.35 +    u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
    3.36 +    return efer & EFER_LMA;
    3.37 +}
    3.38 +
    3.39 +static inline int vmx_lme_is_set(struct vcpu *v)
    3.40 +{
    3.41 +    u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
    3.42 +    return efer & EFER_LME;
    3.43  }
    3.44  
    3.45  /* Works only for vcpu == current */