ia64/xen-unstable

changeset 14273:437774e49735

[SVM] Remove SVM cpu_state and add EFER shadow to svm structure.

The SVM cpu_state field was being used inconsistently, and it makes
more sense to rely instead on the guest's shadow registers to determine
the current guest state.
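
For example, the long-mode check in svm_long_mode_enabled() no longer
consults the cpu_state bitmap but derives its answer from the shadowed
EFER directly (before/after excerpt from this patch):

    /* Before: long-mode status tracked in a separate cpu_state bitmap. */
    return test_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);

    /* After: long-mode status read from the guest's shadowed EFER value. */
    return v->arch.hvm_svm.cpu_shadow_efer & EFER_LMA;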

Signed-off-by: Travis Betak <travis.betak@amd.com>
author Travis Betak <travis.betak@amd.com>
date Mon Mar 05 16:17:25 2007 -0600 (2007-03-05)
parents e85f685fb99f
children e4cffff8cfa7
files xen/arch/x86/hvm/svm/svm.c xen/include/asm-x86/hvm/svm/vmcb.h
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Mar 05 22:42:13 2007 +0000
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Mar 05 16:17:25 2007 -0600
     1.3 @@ -148,30 +148,34 @@ static void svm_store_cpu_guest_regs(
     1.4      }
     1.5  }
     1.6  
     1.7 +static int svm_lme_is_set(struct vcpu *v)
     1.8 +{
     1.9 +    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    1.10 +    return guest_efer & EFER_LME;
    1.11 +}
    1.12 +
    1.13 +static int svm_cr4_pae_is_set(struct vcpu *v)
    1.14 +{
    1.15 +    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    1.16 +    return guest_cr4 & X86_CR4_PAE;
    1.17 +}
    1.18 +
    1.19  static int svm_paging_enabled(struct vcpu *v)
    1.20  {
    1.21 -    unsigned long cr0;
    1.22 -
    1.23 -    cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
    1.24 -
    1.25 -    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
    1.26 +    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
    1.27 +    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
    1.28  }
    1.29  
    1.30  static int svm_pae_enabled(struct vcpu *v)
    1.31  {
    1.32 -    unsigned long cr4;
    1.33 -
    1.34 -    if(!svm_paging_enabled(v))
    1.35 -        return 0;
    1.36 -
    1.37 -    cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    1.38 -
    1.39 -    return (cr4 & X86_CR4_PAE);
    1.40 +    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    1.41 +    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
    1.42  }
    1.43  
    1.44  static int svm_long_mode_enabled(struct vcpu *v)
    1.45  {
    1.46 -    return test_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
    1.47 +    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    1.48 +    return guest_efer & EFER_LMA;
    1.49  }
    1.50  
    1.51  static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
    1.52 @@ -257,18 +261,22 @@ static inline int long_mode_do_msr_write
    1.53  
    1.54  #ifdef __x86_64__
    1.55          /* LME: 0 -> 1 */
    1.56 -        if ( msr_content & EFER_LME &&
    1.57 -             !test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
    1.58 +        if ( (msr_content & EFER_LME) 
    1.59 +             && !svm_lme_is_set(v) )
    1.60          {
    1.61 +            /* 
    1.62 +             * setting EFER.LME is illegal if the guest currently has
     1.63 +             * paging enabled or CR4.PAE is not set
    1.64 +             */
    1.65              if ( svm_paging_enabled(v) ||
    1.66 -                 !test_bit(SVM_CPU_STATE_PAE_ENABLED,
    1.67 -                           &v->arch.hvm_svm.cpu_state) )
    1.68 +                 !svm_cr4_pae_is_set(v) )
    1.69              {
    1.70                  gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
    1.71                           "in paging mode or PAE bit is not set\n");
    1.72                  goto gp_fault;
    1.73              }
    1.74 -            set_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state);
    1.75 +            
    1.76 +            v->arch.hvm_svm.cpu_shadow_efer |= EFER_LME;
    1.77          }
    1.78  
    1.79          /* We have already recorded that we want LME, so it will be set 
    1.80 @@ -468,22 +476,25 @@ int svm_vmcb_restore(struct vcpu *v, str
    1.81              c->cr4);
    1.82  #endif
    1.83  
    1.84 -    if (!svm_paging_enabled(v)) {
    1.85 +    if ( !svm_paging_enabled(v) ) 
    1.86 +    {
    1.87          printk("%s: paging not enabled.", __func__);
    1.88          goto skip_cr3;
    1.89      }
    1.90  
    1.91 -    if (c->cr3 == v->arch.hvm_svm.cpu_cr3) {
    1.92 +    if ( c->cr3 == v->arch.hvm_svm.cpu_cr3 ) 
    1.93 +    {
    1.94          /*
    1.95           * This is simple TLB flush, implying the guest has
    1.96           * removed some translation or changed page attributes.
    1.97           * We simply invalidate the shadow.
    1.98           */
    1.99          mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
   1.100 -        if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
   1.101 +        if ( mfn != pagetable_get_pfn(v->arch.guest_table) ) 
   1.102              goto bad_cr3;
   1.103 -        }
   1.104 -    } else {
   1.105 +    } 
   1.106 +    else 
   1.107 +    {
   1.108          /*
   1.109           * If different, make a shadow. Check if the PDBR is valid
   1.110           * first.
   1.111 @@ -491,9 +502,9 @@ int svm_vmcb_restore(struct vcpu *v, str
   1.112          HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
   1.113          /* current!=vcpu as not called by arch_vmx_do_launch */
   1.114          mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
   1.115 -        if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) {
   1.116 +        if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) ) 
   1.117              goto bad_cr3;
   1.118 -        }
   1.119 +
   1.120          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   1.121          v->arch.guest_table = pagetable_from_pfn(mfn);
   1.122          if (old_base_mfn)
   1.123 @@ -1727,7 +1738,8 @@ static int svm_set_cr0(unsigned long val
   1.124  
   1.125      HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
   1.126  
   1.127 -    if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled) 
   1.128 +    if ( ((value & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
   1.129 +         && !paging_enabled ) 
   1.130      {
   1.131          /* The guest CR3 must be pointing to the guest physical. */
   1.132          mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
   1.133 @@ -1740,18 +1752,16 @@ static int svm_set_cr0(unsigned long val
   1.134          }
   1.135  
   1.136  #if defined(__x86_64__)
   1.137 -        if (test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state) 
   1.138 -            && !test_bit(SVM_CPU_STATE_PAE_ENABLED, 
   1.139 -                         &v->arch.hvm_svm.cpu_state))
   1.140 +        if ( svm_lme_is_set(v) && !svm_cr4_pae_is_set(v) )
   1.141          {
   1.142              HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
   1.143              svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   1.144          }
   1.145  
   1.146 -        if (test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
   1.147 +        if ( svm_lme_is_set(v) )
   1.148          {
   1.149              HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
   1.150 -            set_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
   1.151 +            v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
   1.152              vmcb->efer |= EFER_LMA | EFER_LME;
   1.153          }
   1.154  #endif  /* __x86_64__ */
   1.155 @@ -1791,7 +1801,7 @@ static int svm_set_cr0(unsigned long val
   1.156          if ( svm_long_mode_enabled(v) )
   1.157          {
   1.158              vmcb->efer &= ~EFER_LMA;
   1.159 -            clear_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
   1.160 +            v->arch.hvm_svm.cpu_shadow_efer &= ~EFER_LMA;
   1.161          }
   1.162          /* we should take care of this kind of situation */
   1.163          paging_update_paging_modes(v);
   1.164 @@ -1933,7 +1943,6 @@ static int mov_to_cr(int gpreg, int cr, 
   1.165          old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
   1.166          if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
   1.167          {
   1.168 -            set_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
   1.169              if ( svm_pgbit_test(v) )
   1.170              {
   1.171                  /* The guest is a 32-bit PAE guest. */
   1.172 @@ -1962,15 +1971,13 @@ static int mov_to_cr(int gpreg, int cr, 
   1.173                              v->arch.hvm_svm.cpu_cr3, mfn);
   1.174  #endif
   1.175              }
   1.176 -        }
   1.177 -        else if (value & X86_CR4_PAE) {
   1.178 -            set_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
   1.179 -        } else {
   1.180 -            if (test_bit(SVM_CPU_STATE_LMA_ENABLED,
   1.181 -                         &v->arch.hvm_svm.cpu_state)) {
   1.182 +        } 
   1.183 +        else if ( !(value & X86_CR4_PAE) )
   1.184 +        {
   1.185 +            if ( svm_long_mode_enabled(v) )
   1.186 +            {
   1.187                  svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   1.188              }
   1.189 -            clear_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
   1.190          }
   1.191  
   1.192          v->arch.hvm_svm.cpu_shadow_cr4 = value;
   1.193 @@ -2369,7 +2376,6 @@ static int svm_do_vmmcall_reset_to_realm
   1.194  
   1.195      vmcb->cr4 = SVM_CR4_HOST_MASK;
   1.196      v->arch.hvm_svm.cpu_shadow_cr4 = 0;
   1.197 -    clear_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
   1.198  
   1.199      /* This will jump to ROMBIOS */
   1.200      vmcb->rip = 0xFFF0;
     2.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Mon Mar 05 22:42:13 2007 +0000
     2.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Mon Mar 05 16:17:25 2007 -0600
     2.3 @@ -303,14 +303,6 @@ enum VMEXIT_EXITCODE
     2.4      VMEXIT_INVALID          =  -1
     2.5  };
     2.6  
     2.7 -enum {
     2.8 -    SVM_CPU_STATE_PG_ENABLED=0,
     2.9 -    SVM_CPU_STATE_PAE_ENABLED,
    2.10 -    SVM_CPU_STATE_LME_ENABLED,      
    2.11 -    SVM_CPU_STATE_LMA_ENABLED,
    2.12 -    SVM_CPU_STATE_ASSIST_ENABLED,
    2.13 -};  
    2.14 -
    2.15  /* Definitions of segment state are borrowed by the generic HVM code. */
    2.16  typedef segment_attributes_t svm_segment_attributes_t;
    2.17  typedef segment_register_t svm_segment_register_t;
    2.18 @@ -457,12 +449,12 @@ struct arch_svm_struct {
    2.19      int                 saved_irq_vector;
    2.20      u32                 launch_core;
    2.21      
    2.22 -    unsigned long       flags;      /* VMCB flags */
    2.23 -    unsigned long       cpu_shadow_cr0; /* Guest value for CR0 */
    2.24 -    unsigned long       cpu_shadow_cr4; /* Guest value for CR4 */
    2.25 +    unsigned long       flags;            /* VMCB flags */
    2.26 +    unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */
    2.27 +    unsigned long       cpu_shadow_cr4;   /* Guest value for CR4 */
    2.28 +    unsigned long       cpu_shadow_efer;  /* Guest value for EFER */
    2.29      unsigned long       cpu_cr2;
    2.30      unsigned long       cpu_cr3;
    2.31 -    unsigned long       cpu_state;
    2.32  };
    2.33  
    2.34  struct vmcb_struct *alloc_vmcb(void);