ia64/xen-unstable

changeset 14275:647fcc8f0c83

[SVM] Cleanup guest writes to EFER
author Travis Betak <travis.betak@amd.com>
date Mon Mar 05 16:21:11 2007 -0600 (2007-03-05)
parents e4cffff8cfa7
children 8bae3387b86c
files xen/arch/x86/hvm/svm/svm.c
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Mar 05 16:18:10 2007 -0600
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Mar 05 16:21:11 2007 -0600
     1.3 @@ -258,15 +258,23 @@ static inline int long_mode_do_msr_write
     1.4              goto gp_fault;
     1.5          }
     1.6  
     1.7 +        /* 
     1.8 +         * update the VMCB's EFER with the intended value along with
     1.9 +         * that crucial EFER.SVME bit =)
    1.10 +         */
    1.11 +        vmcb->efer = msr_content | EFER_SVME;
    1.12 +
    1.13  #ifdef __x86_64__
    1.14 -        /* LME: 0 -> 1 */
    1.15 -        if ( (msr_content & EFER_LME) 
    1.16 -             && !svm_lme_is_set(v) )
    1.17 +
    1.18 +        /*
    1.19 +         * Check for EFER.LME transitions from 0->1 or 1->0.  Do the
    1.20 +         * sanity checks and then make sure that both EFER.LME and
    1.21 +         * EFER.LMA are cleared.
    1.22 +         */
    1.23 +        if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
    1.24          {
    1.25 -            /* 
    1.26 -             * setting EFER.LME is illegal if the guest currently has
    1.27 -             * enabled or CR4.PAE is not set
    1.28 -             */
    1.29 +            /* EFER.LME transition from 0 to 1 */
    1.30 +            
    1.31              if ( svm_paging_enabled(v) ||
    1.32                   !svm_cr4_pae_is_set(v) )
    1.33              {
    1.34 @@ -274,18 +282,26 @@ static inline int long_mode_do_msr_write
    1.35                           "in paging mode or PAE bit is not set\n");
    1.36                  goto gp_fault;
    1.37              }
    1.38 -            
    1.39 -            v->arch.hvm_svm.cpu_shadow_efer |= EFER_LME;
    1.40 +
    1.41 +            vmcb->efer &= ~(EFER_LME | EFER_LMA);
    1.42          }
    1.43 -
    1.44 -        /* We have already recorded that we want LME, so it will be set 
    1.45 -         * next time CR0 gets updated. So we clear that bit and continue.
    1.46 -         */
    1.47 -        if ((msr_content ^ vmcb->efer) & EFER_LME)
    1.48 -            msr_content &= ~EFER_LME;  
    1.49 -        /* No update for LME/LMA since it have no effect */
    1.50 -#endif
    1.51 -        vmcb->efer = msr_content | EFER_SVME;
    1.52 +        else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
    1.53 +        {
    1.54 +            /* EFER.LME transition from 1 to 0 */
    1.55 +            
    1.56 +            if ( svm_paging_enabled(v) )
    1.57 +            {
    1.58 +                gdprintk(XENLOG_WARNING, 
    1.59 +                         "Trying to clear EFER.LME while paging enabled\n");
    1.60 +                goto gp_fault;
    1.61 +            }
    1.62 +
    1.63 +            vmcb->efer &= ~(EFER_LME | EFER_LMA);
    1.64 +        }
    1.65 +#endif /* __x86_64__ */
    1.66 +
    1.67 +        /* update the guest EFER's shadow with the intended value */
    1.68 +        v->arch.hvm_svm.cpu_shadow_efer = msr_content;
    1.69          break;
    1.70  
    1.71  #ifdef __x86_64__