ia64/xen-unstable

changeset 14718:bc43ac9631e3

hvm svm: Avoid intercepts when accessing a number of architectural MSRs.

1. Removes intercept of guest-specific MSRs
MSR_IA32_SYSENTER_{CS,ESP,EIP}, MSR_{L,C,}STAR, MSR_SYSCALL_MASK,
MSR_SHADOW_GS_BASE.

2. Removes the code that handled those intercepts, as well as the
FS/GS-base handling. Since long_mode_do_msr_read had been reduced to
essentially a switch with a single case plus a default, I removed the
entire function and inlined the remaining case statement into the
original MSR-access function.

Signed-off-by: Mats Petersson <mats.petersson@amd.com>
author kfraser@localhost.localdomain
date Wed Apr 04 11:57:28 2007 +0100 (2007-04-04)
parents c278b1c580db
children e74bf0303658
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Apr 04 11:49:37 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Apr 04 11:57:28 2007 +0100
     1.3 @@ -131,66 +131,6 @@ static void svm_store_cpu_guest_regs(
     1.4      }
     1.5  }
     1.6  
     1.7 -
     1.8 -static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
     1.9 -{
    1.10 -    u64 msr_content = 0;
    1.11 -    struct vcpu *v = current;
    1.12 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.13 -
    1.14 -    switch ((u32)regs->ecx)
    1.15 -    {
    1.16 -    case MSR_EFER:
    1.17 -        msr_content = v->arch.hvm_svm.cpu_shadow_efer;
    1.18 -        break;
    1.19 -
    1.20 -#ifdef __x86_64__
    1.21 -    case MSR_FS_BASE:
    1.22 -        msr_content = vmcb->fs.base;
    1.23 -        goto check_long_mode;
    1.24 -
    1.25 -    case MSR_GS_BASE:
    1.26 -        msr_content = vmcb->gs.base;
    1.27 -        goto check_long_mode;
    1.28 -
    1.29 -    case MSR_SHADOW_GS_BASE:
    1.30 -        msr_content = vmcb->kerngsbase;
    1.31 -    check_long_mode:
    1.32 -        if ( !svm_long_mode_enabled(v) )
    1.33 -        {
    1.34 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
    1.35 -            return 0;
    1.36 -        }
    1.37 -        break;
    1.38 -#endif
    1.39 -
    1.40 -    case MSR_STAR:
    1.41 -        msr_content = vmcb->star;
    1.42 -        break;
    1.43 - 
    1.44 -    case MSR_LSTAR:
    1.45 -        msr_content = vmcb->lstar;
    1.46 -        break;
    1.47 - 
    1.48 -    case MSR_CSTAR:
    1.49 -        msr_content = vmcb->cstar;
    1.50 -        break;
    1.51 - 
    1.52 -    case MSR_SYSCALL_MASK:
    1.53 -        msr_content = vmcb->sfmask;
    1.54 -        break;
    1.55 -    default:
    1.56 -        return 0;
    1.57 -    }
    1.58 -
    1.59 -    HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: %"PRIx64"\n",
    1.60 -                msr_content);
    1.61 -
    1.62 -    regs->eax = (u32)(msr_content >>  0);
    1.63 -    regs->edx = (u32)(msr_content >> 32);
    1.64 -    return 1;
    1.65 -}
    1.66 -
    1.67  static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
    1.68  {
    1.69      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
    1.70 @@ -242,52 +182,12 @@ static inline int long_mode_do_msr_write
    1.71  
    1.72          break;
    1.73  
    1.74 -#ifdef __x86_64__
    1.75 -    case MSR_FS_BASE:
    1.76 -    case MSR_GS_BASE:
    1.77 -    case MSR_SHADOW_GS_BASE:
    1.78 -        if ( !svm_long_mode_enabled(v) )
    1.79 -            goto gp_fault;
    1.80 -
    1.81 -        if ( !is_canonical_address(msr_content) )
    1.82 -            goto uncanonical_address;
    1.83 -
    1.84 -        if ( ecx == MSR_FS_BASE )
    1.85 -            vmcb->fs.base = msr_content;
    1.86 -        else if ( ecx == MSR_GS_BASE )
    1.87 -            vmcb->gs.base = msr_content;
    1.88 -        else
    1.89 -            vmcb->kerngsbase = msr_content;
    1.90 -        break;
    1.91 -#endif
    1.92 - 
    1.93 -    case MSR_STAR:
    1.94 -        vmcb->star = msr_content;
    1.95 -        break;
    1.96 - 
    1.97 -    case MSR_LSTAR:
    1.98 -    case MSR_CSTAR:
    1.99 -        if ( !is_canonical_address(msr_content) )
   1.100 -            goto uncanonical_address;
   1.101 -
   1.102 -        if ( ecx == MSR_LSTAR )
   1.103 -            vmcb->lstar = msr_content;
   1.104 -        else
   1.105 -            vmcb->cstar = msr_content;
   1.106 -        break;
   1.107 - 
   1.108 -    case MSR_SYSCALL_MASK:
   1.109 -        vmcb->sfmask = msr_content;
   1.110 -        break;
   1.111 -
   1.112      default:
   1.113          return 0;
   1.114      }
   1.115  
   1.116      return 1;
   1.117  
   1.118 - uncanonical_address:
   1.119 -    HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx);
   1.120   gp_fault:
   1.121      svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   1.122      return 0;
   1.123 @@ -2013,22 +1913,14 @@ static inline void svm_do_msr_access(
   1.124          case MSR_IA32_TIME_STAMP_COUNTER:
   1.125              msr_content = hvm_get_guest_time(v);
   1.126              break;
   1.127 -        case MSR_IA32_SYSENTER_CS:
   1.128 -            msr_content = vmcb->sysenter_cs;
   1.129 -            break;
   1.130 -        case MSR_IA32_SYSENTER_ESP: 
   1.131 -            msr_content = vmcb->sysenter_esp;
   1.132 -            break;
   1.133 -        case MSR_IA32_SYSENTER_EIP:     
   1.134 -            msr_content = vmcb->sysenter_eip;
   1.135 -            break;
   1.136          case MSR_IA32_APICBASE:
   1.137              msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
   1.138              break;
   1.139 +        case MSR_EFER:
   1.140 +            msr_content = v->arch.hvm_svm.cpu_shadow_efer;
   1.141 +            break;
   1.142 +
   1.143          default:
   1.144 -            if (long_mode_do_msr_read(regs))
   1.145 -                goto done;
   1.146 -
   1.147              if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
   1.148                   rdmsr_safe(ecx, eax, edx) == 0 )
   1.149              {
   1.150 @@ -2061,15 +1953,6 @@ static inline void svm_do_msr_access(
   1.151              hvm_set_guest_time(v, msr_content);
   1.152              pt_reset(v);
   1.153              break;
   1.154 -        case MSR_IA32_SYSENTER_CS:
   1.155 -            vmcb->sysenter_cs = msr_content;
   1.156 -            break;
   1.157 -        case MSR_IA32_SYSENTER_ESP: 
   1.158 -            vmcb->sysenter_esp = msr_content;
   1.159 -            break;
   1.160 -        case MSR_IA32_SYSENTER_EIP:     
   1.161 -            vmcb->sysenter_eip = msr_content;
   1.162 -            break;
   1.163          case MSR_IA32_APICBASE:
   1.164              vlapic_msr_set(vcpu_vlapic(v), msr_content);
   1.165              break;
     2.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Apr 04 11:49:37 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Apr 04 11:57:28 2007 +0100
     2.3 @@ -141,6 +141,14 @@ static int construct_vmcb(struct vcpu *v
     2.4  
     2.5      disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_FS_BASE);
     2.6      disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_GS_BASE);
     2.7 +    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SHADOW_GS_BASE);
     2.8 +    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_CSTAR);
     2.9 +    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_LSTAR);
    2.10 +    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_STAR);
    2.11 +    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SYSCALL_MASK);
    2.12 +    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_CS);
    2.13 +    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_ESP);
    2.14 +    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_EIP);
    2.15  
    2.16      vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
    2.17      vmcb->iopm_base_pa  = (u64)virt_to_maddr(hvm_io_bitmap);