ia64/xen-unstable

changeset 17893:3da148fb7d9b

vmx: Clean up and fix guest MSR load/save handling:
1. msr_bitmap/msr_area/host_msr_area must be freed when a vcpu is
destroyed.
2. vmx_create_vmcs()/vmx_destroy_vmcs() are only ever called once per
vcpu, and can hence be simplified slightly.
3. Change the vmx_*_msr() interfaces to make it crystal clear that
they operate only on the current vcpu (hence safe with respect to
__vmwrite(), and also free of cross-vcpu races).
4. Change the vmx_add_*_msr() implementations to make it crystal
clear that msr_area is not dereferenced before it is allocated.

Only (1) is a bug fix; (2)-(4) are for code clarity.
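
For illustration, a minimal self-contained C sketch of the
allocate-before-use pattern introduced by (4), modelled on the new
vmx_add_guest_msr() in the diff below. The struct layout, the calloc()
allocation and the AREA_* constants are simplified stand-ins for the
real vmx_msr_entry array on a xenheap page; only the control flow
mirrors the hypervisor code.

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct msr_entry {
        uint32_t index;   /* MSR number */
        uint32_t mbz;     /* "must be zero", per the VMX area format */
        uint64_t data;    /* MSR value */
    };

    #define AREA_BYTES 4096                      /* one page, as in Xen */
    #define AREA_SLOTS (AREA_BYTES / sizeof(struct msr_entry))

    static struct msr_entry *msr_area;           /* lazily allocated */
    static unsigned int msr_count;

    static int add_guest_msr(uint32_t msr)
    {
        unsigned int i;

        /* Allocate up front: msr_area is never dereferenced while NULL. */
        if ( msr_area == NULL )
        {
            if ( (msr_area = calloc(AREA_SLOTS, sizeof(*msr_area))) == NULL )
                return -ENOMEM;
            /* The hypervisor also __vmwrite()s the area address here. */
        }

        for ( i = 0; i < msr_count; i++ )
            if ( msr_area[i].index == msr )
                return 0;                        /* already tracked */

        if ( msr_count == AREA_SLOTS )
            return -ENOSPC;

        msr_area[msr_count].index = msr;
        msr_area[msr_count].mbz   = 0;
        msr_area[msr_count].data  = 0;
        msr_count++;
        return 0;
    }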

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 19 11:09:10 2008 +0100 (2008-06-19)
parents b55f6d42668d
children d3a87899985d
files xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/vpmu_core2.c xen/include/asm-x86/hvm/vmx/vmcs.h
line diff
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jun 18 14:17:10 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Thu Jun 19 11:09:10 2008 +0100
@@ -677,10 +677,11 @@ static int construct_vmcs(struct vcpu *v
     return 0;
 }
 
-int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val)
+int vmx_read_guest_msr(u32 msr, u64 *val)
 {
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    const struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
 
     for ( i = 0; i < msr_count; i++ )
     {
@@ -694,10 +695,11 @@ int vmx_read_guest_msr(struct vcpu *v, u
     return -ESRCH;
 }
 
-int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val)
+int vmx_write_guest_msr(u32 msr, u64 val)
 {
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
 
     for ( i = 0; i < msr_count; i++ )
     {
@@ -711,10 +713,20 @@ int vmx_write_guest_msr(struct vcpu *v, 
     return -ESRCH;
 }
 
-int vmx_add_guest_msr(struct vcpu *v, u32 msr)
+int vmx_add_guest_msr(u32 msr)
 {
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+
+    if ( msr_area == NULL )
+    {
+        if ( (msr_area = alloc_xenheap_page()) == NULL )
+            return -ENOMEM;
+        curr->arch.hvm_vmx.msr_area = msr_area;
+        __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
+        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
+    }
 
     for ( i = 0; i < msr_count; i++ )
         if ( msr_area[i].index == msr )
@@ -723,29 +735,29 @@ int vmx_add_guest_msr(struct vcpu *v, u3
     if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
         return -ENOSPC;
 
-    if ( msr_area == NULL )
-    {
-        if ( (msr_area = alloc_xenheap_page()) == NULL )
-            return -ENOMEM;
-        v->arch.hvm_vmx.msr_area = msr_area;
-        __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
-        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
-    }
-
     msr_area[msr_count].index = msr;
     msr_area[msr_count].mbz   = 0;
     msr_area[msr_count].data  = 0;
-    v->arch.hvm_vmx.msr_count = ++msr_count;
+    curr->arch.hvm_vmx.msr_count = ++msr_count;
     __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
     __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
 
     return 0;
 }
 
-int vmx_add_host_load_msr(struct vcpu *v, u32 msr)
+int vmx_add_host_load_msr(u32 msr)
 {
-    unsigned int i, msr_count = v->arch.hvm_vmx.host_msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.host_msr_area;
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
+
+    if ( msr_area == NULL )
+    {
+        if ( (msr_area = alloc_xenheap_page()) == NULL )
+            return -ENOMEM;
+        curr->arch.hvm_vmx.host_msr_area = msr_area;
+        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
+    }
 
     for ( i = 0; i < msr_count; i++ )
         if ( msr_area[i].index == msr )
@@ -754,18 +766,10 @@ int vmx_add_host_load_msr(struct vcpu *v
     if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
         return -ENOSPC;
 
-    if ( msr_area == NULL )
-    {
-        if ( (msr_area = alloc_xenheap_page()) == NULL )
-            return -ENOMEM;
-        v->arch.hvm_vmx.host_msr_area = msr_area;
-        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
-    }
-
     msr_area[msr_count].index = msr;
     msr_area[msr_count].mbz   = 0;
     rdmsrl(msr, msr_area[msr_count].data);
-    v->arch.hvm_vmx.host_msr_count = ++msr_count;
+    curr->arch.hvm_vmx.host_msr_count = ++msr_count;
     __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
 
     return 0;
@@ -776,21 +780,17 @@ int vmx_create_vmcs(struct vcpu *v)
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
     int rc;
 
-    if ( arch_vmx->vmcs == NULL )
-    {
-        if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
-            return -ENOMEM;
+    if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
+        return -ENOMEM;
 
-        INIT_LIST_HEAD(&arch_vmx->active_list);
-        __vmpclear(virt_to_maddr(arch_vmx->vmcs));
-        arch_vmx->active_cpu = -1;
-        arch_vmx->launched   = 0;
-    }
+    INIT_LIST_HEAD(&arch_vmx->active_list);
+    __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+    arch_vmx->active_cpu = -1;
+    arch_vmx->launched   = 0;
 
     if ( (rc = construct_vmcs(v)) != 0 )
     {
         vmx_free_vmcs(arch_vmx->vmcs);
-        arch_vmx->vmcs = NULL;
         return rc;
     }
 
@@ -801,13 +801,13 @@ void vmx_destroy_vmcs(struct vcpu *v)
 {
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
 
-    if ( arch_vmx->vmcs == NULL )
-        return;
-
     vmx_clear_vmcs(v);
 
     vmx_free_vmcs(arch_vmx->vmcs);
-    arch_vmx->vmcs = NULL;
+
+    free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
+    free_xenheap_page(v->arch.hvm_vmx.msr_area);
+    free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
 }
 
 void vm_launch_fail(void)
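
A note on the new vmx_destroy_vmcs() above: the three
free_xenheap_page() calls are made unconditionally, with no NULL
checks. This relies on Xen's free_xenheap_pages() returning
immediately for a NULL pointer (much as free(NULL) does in standard
C), so a vcpu that never allocated an msr_area, host_msr_area or
msr_bitmap is still torn down correctly. A sketch of the same idiom
in portable C, with a hypothetical vmx_state stand-in for the real
arch_vmx_struct fields:

    #include <stdlib.h>

    struct vmx_state {
        void *msr_area;       /* NULL until first vmx_add_guest_msr() */
        void *host_msr_area;  /* NULL until first vmx_add_host_load_msr() */
        void *msr_bitmap;     /* NULL unless MSR bitmaps are in use */
    };

    static void destroy_state(struct vmx_state *s)
    {
        /* free() accepts NULL, so no guards are needed. */
        free(s->host_msr_area);
        free(s->msr_area);
        free(s->msr_bitmap);
    }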
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Jun 18 14:17:10 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Jun 19 11:09:10 2008 +0100
@@ -1655,7 +1655,7 @@ static int vmx_msr_read_intercept(struct
                 goto done;
         }
 
-        if ( vmx_read_guest_msr(v, ecx, &msr_content) == 0 )
+        if ( vmx_read_guest_msr(ecx, &msr_content) == 0 )
             break;
 
         if ( is_last_branch_msr(ecx) )
@@ -1817,12 +1817,12 @@ static int vmx_msr_write_intercept(struc
 
             for ( ; (rc == 0) && lbr->count; lbr++ )
                 for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
-                    if ( (rc = vmx_add_guest_msr(v, lbr->base + i)) == 0 )
+                    if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
                         vmx_disable_intercept_for_msr(v, lbr->base + i);
         }
 
         if ( (rc < 0) ||
-             (vmx_add_host_load_msr(v, ecx) < 0) )
+             (vmx_add_host_load_msr(ecx) < 0) )
             vmx_inject_hw_exception(v, TRAP_machine_check, 0);
         else
         {
@@ -1842,7 +1842,7 @@ static int vmx_msr_write_intercept(struc
         switch ( long_mode_do_msr_write(regs) )
         {
             case HNDL_unhandled:
-                if ( (vmx_write_guest_msr(v, ecx, msr_content) != 0) &&
+                if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
                      !is_last_branch_msr(ecx) )
                     wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
                 break;
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c	Wed Jun 18 14:17:10 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c	Thu Jun 19 11:09:10 2008 +0100
@@ -219,12 +219,12 @@ static int core2_vpmu_alloc_resource(str
         return 0;
 
     wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-    if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
+    if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
         return 0;
 
-    if ( vmx_add_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
+    if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
         return 0;
-    vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
+    vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
 
     pmu_enable = xmalloc_bytes(sizeof(struct core2_pmu_enable) +
                  (core2_get_pmc_count()-1)*sizeof(char));
@@ -347,7 +347,7 @@ static int core2_vpmu_do_wrmsr(struct cp
         break;
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
         non_global_ctrl = msr_content;
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         global_ctrl >>= 32;
         for ( i = 0; i < 3; i++ )
         {
@@ -359,7 +359,7 @@ static int core2_vpmu_do_wrmsr(struct cp
         break;
     default:
         tmp = ecx - MSR_P6_EVNTSEL0;
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         if ( tmp >= 0 && tmp < core2_get_pmc_count() )
             core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
                 (global_ctrl >> tmp) & (msr_content >> 22) & 1;
@@ -385,7 +385,7 @@ static int core2_vpmu_do_wrmsr(struct cp
     if ( type != MSR_TYPE_GLOBAL )
         wrmsrl(ecx, msr_content);
     else
-        vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
 
     return 1;
 }
@@ -410,7 +410,7 @@ static int core2_vpmu_do_rdmsr(struct cp
         msr_content = core2_vpmu_cxt->global_ovf_status;
         break;
     case MSR_CORE_PERF_GLOBAL_CTRL:
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
         break;
     default:
         rdmsrl(regs->ecx, msr_content);
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jun 18 14:17:10 2008 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Jun 19 11:09:10 2008 +0100
@@ -333,10 +333,10 @@ enum vmcs_field {
 #define VMCS_VPID_WIDTH 16
 
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr);
-int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val);
-int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val);
-int vmx_add_guest_msr(struct vcpu *v, u32 msr);
-int vmx_add_host_load_msr(struct vcpu *v, u32 msr);
+int vmx_read_guest_msr(u32 msr, u64 *val);
+int vmx_write_guest_msr(u32 msr, u64 val);
+int vmx_add_guest_msr(u32 msr);
+int vmx_add_host_load_msr(u32 msr);
 
 #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
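
With the vcpu parameter gone, the prototypes themselves now document
that these helpers act on the current vcpu only. A hypothetical
caller, in the style of the MSR intercept handlers above (MSR_FOO is
a placeholder index, not a real MSR), might look like:

    /* Runs in an exit handler, i.e. on the vcpu being serviced. */
    uint64_t val;

    if ( vmx_add_guest_msr(MSR_FOO) < 0 )
        return -1;                       /* -ENOSPC or -ENOMEM */

    if ( vmx_read_guest_msr(MSR_FOO, &val) == 0 )
        vmx_write_guest_msr(MSR_FOO, val);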