ia64/xen-unstable

changeset 16148:765600a13e4a

vmx: last branch recording MSR emulation

This required adding infrastructure to make use of VMX's MSR save/
restore feature, as well as making the MSR intercept bitmap per-vCPU
rather than a single global bitmap.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Wed Oct 17 15:19:05 2007 +0100 (2007-10-17)
parents ca2984b17fcf
children 16f5672879c8
files xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/msr-index.h
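For orientation before the diff: the per-vCPU MSR bitmap introduced here follows the layout from the Intel manual, with the read-low, read-high, write-low and write-high regions at offsets 0x000, 0x400, 0x800 and 0xc00 of a single 4KiB page. The stand-alone sketch below is illustrative only; clear_intercept() is a made-up helper name, not a Xen or changeset function.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * VMX MSR-bitmap regions within the 4KiB page (per the Intel manual; the
 * changeset comment notes early manuals swapped the write-low/read-high
 * offsets):
 *   0x000 read-low   0x400 read-high   0x800 write-low   0xc00 write-high
 */
static void clear_intercept(uint8_t *bitmap, uint32_t msr, int is_write)
{
    uint32_t base;

    if ( msr <= 0x1fff )
        base = is_write ? 0x800 : 0x000;            /* low MSR range */
    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
    {
        msr &= 0x1fff;
        base = is_write ? 0xc00 : 0x400;            /* high MSR range */
    }
    else
        return;                                     /* not controllable via the bitmap */

    bitmap[base + msr / 8] &= (uint8_t)~(1u << (msr % 8));
}

int main(void)
{
    uint8_t bitmap[4096];

    memset(bitmap, 0xff, sizeof(bitmap));           /* start fully intercepted */

    clear_intercept(bitmap, 0xc0000100, 0);         /* MSR_FS_BASE reads  */
    clear_intercept(bitmap, 0xc0000100, 1);         /* MSR_FS_BASE writes */

    printf("read-high byte at 0x%03x is now 0x%02x\n",
           0x400 + 0x100 / 8, bitmap[0x400 + 0x100 / 8]);
    return 0;
}

Clearing a bit disables the VM exit for that access; since the per-vCPU bitmap is initialised to all-ones in construct_vmcs(), every MSR access is intercepted until explicitly opened up.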
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Oct 17 14:38:19 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Oct 17 15:19:05 2007 +0100
     1.3 @@ -413,9 +413,35 @@ static void vmx_set_host_env(struct vcpu
     1.4                (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
     1.5  }
     1.6  
     1.7 +void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
     1.8 +{
     1.9 +    char *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
    1.10 +
    1.11 +    /* VMX MSR bitmap supported? */
    1.12 +    if ( msr_bitmap == NULL )
    1.13 +        return;
    1.14 +
    1.15 +    /*
    1.16 +     * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
    1.17 +     * have the write-low and read-high bitmap offsets the wrong way round.
    1.18 +     * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
    1.19 +     */
    1.20 +    if ( msr <= 0x1fff )
    1.21 +    {
    1.22 +        __clear_bit(msr, msr_bitmap + 0x000); /* read-low */
    1.23 +        __clear_bit(msr, msr_bitmap + 0x800); /* write-low */
    1.24 +    }
    1.25 +    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
    1.26 +    {
    1.27 +        msr &= 0x1fff;
    1.28 +        __clear_bit(msr, msr_bitmap + 0x400); /* read-high */
    1.29 +        __clear_bit(msr, msr_bitmap + 0xc00); /* write-high */
    1.30 +    }
    1.31 +}
    1.32 +
    1.33  #define GUEST_SEGMENT_LIMIT     0xffffffff
    1.34  
    1.35 -static void construct_vmcs(struct vcpu *v)
    1.36 +static int construct_vmcs(struct vcpu *v)
    1.37  {
    1.38      union vmcs_arbytes arbytes;
    1.39  
    1.40 @@ -430,8 +456,24 @@ static void construct_vmcs(struct vcpu *
    1.41      if ( vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
    1.42          __vmwrite(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control);
    1.43  
    1.44 +    /* MSR access bitmap. */
    1.45      if ( cpu_has_vmx_msr_bitmap )
    1.46 -        __vmwrite(MSR_BITMAP, virt_to_maddr(vmx_msr_bitmap));
    1.47 +    {
    1.48 +        char *msr_bitmap = alloc_xenheap_page();
    1.49 +
    1.50 +        if ( msr_bitmap == NULL )
    1.51 +            return -ENOMEM;
    1.52 +
    1.53 +        memset(msr_bitmap, ~0, PAGE_SIZE);
    1.54 +        v->arch.hvm_vmx.msr_bitmap = msr_bitmap;
    1.55 +        __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));
    1.56 +
    1.57 +        vmx_disable_intercept_for_msr(v, MSR_FS_BASE);
    1.58 +        vmx_disable_intercept_for_msr(v, MSR_GS_BASE);
    1.59 +        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
    1.60 +        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
    1.61 +        vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
    1.62 +    }
    1.63  
    1.64      /* I/O access bitmap. */
    1.65      __vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap));
    1.66 @@ -463,10 +505,8 @@ static void construct_vmcs(struct vcpu *
    1.67      __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
    1.68  
    1.69      /* MSR intercepts. */
    1.70 -    __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    1.71 -    __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
    1.72 +    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    1.73      __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
    1.74 -    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
    1.75      __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
    1.76  
    1.77      __vmwrite(VM_ENTRY_INTR_INFO, 0);
    1.78 @@ -565,11 +605,108 @@ static void construct_vmcs(struct vcpu *
    1.79      paging_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */
    1.80  
    1.81      vmx_vlapic_msr_changed(v);
    1.82 +
    1.83 +    return 0;
    1.84 +}
    1.85 +
    1.86 +int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val)
    1.87 +{
    1.88 +    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
    1.89 +    const struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
    1.90 +
    1.91 +    for ( i = 0; i < msr_count; i++ )
    1.92 +    {
    1.93 +        if ( msr_area[i].index == msr )
    1.94 +        {
    1.95 +            *val = msr_area[i].data;
    1.96 +            return 0;
    1.97 +        }
    1.98 +    }
    1.99 +
   1.100 +    return -ESRCH;
   1.101 +}
   1.102 +
   1.103 +int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val)
   1.104 +{
   1.105 +    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
   1.106 +    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
   1.107 +
   1.108 +    for ( i = 0; i < msr_count; i++ )
   1.109 +    {
   1.110 +        if ( msr_area[i].index == msr )
   1.111 +        {
   1.112 +            msr_area[i].data = val;
   1.113 +            return 0;
   1.114 +        }
   1.115 +    }
   1.116 +
   1.117 +    return -ESRCH;
   1.118 +}
   1.119 +
   1.120 +int vmx_add_guest_msr(struct vcpu *v, u32 msr)
   1.121 +{
   1.122 +    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
   1.123 +    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
   1.124 +
   1.125 +    for ( i = 0; i < msr_count; i++ )
   1.126 +        if ( msr_area[i].index == msr )
   1.127 +            return 0;
   1.128 +
   1.129 +    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
   1.130 +        return -ENOSPC;
   1.131 +
   1.132 +    if ( msr_area == NULL )
   1.133 +    {
   1.134 +        if ( (msr_area = alloc_xenheap_page()) == NULL )
   1.135 +            return -ENOMEM;
   1.136 +        v->arch.hvm_vmx.msr_area = msr_area;
   1.137 +        __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
   1.138 +        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
   1.139 +    }
   1.140 +
   1.141 +    msr_area[msr_count].index = msr;
   1.142 +    msr_area[msr_count].mbz   = 0;
   1.143 +    msr_area[msr_count].data  = 0;
   1.144 +    v->arch.hvm_vmx.msr_count = ++msr_count;
   1.145 +    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
   1.146 +    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
   1.147 +
   1.148 +    return 0;
   1.149 +}
   1.150 +
   1.151 +int vmx_add_host_load_msr(struct vcpu *v, u32 msr)
   1.152 +{
   1.153 +    unsigned int i, msr_count = v->arch.hvm_vmx.host_msr_count;
   1.154 +    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.host_msr_area;
   1.155 +
   1.156 +    for ( i = 0; i < msr_count; i++ )
   1.157 +        if ( msr_area[i].index == msr )
   1.158 +            return 0;
   1.159 +
   1.160 +    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
   1.161 +        return -ENOSPC;
   1.162 +
   1.163 +    if ( msr_area == NULL )
   1.164 +    {
   1.165 +        if ( (msr_area = alloc_xenheap_page()) == NULL )
   1.166 +            return -ENOMEM;
   1.167 +        v->arch.hvm_vmx.host_msr_area = msr_area;
   1.168 +        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
   1.169 +    }
   1.170 +
   1.171 +    msr_area[msr_count].index = msr;
   1.172 +    msr_area[msr_count].mbz   = 0;
   1.173 +    rdmsrl(msr, msr_area[msr_count].data);
   1.174 +    v->arch.hvm_vmx.host_msr_count = ++msr_count;
   1.175 +    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
   1.176 +
   1.177 +    return 0;
   1.178  }
   1.179  
   1.180  int vmx_create_vmcs(struct vcpu *v)
   1.181  {
   1.182      struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
   1.183 +    int rc;
   1.184  
   1.185      if ( arch_vmx->vmcs == NULL )
   1.186      {
   1.187 @@ -582,7 +719,12 @@ int vmx_create_vmcs(struct vcpu *v)
   1.188          arch_vmx->launched   = 0;
   1.189      }
   1.190  
   1.191 -    construct_vmcs(v);
   1.192 +    if ( (rc = construct_vmcs(v)) != 0 )
   1.193 +    {
   1.194 +        vmx_free_vmcs(arch_vmx->vmcs);
   1.195 +        arch_vmx->vmcs = NULL;
   1.196 +        return rc;
   1.197 +    }
   1.198  
   1.199      return 0;
   1.200  }
     2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Oct 17 14:38:19 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Oct 17 15:19:05 2007 +0100
     2.3 @@ -53,8 +53,6 @@
     2.4  
     2.5  enum handler_return { HNDL_done, HNDL_unhandled, HNDL_exception_raised };
     2.6  
     2.7 -char *vmx_msr_bitmap;
     2.8 -
     2.9  static void vmx_ctxt_switch_from(struct vcpu *v);
    2.10  static void vmx_ctxt_switch_to(struct vcpu *v);
    2.11  
    2.12 @@ -1106,26 +1104,6 @@ static int vmx_event_pending(struct vcpu
    2.13      return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
    2.14  }
    2.15  
    2.16 -static void disable_intercept_for_msr(u32 msr)
    2.17 -{
    2.18 -    /*
    2.19 -     * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
    2.20 -     * have the write-low and read-high bitmap offsets the wrong way round.
    2.21 -     * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
    2.22 -     */
    2.23 -    if ( msr <= 0x1fff )
    2.24 -    {
    2.25 -        __clear_bit(msr, vmx_msr_bitmap + 0x000); /* read-low */
    2.26 -        __clear_bit(msr, vmx_msr_bitmap + 0x800); /* write-low */
    2.27 -    }
    2.28 -    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
    2.29 -    {
    2.30 -        msr &= 0x1fff;
    2.31 -        __clear_bit(msr, vmx_msr_bitmap + 0x400); /* read-high */
    2.32 -        __clear_bit(msr, vmx_msr_bitmap + 0xc00); /* write-high */
    2.33 -    }
    2.34 -}
    2.35 -
    2.36  static struct hvm_function_table vmx_function_table = {
    2.37      .name                 = "VMX",
    2.38      .domain_initialise    = vmx_domain_initialise,
    2.39 @@ -1190,21 +1168,6 @@ void start_vmx(void)
    2.40      setup_vmcs_dump();
    2.41  
    2.42      hvm_enable(&vmx_function_table);
    2.43 -
    2.44 -    if ( cpu_has_vmx_msr_bitmap )
    2.45 -    {
    2.46 -        printk("VMX: MSR intercept bitmap enabled\n");
    2.47 -        vmx_msr_bitmap = alloc_xenheap_page();
    2.48 -        BUG_ON(vmx_msr_bitmap == NULL);
    2.49 -        memset(vmx_msr_bitmap, ~0, PAGE_SIZE);
    2.50 -
    2.51 -        disable_intercept_for_msr(MSR_FS_BASE);
    2.52 -        disable_intercept_for_msr(MSR_GS_BASE);
    2.53 -
    2.54 -        disable_intercept_for_msr(MSR_IA32_SYSENTER_CS);
    2.55 -        disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP);
    2.56 -        disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP);
    2.57 -    }
    2.58  }
    2.59  
    2.60  /*
    2.61 @@ -1302,10 +1265,12 @@ static void vmx_do_cpuid(struct cpu_user
    2.62                   bitmaskof(X86_FEATURE_EST)  |
    2.63                   bitmaskof(X86_FEATURE_TM2)  |
    2.64                   bitmaskof(X86_FEATURE_CID)  |
    2.65 -                 bitmaskof(X86_FEATURE_PDCM));
    2.66 +                 bitmaskof(X86_FEATURE_PDCM) |
    2.67 +                 bitmaskof(X86_FEATURE_DSCPL));
    2.68          edx &= ~(bitmaskof(X86_FEATURE_HT)   |
    2.69                   bitmaskof(X86_FEATURE_ACPI) |
    2.70 -                 bitmaskof(X86_FEATURE_ACC));
    2.71 +                 bitmaskof(X86_FEATURE_ACC)  |
    2.72 +                 bitmaskof(X86_FEATURE_DS));
    2.73          break;
    2.74  
    2.75      case 0x00000004:
    2.76 @@ -2239,6 +2204,82 @@ static int vmx_cr_access(unsigned long e
    2.77      return 1;
    2.78  }
    2.79  
    2.80 +static const struct lbr_info {
    2.81 +    u32 base, count;
    2.82 +} p4_lbr[] = {
    2.83 +    { MSR_P4_LER_FROM_LIP,          1 },
    2.84 +    { MSR_P4_LER_TO_LIP,            1 },
    2.85 +    { MSR_P4_LASTBRANCH_TOS,        1 },
    2.86 +    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
    2.87 +    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
    2.88 +    { 0, 0 }
    2.89 +}, c2_lbr[] = {
    2.90 +    { MSR_IA32_LASTINTFROMIP,       1 },
    2.91 +    { MSR_IA32_LASTINTTOIP,         1 },
    2.92 +    { MSR_C2_LASTBRANCH_TOS,        1 },
    2.93 +    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
    2.94 +    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_C2_LASTBRANCH_FROM_TO },
    2.95 +    { 0, 0 }
    2.96 +#ifdef __i386__
    2.97 +}, pm_lbr[] = {
    2.98 +    { MSR_IA32_LASTINTFROMIP,       1 },
    2.99 +    { MSR_IA32_LASTINTTOIP,         1 },
   2.100 +    { MSR_PM_LASTBRANCH_TOS,        1 },
   2.101 +    { MSR_PM_LASTBRANCH_0,          NUM_MSR_PM_LASTBRANCH },
   2.102 +    { 0, 0 }
   2.103 +#endif
   2.104 +};
   2.105 +
   2.106 +static const struct lbr_info *last_branch_msr_get(void)
   2.107 +{
   2.108 +    switch ( boot_cpu_data.x86 )
   2.109 +    {
   2.110 +    case 6:
   2.111 +        switch ( boot_cpu_data.x86_model )
   2.112 +        {
   2.113 +#ifdef __i386__
   2.114 +        /* PentiumM */
   2.115 +        case 9: case 13:
   2.116 +        /* Core Solo/Duo */
   2.117 +        case 14:
   2.118 +            return pm_lbr;
   2.119 +            break;
   2.120 +#endif
   2.121 +        /* Core2 Duo */
   2.122 +        case 15:
   2.123 +            return c2_lbr;
   2.124 +            break;
   2.125 +        }
   2.126 +        break;
   2.127 +
   2.128 +    case 15:
   2.129 +        switch ( boot_cpu_data.x86_model )
   2.130 +        {
   2.131 +        /* Pentium4/Xeon with em64t */
   2.132 +        case 3: case 4: case 6:
   2.133 +            return p4_lbr;
   2.134 +            break;
   2.135 +        }
   2.136 +        break;
   2.137 +    }
   2.138 +
   2.139 +    return NULL;
   2.140 +}
   2.141 +
   2.142 +static int is_last_branch_msr(u32 ecx)
   2.143 +{
   2.144 +    const struct lbr_info *lbr = last_branch_msr_get();
   2.145 +
   2.146 +    if ( lbr == NULL )
   2.147 +        return 0;
   2.148 +
   2.149 +    for ( ; lbr->count; lbr++ )
   2.150 +        if ( (ecx >= lbr->base) && (ecx < (lbr->base + lbr->count)) )
   2.151 +            return 1;
   2.152 +
   2.153 +    return 0;
   2.154 +}
   2.155 +
   2.156  static int vmx_do_msr_read(struct cpu_user_regs *regs)
   2.157  {
   2.158      u64 msr_content = 0;
   2.159 @@ -2264,6 +2305,10 @@ static int vmx_do_msr_read(struct cpu_us
   2.160      case MSR_IA32_APICBASE:
   2.161          msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
   2.162          break;
   2.163 +    case MSR_IA32_DEBUGCTLMSR:
   2.164 +        if ( vmx_read_guest_msr(v, ecx, &msr_content) != 0 )
   2.165 +            msr_content = 0;
   2.166 +        break;
   2.167      case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
   2.168          goto gp_fault;
   2.169      case MSR_IA32_MCG_CAP:
   2.170 @@ -2288,6 +2333,15 @@ static int vmx_do_msr_read(struct cpu_us
   2.171                  goto done;
   2.172          }
   2.173  
   2.174 +        if ( vmx_read_guest_msr(v, ecx, &msr_content) == 0 )
   2.175 +            break;
   2.176 +
   2.177 +        if ( is_last_branch_msr(ecx) )
   2.178 +        {
   2.179 +            msr_content = 0;
   2.180 +            break;
   2.181 +        }
   2.182 +
   2.183          if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
   2.184               rdmsr_safe(ecx, eax, edx) == 0 )
   2.185          {
   2.186 @@ -2405,13 +2459,42 @@ static int vmx_do_msr_write(struct cpu_u
   2.187      case MSR_IA32_APICBASE:
   2.188          vlapic_msr_set(vcpu_vlapic(v), msr_content);
   2.189          break;
   2.190 +    case MSR_IA32_DEBUGCTLMSR: {
   2.191 +        int i, rc = 0;
   2.192 +
   2.193 +        if ( !msr_content || (msr_content & ~3) )
   2.194 +            break;
   2.195 +
   2.196 +        if ( msr_content & 1 )
   2.197 +        {
   2.198 +            const struct lbr_info *lbr = last_branch_msr_get();
   2.199 +            if ( lbr == NULL )
   2.200 +                break;
   2.201 +
   2.202 +            for ( ; (rc == 0) && lbr->count; lbr++ )
   2.203 +                for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
   2.204 +                    if ( (rc = vmx_add_guest_msr(v, lbr->base + i)) == 0 )
   2.205 +                        vmx_disable_intercept_for_msr(v, lbr->base + i);
   2.206 +        }
   2.207 +
   2.208 +        if ( (rc < 0) ||
   2.209 +             (vmx_add_guest_msr(v, ecx) < 0) ||
   2.210 +             (vmx_add_host_load_msr(v, ecx) < 0) )
   2.211 +            vmx_inject_hw_exception(v, TRAP_machine_check, 0);
   2.212 +        else
   2.213 +            vmx_write_guest_msr(v, ecx, msr_content);
   2.214 +
   2.215 +        break;
   2.216 +    }
   2.217      case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
   2.218          goto gp_fault;
   2.219      default:
   2.220          switch ( long_mode_do_msr_write(regs) )
   2.221          {
   2.222              case HNDL_unhandled:
   2.223 -                wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
   2.224 +                if ( (vmx_write_guest_msr(v, ecx, msr_content) != 0) &&
   2.225 +                     !is_last_branch_msr(ecx) )
   2.226 +                    wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
   2.227                  break;
   2.228              case HNDL_exception_raised:
   2.229                  return 0;
     3.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Oct 17 14:38:19 2007 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Oct 17 15:19:05 2007 +0100
     3.3 @@ -35,6 +35,12 @@ struct vmcs_struct {
     3.4      unsigned char data [0]; /* vmcs size is read from MSR */
     3.5  };
     3.6  
     3.7 +struct vmx_msr_entry {
     3.8 +    u32 index;
     3.9 +    u32 mbz;
    3.10 +    u64 data;
    3.11 +};
    3.12 +
    3.13  enum {
    3.14      VMX_INDEX_MSR_LSTAR = 0,
    3.15      VMX_INDEX_MSR_STAR,
    3.16 @@ -73,6 +79,12 @@ struct arch_vmx_struct {
    3.17      unsigned long        cstar;
    3.18  #endif
    3.19  
    3.20 +    char                *msr_bitmap;
    3.21 +    unsigned int         msr_count;
    3.22 +    struct vmx_msr_entry *msr_area;
    3.23 +    unsigned int         host_msr_count;
    3.24 +    struct vmx_msr_entry *host_msr_area;
    3.25 +
    3.26      /* Following fields are all specific to vmxassist. */
    3.27      unsigned long        vmxassist_enabled:1;
    3.28      unsigned long        irqbase_mode:1;
    3.29 @@ -131,7 +143,6 @@ extern bool_t cpu_has_vmx_ins_outs_instr
    3.30      (vmx_pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS)
    3.31  #define cpu_has_vmx_msr_bitmap \
    3.32      (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
    3.33 -extern char *vmx_msr_bitmap;
    3.34  
    3.35  /* GUEST_INTERRUPTIBILITY_INFO flags. */
    3.36  #define VMX_INTR_SHADOW_STI             0x00000001
    3.37 @@ -268,6 +279,12 @@ enum vmcs_field {
    3.38      HOST_RIP                        = 0x00006c16,
    3.39  };
    3.40  
    3.41 +void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr);
    3.42 +int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val);
    3.43 +int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val);
    3.44 +int vmx_add_guest_msr(struct vcpu *v, u32 msr);
    3.45 +int vmx_add_host_load_msr(struct vcpu *v, u32 msr);
    3.46 +
    3.47  #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
    3.48  
    3.49  /*
     4.1 --- a/xen/include/asm-x86/msr-index.h	Wed Oct 17 14:38:19 2007 +0100
     4.2 +++ b/xen/include/asm-x86/msr-index.h	Wed Oct 17 15:19:05 2007 +0100
     4.3 @@ -323,6 +323,27 @@
     4.4  #define MSR_P4_U2L_ESCR0		0x000003b0
     4.5  #define MSR_P4_U2L_ESCR1		0x000003b1
     4.6  
     4.7 +/* Netburst (P4) last-branch recording */
     4.8 +#define MSR_P4_LER_FROM_LIP 		0x000001d7
     4.9 +#define MSR_P4_LER_TO_LIP 		0x000001d8
    4.10 +#define MSR_P4_LASTBRANCH_TOS		0x000001da
    4.11 +#define MSR_P4_LASTBRANCH_0		0x000001db
    4.12 +#define NUM_MSR_P4_LASTBRANCH		4
    4.13 +#define MSR_P4_LASTBRANCH_0_FROM_LIP	0x00000680
    4.14 +#define MSR_P4_LASTBRANCH_0_TO_LIP	0x000006c0
    4.15 +#define NUM_MSR_P4_LASTBRANCH_FROM_TO	16
    4.16 +
    4.17 +/* Pentium M (and Core) last-branch recording */
    4.18 +#define MSR_PM_LASTBRANCH_TOS		0x000001c9
    4.19 +#define MSR_PM_LASTBRANCH_0		0x00000040
    4.20 +#define NUM_MSR_PM_LASTBRANCH		8
    4.21 +
    4.22 +/* Core 2 last-branch recording */
    4.23 +#define MSR_C2_LASTBRANCH_TOS		0x000001c9
    4.24 +#define MSR_C2_LASTBRANCH_0_FROM_IP	0x00000040
    4.25 +#define MSR_C2_LASTBRANCH_0_TO_IP	0x00000060
    4.26 +#define NUM_MSR_C2_LASTBRANCH_FROM_TO	4
    4.27 +
    4.28  /* Intel Core-based CPU performance counters */
    4.29  #define MSR_CORE_PERF_FIXED_CTR0	0x00000309
    4.30  #define MSR_CORE_PERF_FIXED_CTR1	0x0000030a
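As a companion to the vmx_add_guest_msr()/vmx_add_host_load_msr() code above, here is a stand-alone sketch of the MSR save/restore list format the changeset starts using: a single page of 16-byte vmx_msr_entry records, so at most 4096 / 16 = 256 MSRs per list. The add_msr() helper and the program around it are illustrative only and not Xen code; in Xen the running count is additionally written back to the VM_EXIT_MSR_STORE_COUNT / VM_ENTRY_MSR_LOAD_COUNT (guest list) or VM_EXIT_MSR_LOAD_COUNT (host list) VMCS fields.

#include <stdint.h>
#include <stdio.h>

/* Same layout as struct vmx_msr_entry in vmcs.h above. */
struct vmx_msr_entry {
    uint32_t index;   /* MSR number */
    uint32_t mbz;     /* reserved, must be zero */
    uint64_t data;    /* value stored on VM exit / loaded on VM entry */
};

#define MAX_ENTRIES (4096 / sizeof(struct vmx_msr_entry))   /* 256 */

/* Mirrors vmx_add_guest_msr(): append the MSR unless it is already listed. */
static int add_msr(struct vmx_msr_entry *area, unsigned int *count, uint32_t msr)
{
    unsigned int i;

    for ( i = 0; i < *count; i++ )
        if ( area[i].index == msr )
            return 0;                 /* already tracked */

    if ( *count == MAX_ENTRIES )
        return -1;                    /* list full (-ENOSPC in Xen) */

    area[*count].index = msr;
    area[*count].mbz   = 0;
    area[*count].data  = 0;
    (*count)++;                       /* Xen also updates the VMCS counts here */
    return 0;
}

int main(void)
{
    static struct vmx_msr_entry area[MAX_ENTRIES];
    unsigned int count = 0;

    add_msr(&area[0], &count, 0x000001d9);   /* MSR_IA32_DEBUGCTLMSR */
    add_msr(&area[0], &count, 0x000001d9);   /* duplicate: no effect */

    printf("entries: %u, first MSR index: 0x%08x\n",
           count, (unsigned int)area[0].index);
    return 0;
}

The host-load list managed by vmx_add_host_load_msr() uses the same entry format, but captures the current host value with rdmsrl() when the entry is registered.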