ia64/xen-unstable

changeset 16099:4746c8c9372f

svm: last branch recording MSR emulation
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Fri Oct 12 10:19:55 2007 +0100 (2007-10-12)
parents f6a06b2eefc5
children 5c13433f8842 ac37f61f6908
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/include/asm-x86/hvm/svm/svm.h xen/include/asm-x86/hvm/svm/vmcb.h
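This changeset makes AMD's last-branch-recording (LBR) MSRs usable from SVM guests: DEBUGCTL plus the LastBranchFromIP, LastBranchToIP, LastIntFromIP and LastIntToIP registers. The SVM feature bits from CPUID leaf 0x8000000A are cached in a new global, svm_feature_flags; when a guest sets DEBUGCTL on hardware that advertises LBR virtualization, Xen flips the VMCB's LBR-control enable bit and stops intercepting all five MSRs.

For orientation, this is roughly what the guest side of the new path looks like. A sketch only, not part of the patch: it assumes ring-0 guest code, and the MSR indices are the architectural ones from the AMD64 manual.

    #include <stdint.h>

    #define MSR_DEBUGCTL          0x000001d9
    #define MSR_LASTBRANCHFROMIP  0x000001db

    static inline uint64_t rdmsr(uint32_t msr)
    {
        uint32_t lo, hi;
        asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
        return ((uint64_t)hi << 32) | lo;
    }

    static inline void wrmsr(uint32_t msr, uint64_t val)
    {
        asm volatile ( "wrmsr" : : "c" (msr), "a" ((uint32_t)val),
                       "d" ((uint32_t)(val >> 32)) );
    }

    static void guest_enable_lbr(void)
    {
        /* The first write VMEXITs into svm_do_msr_access(), which enables
         * VMCB LBR virtualization and drops the intercepts... */
        wrmsr(MSR_DEBUGCTL, 1);               /* DEBUGCTL.LBR = 1 */
        /* ...so this read is satisfied without another VMEXIT. */
        (void)rdmsr(MSR_LASTBRANCHFROMIP);
    }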
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Oct 11 19:23:40 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Oct 12 10:19:55 2007 +0100
     1.3 @@ -50,6 +50,8 @@
     1.4  #include <asm/hvm/trace.h>
     1.5  #include <asm/hap.h>
     1.6  
     1.7 +u32 svm_feature_flags;
     1.8 +
     1.9  #define set_segment_register(name, value)  \
    1.10      asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
    1.11  
    1.12 @@ -935,13 +937,16 @@ int start_svm(struct cpuinfo_x86 *c)
    1.13  
    1.14      setup_vmcb_dump();
    1.15  
    1.16 +    svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
    1.17 +                         cpuid_edx(0x8000000A) : 0);
    1.18 +
    1.19  #ifdef __x86_64__
    1.20      /*
    1.21       * Check CPUID for nested paging support. We support NPT only on 64-bit
    1.22       * hosts since the phys-to-machine table is in host format. Hence 32-bit
    1.23       * Xen could only support guests using NPT with up to a 4GB memory map.
    1.24       */
    1.25 -    svm_function_table.hap_supported = (cpuid_edx(0x8000000A) & 1);
    1.26 +    svm_function_table.hap_supported = cpu_has_svm_npt;
    1.27  #endif
    1.28  
    1.29      hvm_enable(&svm_function_table);
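The hunk above also guards the feature leaf properly: CPUID function 0x8000000A is only read once function 0x80000000 confirms the CPU implements it, and the result is kept in svm_feature_flags for the new cpu_has_svm_* predicates (previously the NPT check read the leaf unconditionally). A minimal standalone rendering of that probe, with a cpuid helper modeled on Xen's:

    #include <stdint.h>

    static inline void cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
    {
        asm volatile ( "cpuid"
                       : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                       : "a" (leaf) );
    }

    static uint32_t read_svm_feature_flags(void)
    {
        uint32_t eax, ebx, ecx, edx;

        cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
        if ( eax < 0x8000000A )      /* leaf not implemented: no features */
            return 0;

        cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);
        return edx;                  /* NP, LbrVirt, SVML, NRIPS, ... */
    }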
    1.30 @@ -1810,6 +1815,26 @@ static void svm_do_msr_access(
    1.31              msr_content = 0;
    1.32              break;
    1.33  
    1.34 +        case MSR_IA32_DEBUGCTLMSR:
    1.35 +            msr_content = vmcb->debugctlmsr;
    1.36 +            break;
    1.37 +
    1.38 +        case MSR_IA32_LASTBRANCHFROMIP:
    1.39 +            msr_content = vmcb->lastbranchfromip;
    1.40 +            break;
    1.41 +
    1.42 +        case MSR_IA32_LASTBRANCHTOIP:
    1.43 +            msr_content = vmcb->lastbranchtoip;
    1.44 +            break;
    1.45 +
    1.46 +        case MSR_IA32_LASTINTFROMIP:
    1.47 +            msr_content = vmcb->lastintfromip;
    1.48 +            break;
    1.49 +
    1.50 +        case MSR_IA32_LASTINTTOIP:
    1.51 +            msr_content = vmcb->lastinttoip;
    1.52 +            break;
    1.53 +
    1.54          default:
    1.55              if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
    1.56                   rdmsr_safe(ecx, eax, edx) == 0 )
    1.57 @@ -1852,6 +1877,34 @@ static void svm_do_msr_access(
    1.58              svm_inject_exception(v, TRAP_gp_fault, 1, 0);
    1.59              break;
    1.60  
    1.61 +        case MSR_IA32_DEBUGCTLMSR:
    1.62 +            vmcb->debugctlmsr = msr_content;
    1.63 +            if ( !msr_content || !cpu_has_svm_lbrv )
    1.64 +                break;
    1.65 +            vmcb->lbr_control.fields.enable = 1;
    1.66 +            svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
    1.67 +            svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
    1.68 +            svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
    1.69 +            svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
    1.70 +            svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
    1.71 +            break;
    1.72 +
    1.73 +        case MSR_IA32_LASTBRANCHFROMIP:
    1.74 +            vmcb->lastbranchfromip = msr_content;
    1.75 +            break;
    1.76 +
    1.77 +        case MSR_IA32_LASTBRANCHTOIP:
    1.78 +            vmcb->lastbranchtoip = msr_content;
    1.79 +            break;
    1.80 +
    1.81 +        case MSR_IA32_LASTINTFROMIP:
    1.82 +            vmcb->lastintfromip = msr_content;
    1.83 +            break;
    1.84 +
    1.85 +        case MSR_IA32_LASTINTTOIP:
    1.86 +            vmcb->lastinttoip = msr_content;
    1.87 +            break;
    1.88 +
    1.89          default:
    1.90              switch ( long_mode_do_msr_write(regs) )
    1.91              {
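The write path above is deliberately lazy and one-way: interception stays on until the guest actually writes a non-zero DEBUGCTL (and stays on forever if the CPU lacks LBR virtualization), but once enabled, VMRUN/#VMEXIT swap DEBUGCTL and the four LBR registers through the new VMCB save-area fields, so even a later guest write of zero needs no intercept. The shape of that case, distilled into a standalone helper; a sketch, not the literal Xen code, though the field and helper names are the patch's own:

    static void svm_guest_wrote_debugctl(struct vcpu *v, uint64_t msr_content)
    {
        struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

        vmcb->debugctlmsr = msr_content;

        /* Stay in "intercept everything" mode until the guest actually
         * enables LBR, and forever if the CPU cannot virtualize it. */
        if ( !msr_content || !cpu_has_svm_lbrv )
            return;

        /* From here on, hardware context-switches the five MSRs via the
         * VMCB save area, so the intercepts can be dropped for good. */
        vmcb->lbr_control.fields.enable = 1;
        svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
        svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
        svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
        svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
        svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
    }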
     2.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Oct 11 19:23:40 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Fri Oct 12 10:19:55 2007 +0100
     2.3 @@ -80,8 +80,10 @@ struct host_save_area *alloc_host_save_a
     2.4      return hsa;
     2.5  }
     2.6  
     2.7 -static void disable_intercept_for_msr(char *msr_bitmap, u32 msr)
     2.8 +void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
     2.9  {
    2.10 +    char *msr_bitmap = v->arch.hvm_svm.msrpm;
    2.11 +
    2.12      /*
    2.13       * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
    2.14       */
    2.15 @@ -142,16 +144,16 @@ static int construct_vmcb(struct vcpu *v
    2.16          return -ENOMEM;
    2.17      memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
    2.18  
    2.19 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_FS_BASE);
    2.20 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_GS_BASE);
    2.21 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SHADOW_GS_BASE);
    2.22 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_CSTAR);
    2.23 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_LSTAR);
    2.24 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_STAR);
    2.25 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SYSCALL_MASK);
    2.26 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_CS);
    2.27 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_ESP);
    2.28 -    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_EIP);
    2.29 +    svm_disable_intercept_for_msr(v, MSR_FS_BASE);
    2.30 +    svm_disable_intercept_for_msr(v, MSR_GS_BASE);
    2.31 +    svm_disable_intercept_for_msr(v, MSR_SHADOW_GS_BASE);
    2.32 +    svm_disable_intercept_for_msr(v, MSR_CSTAR);
    2.33 +    svm_disable_intercept_for_msr(v, MSR_LSTAR);
    2.34 +    svm_disable_intercept_for_msr(v, MSR_STAR);
    2.35 +    svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
    2.36 +    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
    2.37 +    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
    2.38 +    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
    2.39  
    2.40      vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
    2.41      vmcb->iopm_base_pa  = (u64)virt_to_maddr(hvm_io_bitmap);
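The helper is exported under an svm_ prefix and now takes the vcpu, fetching the MSR permission bitmap itself, so svm.c can call it long after construct_vmcb() has run. Its body (unchanged by this patch, hence not shown in the diff) clears the per-MSR read- and write-intercept bits. A sketch of that bitmap math, following the AMD64 Programmer's Manual Vol 2, Section 15.10: two bits per MSR (read, then write), in three 2KB blocks covering MSRs 0x0-0x1FFF, 0xC0000000-0xC0001FFF and 0xC0010000-0xC0011FFF; not the literal Xen body:

    #include <stdint.h>

    /* 4 MSRs per byte: low bit of each pair = read intercept,
     * high bit = write intercept. */
    static void msrpm_clear_intercepts(char *msr_bitmap, uint32_t msr)
    {
        unsigned int base;

        if ( msr <= 0x1fff )
            base = 0x0000;
        else if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
        {
            base = 0x0800;
            msr &= 0x1fff;
        }
        else if ( msr >= 0xc0010000 && msr <= 0xc0011fff )
        {
            base = 0x1000;
            msr &= 0x1fff;
        }
        else
            return;  /* not covered by the bitmap: always intercepted */

        msr_bitmap[base + msr / 4] &= ~(3u << ((msr % 4) * 2));
    }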
     3.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Thu Oct 11 19:23:40 2007 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Fri Oct 12 10:19:55 2007 +0100
     3.3 @@ -61,4 +61,16 @@ static inline void svm_vmsave(void *vmcb
     3.4          : : "a" (__pa(vmcb)) : "memory" );
     3.5  }
     3.6  
     3.7 +extern u32 svm_feature_flags;
     3.8 +
     3.9 +#define SVM_FEATURE_NPT     0
    3.10 +#define SVM_FEATURE_LBRV    1
    3.11 +#define SVM_FEATURE_SVML    2
    3.12 +#define SVM_FEATURE_NRIPS   3
    3.13 +
    3.14 +#define cpu_has_svm_npt     test_bit(SVM_FEATURE_NPT, &svm_feature_flags)
    3.15 +#define cpu_has_svm_lbrv    test_bit(SVM_FEATURE_LBRV, &svm_feature_flags)
    3.16 +#define cpu_has_svm_svml    test_bit(SVM_FEATURE_SVML, &svm_feature_flags)
    3.17 +#define cpu_has_svm_nrips   test_bit(SVM_FEATURE_NRIPS, &svm_feature_flags)
    3.18 +
    3.19  #endif /* __ASM_X86_HVM_SVM_H__ */
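The four bit positions match CPUID Fn8000_000A_EDX as documented in the AMD64 manual: bit 0 nested paging (NP), bit 1 LBR virtualization, bit 2 SVM lock, bit 3 next-RIP save on #VMEXIT. A runnable decode of a sample value, mirroring the macros with plain shifts in place of Xen's test_bit:

    #include <stdint.h>
    #include <stdio.h>

    #define SVM_FEATURE_NPT    0   /* nested paging */
    #define SVM_FEATURE_LBRV   1   /* LBR virtualization */
    #define SVM_FEATURE_SVML   2   /* SVM lock */
    #define SVM_FEATURE_NRIPS  3   /* next-RIP save on #VMEXIT */

    static int has_feature(uint32_t flags, unsigned int bit)
    {
        return (flags >> bit) & 1;
    }

    int main(void)
    {
        uint32_t flags = 0x0f;  /* example: all four features advertised */

        printf("npt=%d lbrv=%d svml=%d nrips=%d\n",
               has_feature(flags, SVM_FEATURE_NPT),
               has_feature(flags, SVM_FEATURE_LBRV),
               has_feature(flags, SVM_FEATURE_SVML),
               has_feature(flags, SVM_FEATURE_NRIPS));
        return 0;
    }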
     4.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Oct 11 19:23:40 2007 +0100
     4.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Fri Oct 12 10:19:55 2007 +0100
     4.3 @@ -355,6 +355,15 @@ typedef union
     4.4      } fields;
     4.5  } __attribute__ ((packed)) ioio_info_t;
     4.6  
     4.7 +typedef union
     4.8 +{
     4.9 +    u64 bytes;
    4.10 +    struct
    4.11 +    {
    4.12 +        u64 enable:1;
    4.13 +    } fields;
    4.14 +} __attribute__ ((packed)) lbrctrl_t;
    4.15 +
    4.16  struct vmcb_struct {
    4.17      u32 cr_intercepts;          /* offset 0x00 */
    4.18      u32 dr_intercepts;          /* offset 0x04 */
    4.19 @@ -383,7 +392,8 @@ struct vmcb_struct {
    4.20      u64 res08[2];
    4.21      eventinj_t  eventinj;       /* offset 0xA8 */
    4.22      u64 h_cr3;                  /* offset 0xB0 */
    4.23 -    u64 res09[105];             /* offset 0xB8 pad to save area */
    4.24 +    lbrctrl_t lbr_control;      /* offset 0xB8 */
    4.25 +    u64 res09[104];             /* offset 0xC0 pad to save area */
    4.26  
    4.27      svm_segment_register_t es;      /* offset 1024 */
    4.28      svm_segment_register_t cs;
    4.29 @@ -426,20 +436,21 @@ struct vmcb_struct {
    4.30      u64 pdpe2;
    4.31      u64 pdpe3;
    4.32      u64 g_pat;
    4.33 -    u64 res16[50];
    4.34 -    u64 res17[128];
    4.35 -    u64 res18[128];
    4.36 +    u64 debugctlmsr;
    4.37 +    u64 lastbranchfromip;
    4.38 +    u64 lastbranchtoip;
    4.39 +    u64 lastintfromip;
    4.40 +    u64 lastinttoip;
    4.41 +    u64 res16[301];
    4.42  } __attribute__ ((packed));
    4.43  
    4.44 -
    4.45  struct arch_svm_struct {
    4.46      struct vmcb_struct *vmcb;
    4.47 -    u64                 vmcb_pa;
    4.48 -    u64                 asid_generation; /* ASID tracking, moved here to
    4.49 -                                            prevent cacheline misses. */
    4.50 -    u32                *msrpm;
    4.51 -    int                 launch_core;
    4.52 -    bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
    4.53 +    u64    vmcb_pa;
    4.54 +    u64    asid_generation; /* ASID tracking, moved here for cache locality. */
    4.55 +    char  *msrpm;
    4.56 +    int    launch_core;
    4.57 +    bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
    4.58  };
    4.59  
    4.60  struct vmcb_struct *alloc_vmcb(void);
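The reserved-field arithmetic in the two hunks above works out: lbr_control takes the u64 at offset 0xB8, so the pad to the save area shrinks from 105 to 104 u64s (0xC0 + 104*8 = 0x400 = 1024, where es begins), and within the save area the five new u64 fields replace 5 of the previous 50 + 128 + 128 = 306 reserved u64s, leaving res16[301]. One could pin this down with compile-time checks in the style Xen uses elsewhere; a sketch, assuming struct vmcb_struct is in scope:

    #include <stddef.h>

    /* Negative-array-size compile-time assert, as in Xen's BUILD_BUG_ON(). */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    static inline void vmcb_layout_checks(void)
    {
        BUILD_BUG_ON(offsetof(struct vmcb_struct, lbr_control) != 0xB8);
        BUILD_BUG_ON(offsetof(struct vmcb_struct, es) != 1024);
        /* debugctlmsr..lastinttoip (5 u64s) + res16[301] end the struct. */
        BUILD_BUG_ON(offsetof(struct vmcb_struct, debugctlmsr) +
                     (5 + 301) * 8 != sizeof(struct vmcb_struct));
    }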
    4.61 @@ -451,6 +462,8 @@ void svm_destroy_vmcb(struct vcpu *v);
    4.62  
    4.63  void setup_vmcb_dump(void);
    4.64  
    4.65 +void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr);
    4.66 +
    4.67  #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
    4.68  
    4.69  /*