xenbits.xensource.com Git - xen.git/commitdiff
svm: last branch recording MSR emulation
author: Keir Fraser <keir@xensource.com>
Fri, 12 Oct 2007 09:19:55 +0000 (10:19 +0100)
committer: Keir Fraser <keir@xensource.com>
Fri, 12 Oct 2007 09:19:55 +0000 (10:19 +0100)
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c
xen/include/asm-x86/hvm/svm/svm.h
xen/include/asm-x86/hvm/svm/vmcb.h

index 204b746f6deb0022c6190d771d7074d9f6ccedac..e85f70a12511967d09fa1de4b1e9b2c96bb2a365 100644 (file)
@@ -50,6 +50,8 @@
 #include <asm/hvm/trace.h>
 #include <asm/hap.h>
 
+u32 svm_feature_flags;
+
 #define set_segment_register(name, value)  \
     asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
 
@@ -935,13 +937,16 @@ int start_svm(struct cpuinfo_x86 *c)
 
     setup_vmcb_dump();
 
+    svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
+                         cpuid_edx(0x8000000A) : 0);
+
 #ifdef __x86_64__
     /*
      * Check CPUID for nested paging support. We support NPT only on 64-bit
      * hosts since the phys-to-machine table is in host format. Hence 32-bit
      * Xen could only support guests using NPT with up to a 4GB memory map.
      */
-    svm_function_table.hap_supported = (cpuid_edx(0x8000000A) & 1);
+    svm_function_table.hap_supported = cpu_has_svm_npt;
 #endif
 
     hvm_enable(&svm_function_table);
@@ -1810,6 +1815,26 @@ static void svm_do_msr_access(
             msr_content = 0;
             break;
 
+        case MSR_IA32_DEBUGCTLMSR:
+            msr_content = vmcb->debugctlmsr;
+            break;
+
+        case MSR_IA32_LASTBRANCHFROMIP:
+            msr_content = vmcb->lastbranchfromip;
+            break;
+
+        case MSR_IA32_LASTBRANCHTOIP:
+            msr_content = vmcb->lastbranchtoip;
+            break;
+
+        case MSR_IA32_LASTINTFROMIP:
+            msr_content = vmcb->lastintfromip;
+            break;
+
+        case MSR_IA32_LASTINTTOIP:
+            msr_content = vmcb->lastinttoip;
+            break;
+
         default:
             if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
                  rdmsr_safe(ecx, eax, edx) == 0 )
@@ -1852,6 +1877,34 @@ static void svm_do_msr_access(
             svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             break;
 
+        case MSR_IA32_DEBUGCTLMSR:
+            vmcb->debugctlmsr = msr_content;
+            if ( !msr_content || !cpu_has_svm_lbrv )
+                break;
+            vmcb->lbr_control.fields.enable = 1;
+            svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
+            svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
+            svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
+            svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
+            svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
+            break;
+
+        case MSR_IA32_LASTBRANCHFROMIP:
+            vmcb->lastbranchfromip = msr_content;
+            break;
+
+        case MSR_IA32_LASTBRANCHTOIP:
+            vmcb->lastbranchtoip = msr_content;
+            break;
+
+        case MSR_IA32_LASTINTFROMIP:
+            vmcb->lastintfromip = msr_content;
+            break;
+
+        case MSR_IA32_LASTINTTOIP:
+            vmcb->lastinttoip = msr_content;
+            break;
+
         default:
             switch ( long_mode_do_msr_write(regs) )
             {
index a04d91acc6df5a64ed7328cd87413ca4a46311b4..adb723b6be34e21462d0633d73fd3c18ad348308 100644 (file)
@@ -80,8 +80,10 @@ struct host_save_area *alloc_host_save_area(void)
     return hsa;
 }
 
-static void disable_intercept_for_msr(char *msr_bitmap, u32 msr)
+void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
 {
+    char *msr_bitmap = v->arch.hvm_svm.msrpm;
+
     /*
      * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
      */
@@ -142,16 +144,16 @@ static int construct_vmcb(struct vcpu *v)
         return -ENOMEM;
     memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
 
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_FS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_GS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SHADOW_GS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_CSTAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_LSTAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_STAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SYSCALL_MASK);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_CS);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_ESP);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_EIP);
+    svm_disable_intercept_for_msr(v, MSR_FS_BASE);
+    svm_disable_intercept_for_msr(v, MSR_GS_BASE);
+    svm_disable_intercept_for_msr(v, MSR_SHADOW_GS_BASE);
+    svm_disable_intercept_for_msr(v, MSR_CSTAR);
+    svm_disable_intercept_for_msr(v, MSR_LSTAR);
+    svm_disable_intercept_for_msr(v, MSR_STAR);
+    svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
 
     vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
     vmcb->iopm_base_pa  = (u64)virt_to_maddr(hvm_io_bitmap);
index 649afe9082dd5a7b0b99b8366ba212e4fff49938..dc62a7fba2a49671fd6b168576c20318e411cfd7 100644 (file)
@@ -61,4 +61,16 @@ static inline void svm_vmsave(void *vmcb)
         : : "a" (__pa(vmcb)) : "memory" );
 }
 
+extern u32 svm_feature_flags;
+
+#define SVM_FEATURE_NPT     0
+#define SVM_FEATURE_LBRV    1
+#define SVM_FEATURE_SVML    2
+#define SVM_FEATURE_NRIPS   3
+
+#define cpu_has_svm_npt     test_bit(SVM_FEATURE_NPT, &svm_feature_flags)
+#define cpu_has_svm_lbrv    test_bit(SVM_FEATURE_LBRV, &svm_feature_flags)
+#define cpu_has_svm_svml    test_bit(SVM_FEATURE_SVML, &svm_feature_flags)
+#define cpu_has_svm_nrips   test_bit(SVM_FEATURE_NRIPS, &svm_feature_flags)
+
 #endif /* __ASM_X86_HVM_SVM_H__ */
index 19fa838d7916a2aca4d258c471e3e85ad76d8740..2c6ef68eaa8346eb17bfd8f8a114ef1b35270eec 100644 (file)
@@ -355,6 +355,15 @@ typedef union
     } fields;
 } __attribute__ ((packed)) ioio_info_t;
 
+typedef union
+{
+    u64 bytes;
+    struct
+    {
+        u64 enable:1;
+    } fields;
+} __attribute__ ((packed)) lbrctrl_t;
+
 struct vmcb_struct {
     u32 cr_intercepts;          /* offset 0x00 */
     u32 dr_intercepts;          /* offset 0x04 */
@@ -383,7 +392,8 @@ struct vmcb_struct {
     u64 res08[2];
     eventinj_t  eventinj;       /* offset 0xA8 */
     u64 h_cr3;                  /* offset 0xB0 */
-    u64 res09[105];             /* offset 0xB8 pad to save area */
+    lbrctrl_t lbr_control;      /* offset 0xB8 */
+    u64 res09[104];             /* offset 0xC0 pad to save area */
 
     svm_segment_register_t es;      /* offset 1024 */
     svm_segment_register_t cs;
@@ -426,20 +436,21 @@ struct vmcb_struct {
     u64 pdpe2;
     u64 pdpe3;
     u64 g_pat;
-    u64 res16[50];
-    u64 res17[128];
-    u64 res18[128];
+    u64 debugctlmsr;
+    u64 lastbranchfromip;
+    u64 lastbranchtoip;
+    u64 lastintfromip;
+    u64 lastinttoip;
+    u64 res16[301];
 } __attribute__ ((packed));
 
-
 struct arch_svm_struct {
     struct vmcb_struct *vmcb;
-    u64                 vmcb_pa;
-    u64                 asid_generation; /* ASID tracking, moved here to
-                                            prevent cacheline misses. */
-    u32                *msrpm;
-    int                 launch_core;
-    bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
+    u64    vmcb_pa;
+    u64    asid_generation; /* ASID tracking, moved here for cache locality. */
+    char  *msrpm;
+    int    launch_core;
+    bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
 };
 
 struct vmcb_struct *alloc_vmcb(void);
@@ -451,6 +462,8 @@ void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
+void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr);
+
 #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
 
 /*