xenbits.xensource.com Git - people/iwj/xen.git/commitdiff
x86/VPMU: Factor out VPMU common code
author: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Mon, 15 Apr 2013 09:26:44 +0000 (11:26 +0200)
committer: Jan Beulich <jbeulich@suse.com>
Mon, 15 Apr 2013 09:26:44 +0000 (11:26 +0200)
Factor out common code from SVM and VMX into VPMU.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
xen/arch/x86/hvm/svm/vpmu.c
xen/arch/x86/hvm/vmx/vpmu_core2.c
xen/arch/x86/hvm/vpmu.c
xen/include/asm-x86/hvm/vmx/vpmu_core2.h
xen/include/asm-x86/hvm/vpmu.h

index 51e5495bc6615162abf2c60cafeee69c825c4cbd..f2f2d125fdeeaac99c62de810dd4f1d8fec0fee6 100644 (file)
@@ -87,7 +87,6 @@ static const u32 AMD_F15H_CTRLS[] = {
 struct amd_vpmu_context {
     u64 counters[MAX_NUM_COUNTERS];
     u64 ctrls[MAX_NUM_COUNTERS];
-    u32 hw_lapic_lvtpc;
     bool_t msr_bitmap_set;
 };
 
@@ -171,22 +170,6 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
 
 static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-    u32 vlapic_lvtpc;
-    unsigned char int_vec;
-
-    if ( !is_vlapic_lvtpc_enabled(vlapic) )
-        return 0;
-
-    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
-    int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
-
-    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
-        vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
-    else
-        v->nmi_pending = 1;
-
     return 1;
 }
 
@@ -205,17 +188,7 @@ static inline void context_restore(struct vcpu *v)
 
 static void amd_vpmu_restore(struct vcpu *v)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-    struct amd_vpmu_context *ctxt = vpmu->context;
-
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
-        return;
-
-    apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
     context_restore(v);
-
-    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static inline void context_save(struct vcpu *v)
@@ -237,18 +210,10 @@ static void amd_vpmu_save(struct vcpu *v)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctx = vpmu->context;
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
-        return;
-
     context_save(v);
 
     if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
         amd_vpmu_unset_msr_bitmap(v);
-
-    ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
-    apic_write(APIC_LVTPC,  ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
-    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static void context_update(unsigned int msr, u64 msr_content)
@@ -271,8 +236,6 @@ static void context_update(unsigned int msr, u64 msr_content)
     for ( i = 0; i < num_counters; i++ )
         if ( msr == ctrls[i] )
             ctxt->ctrls[i] = msr_content;
-
-    ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
 }
 
 static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
index 7c86a0b6b13e0f8fd5440260138760ed0472605f..6195bfc477983c0e5d75a1dd5b1661eff6d0f0a3 100644 (file)
@@ -305,25 +305,18 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
-    core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
-    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
 }
 
 static void core2_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
-        return;
-
     __core2_vpmu_save(v);
 
     /* Unset PMU MSR bitmap to trap lazy load. */
     if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
 
-    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
     return;
 }
 
@@ -341,20 +334,11 @@ static inline void __core2_vpmu_load(struct vcpu *v)
         wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
-
-    apic_write_around(APIC_LVTPC, core2_vpmu_cxt->hw_lapic_lvtpc);
 }
 
 static void core2_vpmu_load(struct vcpu *v)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
-    /* Only when PMU is counting, we load PMU context immediately. */
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
-        return;
     __core2_vpmu_load(v);
-    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static int core2_vpmu_alloc_resource(struct vcpu *v)
@@ -705,11 +689,8 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     u64 msr_content;
-    u32 vlapic_lvtpc;
-    unsigned char int_vec;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
-    struct vlapic *vlapic = vcpu_vlapic(v);
 
     rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content);
     if ( msr_content )
@@ -728,18 +709,9 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
             return 0;
     }
 
+    /* HW sets the MASK bit when performance counter interrupt occurs*/
     apic_write_around(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
 
-    if ( !is_vlapic_lvtpc_enabled(vlapic) )
-        return 1;
-
-    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
-    int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
-    vlapic_set_reg(vlapic, APIC_LVTPC, vlapic_lvtpc | APIC_LVT_MASKED);
-    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
-        vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
-    else
-        v->nmi_pending = 1;
     return 1;
 }
 
index 3b6958081e9e0cf7cf68a286bcf9a7e2e2490c82..ff5f065c3ac4075fbdf1f357632ac91a34d3fcb4 100644 (file)
@@ -31,7 +31,7 @@
 #include <asm/hvm/vpmu.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
-
+#include <asm/apic.h>
 
 /*
  * "vpmu" :     vpmu generally enabled
@@ -83,10 +83,31 @@ int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 
 int vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
-    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( vpmu->arch_vpmu_ops )
+    {
+        struct vlapic *vlapic = vcpu_vlapic(v);
+        u32 vlapic_lvtpc;
+        unsigned char int_vec;
+
+        if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) )
+            return 0;
+
+        if ( !is_vlapic_lvtpc_enabled(vlapic) )
+            return 1;
+
+        vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
+        int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
+
+        if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
+            vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
+        else
+            v->nmi_pending = 1;
+        return 1;
+    }
 
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt )
-        return vpmu->arch_vpmu_ops->do_interrupt(regs);
     return 0;
 }
 
@@ -104,16 +125,35 @@ void vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save )
+    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
+       return;
+
+    if ( vpmu->arch_vpmu_ops )
         vpmu->arch_vpmu_ops->arch_vpmu_save(v);
+
+    vpmu->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
+    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
+
+    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 void vpmu_load(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
+    /* Only when PMU is counting, we load PMU context immediately. */
+    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+           vpmu_is_set(vpmu, VPMU_RUNNING)) )
+        return;
+
     if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
+    {
+        apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
         vpmu->arch_vpmu_ops->arch_vpmu_load(v);
+    }
+
+    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 void vpmu_initialise(struct vcpu *v)
index 4128f2ac24cf67010cc4d905d54a57a002f2f6c8..60b05fd9bcb2fc9960ad7d78362d46c74da85bd7 100644 (file)
@@ -44,7 +44,6 @@ struct core2_vpmu_context {
     u64 fix_counters[VPMU_CORE2_NUM_FIXED];
     u64 ctrls[VPMU_CORE2_NUM_CTRLS];
     u64 global_ovf_status;
-    u32 hw_lapic_lvtpc;
     struct arch_msr_pair arch_msr_pair[1];
 };
 
index cd31f5eb0bea9245ca980a2d0089316c606a15ce..01be97683eb46e85f027c3f11e7b26b31be67c33 100644 (file)
@@ -62,6 +62,7 @@ int svm_vpmu_initialise(struct vcpu *, unsigned int flags);
 
 struct vpmu_struct {
     u32 flags;
+    u32 hw_lapic_lvtpc;
     void *context;
     struct arch_vpmu_ops *arch_vpmu_ops;
 };