Factor out common code from SVM and VMX into VPMU.
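Interrupt delivery through the guest vlapic and the save/restore of the host APIC_LVTPC register are essentially identical on the AMD and the Intel side, so move that logic into the common vpmu_do_interrupt(), vpmu_save() and vpmu_load(). The hw_lapic_lvtpc field moves from the vendor-specific contexts into struct vpmu_struct, and the VPMU_CONTEXT_ALLOCATED/LOADED/RUNNING checks are performed once in the common entry points rather than in each set of arch_vpmu_ops.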
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
struct amd_vpmu_context {
u64 counters[MAX_NUM_COUNTERS];
u64 ctrls[MAX_NUM_COUNTERS];
- u32 hw_lapic_lvtpc;
bool_t msr_bitmap_set;
};
static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
{
- struct vcpu *v = current;
- struct vlapic *vlapic = vcpu_vlapic(v);
- u32 vlapic_lvtpc;
- unsigned char int_vec;
-
- if ( !is_vlapic_lvtpc_enabled(vlapic) )
- return 0;
-
- vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
- int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
-
- if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
- vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
- else
- v->nmi_pending = 1;
-
return 1;
}
static void amd_vpmu_restore(struct vcpu *v)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct amd_vpmu_context *ctxt = vpmu->context;
-
- if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
- vpmu_is_set(vpmu, VPMU_RUNNING)) )
- return;
-
- apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
context_restore(v);
-
- vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
}
@@ ... @@ static inline void context_save(struct vcpu *v)
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct amd_vpmu_context *ctx = vpmu->context;
- if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
- vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
- return;
-
context_save(v);
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
amd_vpmu_unset_msr_bitmap(v);
-
- ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
- apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
- vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
}
@@ ... @@ static void context_update(unsigned int msr, u64 msr_content)
for ( i = 0; i < num_counters; i++ )
if ( msr == ctrls[i] )
ctxt->ctrls[i] = msr_content;
-
- ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
}
@@ ... @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
@@ ... @@ static inline void __core2_vpmu_save(struct vcpu *v)
rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
for ( i = 0; i < core2_get_pmc_count(); i++ )
rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
- core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
- apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
}
static void core2_vpmu_save(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
- vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
- return;
-
__core2_vpmu_save(v);
/* Unset PMU MSR bitmap to trap lazy load. */
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
- vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
return;
}
@@ ... @@ static inline void __core2_vpmu_load(struct vcpu *v)
wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
for ( i = 0; i < core2_get_pmc_count(); i++ )
wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
-
- apic_write_around(APIC_LVTPC, core2_vpmu_cxt->hw_lapic_lvtpc);
}
static void core2_vpmu_load(struct vcpu *v)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
- /* Only when PMU is counting, we load PMU context immediately. */
- if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
- vpmu_is_set(vpmu, VPMU_RUNNING)) )
- return;
__core2_vpmu_load(v);
- vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
}
@@ ... @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
{
struct vcpu *v = current;
u64 msr_content;
- u32 vlapic_lvtpc;
- unsigned char int_vec;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
- struct vlapic *vlapic = vcpu_vlapic(v);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content);
if ( msr_content )
return 0;
}
+ /* HW sets the MASK bit when a performance counter interrupt occurs. */
apic_write_around(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
- if ( !is_vlapic_lvtpc_enabled(vlapic) )
- return 1;
-
- vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
- int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
- vlapic_set_reg(vlapic, APIC_LVTPC, vlapic_lvtpc | APIC_LVT_MASKED);
- if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
- vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
- else
- v->nmi_pending = 1;
return 1;
}
#include <asm/hvm/vpmu.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/vmcb.h>
-
+#include <asm/apic.h>
/*
* "vpmu" : vpmu generally enabled
int vpmu_do_interrupt(struct cpu_user_regs *regs)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(current);
+ struct vcpu *v = current;
+ struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+ if ( vpmu->arch_vpmu_ops )
+ {
+ struct vlapic *vlapic = vcpu_vlapic(v);
+ u32 vlapic_lvtpc;
+ unsigned char int_vec;
+
+ if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) )
+ return 0;
+
+ if ( !is_vlapic_lvtpc_enabled(vlapic) )
+ return 1;
+
+ vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
+ int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
+
+ if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
+ vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
+ else
+ v->nmi_pending = 1;
+ return 1;
+ }
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt )
- return vpmu->arch_vpmu_ops->do_interrupt(regs);
return 0;
}
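With delivery centralized above, a vendor do_interrupt() hook only has to decide whether the interrupt belongs to the guest PMU. A minimal sketch of the resulting contract (the function name here is illustrative, not something this patch adds):

/*
 * Sketch of the arch_vpmu_ops->do_interrupt() contract after this
 * patch: return 0 if the interrupt is not for the guest PMU (the
 * common code then returns 0 as well), nonzero to have the common
 * code forward it through the vlapic as a fixed vector or an NMI.
 */
static int example_vpmu_do_interrupt(struct cpu_user_regs *regs)
{
    /* e.g. check an overflow status MSR here, as the core2 side does */
    return 1;
}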
@@ ... @@ void vpmu_save(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
+ return;
+
+ if ( vpmu->arch_vpmu_ops )
vpmu->arch_vpmu_ops->arch_vpmu_save(v);
+
+ vpmu->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
+ apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
+
+ vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
}
void vpmu_load(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ /* Only when the PMU is counting do we load its context immediately. */
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_RUNNING)) )
+ return;
+
if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
+ {
+ apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
vpmu->arch_vpmu_ops->arch_vpmu_load(v);
+ }
+
+ vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
}
@@ ... @@ void vpmu_initialise(struct vcpu *v)
@@ ... @@ struct core2_vpmu_context {
u64 fix_counters[VPMU_CORE2_NUM_FIXED];
u64 ctrls[VPMU_CORE2_NUM_CTRLS];
u64 global_ovf_status;
- u32 hw_lapic_lvtpc;
struct arch_msr_pair arch_msr_pair[1];
};
struct vpmu_struct {
u32 flags;
+ u32 hw_lapic_lvtpc;
void *context;
struct arch_vpmu_ops *arch_vpmu_ops;
};
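Taken together, the common entry points pair up across a vcpu context switch: vpmu_save() stashes the host LVTPC value in vpmu->hw_lapic_lvtpc and masks the vector, and vpmu_load() writes it back before the vendor load hook refills the counters. A minimal sketch of that calling pattern, with a hypothetical wrapper name (the real callers are in the SVM/VMX context-switch paths):

/* Sketch only: ctxt_switch_pmu() is a hypothetical wrapper, not a
 * function added by this patch; it shows how the common entry
 * points are meant to pair up around a vcpu switch. */
static void ctxt_switch_pmu(struct vcpu *prev, struct vcpu *next)
{
    vpmu_save(prev);   /* no-op unless VPMU_CONTEXT_ALLOCATED && LOADED */
    vpmu_load(next);   /* no-op unless VPMU_CONTEXT_ALLOCATED && RUNNING;
                        * an idle PMU is instead reloaded lazily when the
                        * guest next touches a counter MSR, since the save
                        * paths unset the MSR bitmap to re-arm that trap. */
}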