*
* Author: Haitao Shan <haitao.shan@intel.com>
*/
-#include <xen/sched.h>
-#include <xen/xenoprof.h>
-#include <xen/event.h>
-#include <xen/guest_access.h>
#include <xen/cpu.h>
+#include <xen/err.h>
#include <xen/param.h>
+#include <xen/event.h>
+#include <xen/guest_access.h>
+#include <xen/sched.h>
#include <asm/regs.h>
#include <asm/types.h>
#include <asm/msr.h>
static unsigned int __read_mostly opt_vpmu_enabled;
unsigned int __read_mostly vpmu_mode = XENPMU_MODE_OFF;
unsigned int __read_mostly vpmu_features = 0;
+static struct arch_vpmu_ops __read_mostly vpmu_ops;
static DEFINE_SPINLOCK(vpmu_lock);
static unsigned vpmu_count;
{
struct vcpu *curr = current;
struct vpmu_struct *vpmu;
- const struct arch_vpmu_ops *ops;
int ret = 0;
/*
goto nop;
vpmu = vcpu_vpmu(curr);
- ops = vpmu->arch_vpmu_ops;
- if ( !ops )
+ if ( !vpmu_is_set(vpmu, VPMU_INITIALIZED) )
goto nop;
- if ( is_write && ops->do_wrmsr )
- ret = ops->do_wrmsr(msr, *msr_content, supported);
- else if ( !is_write && ops->do_rdmsr )
- ret = ops->do_rdmsr(msr, msr_content);
+ if ( is_write && vpmu_ops.do_wrmsr )
+ ret = alternative_call(vpmu_ops.do_wrmsr, msr, *msr_content, supported);
+ else if ( !is_write && vpmu_ops.do_rdmsr )
+ ret = alternative_call(vpmu_ops.do_rdmsr, msr, msr_content);
else
goto nop;
vpmu_is_set(vpmu, VPMU_CACHED) )
{
vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
- ops->arch_vpmu_save(curr, 0);
+ alternative_vcall(vpmu_ops.arch_vpmu_save, curr, 0);
vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
}
sampling = sampled;
vpmu = vcpu_vpmu(sampling);
- if ( !vpmu->arch_vpmu_ops )
+ if ( !vpmu_is_set(vpmu, VPMU_INITIALIZED) )
return;
/* PV(H) guest */
/* PV guest will be reading PMU MSRs from xenpmu_data */
vpmu_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
- vpmu->arch_vpmu_ops->arch_vpmu_save(sampling, 1);
+ alternative_vcall(vpmu_ops.arch_vpmu_save, sampling, 1);
vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
if ( is_hvm_vcpu(sampled) )
/* We don't support (yet) HVM dom0 */
ASSERT(sampling == sampled);
- if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) ||
+ if ( !alternative_call(vpmu_ops.do_interrupt, regs) ||
!is_vlapic_lvtpc_enabled(vlapic) )
return;
vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
- if ( vpmu->arch_vpmu_ops )
- (void)vpmu->arch_vpmu_ops->arch_vpmu_save(v, 0);
+ alternative_vcall(vpmu_ops.arch_vpmu_save, v, 0);
vpmu_reset(vpmu, VPMU_CONTEXT_SAVE);
vpmu->last_pcpu = pcpu;
per_cpu(last_vcpu, pcpu) = v;
- if ( vpmu->arch_vpmu_ops )
- if ( vpmu->arch_vpmu_ops->arch_vpmu_save(v, 0) )
- vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
+ if ( alternative_call(vpmu_ops.arch_vpmu_save, v, 0) )
+ vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
}
vpmu_is_set(vpmu, VPMU_CACHED)) )
return 0;
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
+ if ( vpmu_ops.arch_vpmu_load )
{
int ret;
apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
/* Arch code needs to set VPMU_CONTEXT_LOADED */
- ret = vpmu->arch_vpmu_ops->arch_vpmu_load(v, from_guest);
+ ret = alternative_call(vpmu_ops.arch_vpmu_load, v, from_guest);
if ( ret )
{
apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc | APIC_LVT_MASKED);
on_selected_cpus(cpumask_of(vpmu->last_pcpu),
vpmu_clear_last, v, 1);
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )
+ if ( vpmu_ops.arch_vpmu_destroy )
{
/*
* Unload VPMU first if VPMU_CONTEXT_LOADED being set.
on_selected_cpus(cpumask_of(vcpu_vpmu(v)->last_pcpu),
vpmu_save_force, v, 1);
- vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
+ alternative_vcall(vpmu_ops.arch_vpmu_destroy, v);
}
vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
/* Dump some vpmu information to console. Used in keyhandler dump_domains(). */
void vpmu_dump(struct vcpu *v)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_dump )
- vpmu->arch_vpmu_ops->arch_vpmu_dump(v);
+ if ( vpmu_is_set(vcpu_vpmu(v), VPMU_INITIALIZED) &&
+ vpmu_ops.arch_vpmu_dump )
+ alternative_vcall(vpmu_ops.arch_vpmu_dump, v);
}
long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
static int __init vpmu_init(void)
{
int vendor = current_cpu_data.x86_vendor;
+ const struct arch_vpmu_ops *ops = NULL;
if ( !opt_vpmu_enabled )
return 0;
switch ( vendor )
{
case X86_VENDOR_AMD:
- if ( amd_vpmu_init() )
- vpmu_mode = XENPMU_MODE_OFF;
+ ops = amd_vpmu_init();
break;
case X86_VENDOR_HYGON:
- if ( hygon_vpmu_init() )
- vpmu_mode = XENPMU_MODE_OFF;
+ ops = hygon_vpmu_init();
break;
case X86_VENDOR_INTEL:
- if ( core2_vpmu_init() )
- vpmu_mode = XENPMU_MODE_OFF;
+ ops = core2_vpmu_init();
break;
default:
printk(XENLOG_WARNING "VPMU: Unknown CPU vendor: %d. "
"Turning VPMU off.\n", vendor);
- vpmu_mode = XENPMU_MODE_OFF;
break;
}
- if ( vpmu_mode != XENPMU_MODE_OFF )
+ if ( !IS_ERR_OR_NULL(ops) )
{
+ vpmu_ops = *ops;
register_cpu_notifier(&cpu_nfb);
printk(XENLOG_INFO "VPMU: version " __stringify(XENPMU_VER_MAJ) "."
__stringify(XENPMU_VER_MIN) "\n");
}
else
+ {
+ vpmu_mode = XENPMU_MODE_OFF;
opt_vpmu_enabled = 0;
+ }
return 0;
}
-__initcall(vpmu_init);
+presmp_initcall(vpmu_init);
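Aside, not part of the patch: the vendor init hooks above now return either a
pointer to their ops table or an errno encoded with ERR_PTR(), and vpmu_init()
tells the two apart via IS_ERR_OR_NULL() from xen/err.h.  A minimal sketch of
that return convention, using hypothetical names (probe_ops, my_ops, the_ops):

    #include <xen/err.h>
    #include <xen/errno.h>
    #include <xen/init.h>
    #include <xen/types.h>

    struct my_ops { void (*hook)(void); };

    static const struct my_ops the_ops = { /* ... */ };

    /* Return the ops table on success, or an encoded errno on failure. */
    static const struct my_ops *__init probe_ops(bool supported)
    {
        return supported ? &the_ops : ERR_PTR(-ENODEV);
    }

    static int __init caller(void)
    {
        const struct my_ops *ops = probe_ops(true);

        /* IS_ERR_OR_NULL() catches both NULL and ERR_PTR() values. */
        if ( IS_ERR_OR_NULL(ops) )
            return ops ? PTR_ERR(ops) : -ENODEV;

        /* ... consume *ops, e.g. copy it into a file-scope instance ... */
        return 0;
    }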
*
*/
-#include <xen/xenoprof.h>
+#include <xen/err.h>
#include <xen/sched.h>
-#include <xen/irq.h>
+#include <xen/xenoprof.h>
#include <asm/apic.h>
#include <asm/vpmu.h>
#include <asm/hvm/save.h>
}
}
-static const struct arch_vpmu_ops amd_vpmu_ops = {
+static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
.do_wrmsr = amd_vpmu_do_wrmsr,
.do_rdmsr = amd_vpmu_do_rdmsr,
.do_interrupt = amd_vpmu_do_interrupt,
offsetof(struct xen_pmu_amd_ctxt, regs));
}
- vpmu->arch_vpmu_ops = &amd_vpmu_ops;
- vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
+ vpmu_set(vpmu, VPMU_INITIALIZED | VPMU_CONTEXT_ALLOCATED);
return 0;
}
-static int __init common_init(void)
+static const struct arch_vpmu_ops *__init common_init(void)
{
unsigned int i;
{
printk(XENLOG_WARNING "VPMU: Unsupported CPU family %#x\n",
current_cpu_data.x86);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ( sizeof(struct xen_pmu_data) +
"VPMU: Register bank does not fit into VPMU shared page\n");
counters = ctrls = NULL;
num_counters = 0;
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
}
for ( i = 0; i < num_counters; i++ )
ctrl_rsvd[i] &= CTRL_RSVD_MASK;
}
- return 0;
+ return &amd_vpmu_ops;
}
-int __init amd_vpmu_init(void)
+const struct arch_vpmu_ops *__init amd_vpmu_init(void)
{
switch ( current_cpu_data.x86 )
{
return common_init();
}
-int __init hygon_vpmu_init(void)
+const struct arch_vpmu_ops *__init hygon_vpmu_init(void)
{
switch ( current_cpu_data.x86 )
{
* Author: Haitao Shan <haitao.shan@intel.com>
*/
+#include <xen/err.h>
#include <xen/sched.h>
#include <xen/xenoprof.h>
-#include <xen/irq.h>
#include <asm/system.h>
#include <asm/regs.h>
#include <asm/types.h>
vpmu_clear(vpmu);
}
-static const struct arch_vpmu_ops core2_vpmu_ops = {
+static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
.do_wrmsr = core2_vpmu_do_wrmsr,
.do_rdmsr = core2_vpmu_do_rdmsr,
.do_interrupt = core2_vpmu_do_interrupt,
if ( is_pv_vcpu(v) && !core2_vpmu_alloc_resource(v) )
return -EIO;
- vpmu->arch_vpmu_ops = &core2_vpmu_ops;
+ vpmu_set(vpmu, VPMU_INITIALIZED);
return 0;
}
-int __init core2_vpmu_init(void)
+const struct arch_vpmu_ops *__init core2_vpmu_init(void)
{
unsigned int version = 0;
unsigned int i;
default:
printk(XENLOG_WARNING "VPMU: PMU version %u is not supported\n",
version);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ( current_cpu_data.x86 != 6 )
{
printk(XENLOG_WARNING "VPMU: only family 6 is supported\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
arch_pmc_cnt = core2_get_arch_pmc_count();
printk(XENLOG_WARNING
"VPMU: Register bank does not fit into VPMU share page\n");
arch_pmc_cnt = fixed_pmc_cnt = 0;
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
}
- return 0;
+ return &core2_vpmu_ops;
}
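Aside, not part of the patch: with a single file-scope vpmu_ops instance that
is filled once during boot, every hook invocation can go through Xen's
alternative_call()/alternative_vcall() macros, letting the alternatives
framework patch the indirect calls into direct ones.  A hedged sketch of the
shape of such call sites, using a hypothetical hook structure (declarations of
the alternative_*call() macros come from Xen's alternatives infrastructure and
are omitted here):

    struct hooks {
        int  (*read)(unsigned int reg, uint64_t *val);
        void (*flush)(unsigned int cpu);
    };

    static struct hooks __read_mostly hooks;   /* filled once, early at boot */

    int read_reg(unsigned int reg, uint64_t *val)
    {
        /* Value-returning hook: alternative_call(). */
        return alternative_call(hooks.read, reg, val);
    }

    void flush_cpu(unsigned int cpu)
    {
        /* void hook: alternative_vcall(). */
        alternative_vcall(hooks.flush, cpu);
    }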
void (*arch_vpmu_dump)(const struct vcpu *);
};
-int core2_vpmu_init(void);
+const struct arch_vpmu_ops *core2_vpmu_init(void);
int vmx_vpmu_initialise(struct vcpu *);
-int amd_vpmu_init(void);
-int hygon_vpmu_init(void);
+const struct arch_vpmu_ops *amd_vpmu_init(void);
+const struct arch_vpmu_ops *hygon_vpmu_init(void);
int svm_vpmu_initialise(struct vcpu *);
struct vpmu_struct {
u32 hw_lapic_lvtpc;
void *context; /* May be shared with PV guest */
void *priv_context; /* hypervisor-only */
- const struct arch_vpmu_ops *arch_vpmu_ops;
struct xen_pmu_data *xenpmu_data;
spinlock_t vpmu_lock;
};
/* VPMU states */
-#define VPMU_CONTEXT_ALLOCATED 0x1
-#define VPMU_CONTEXT_LOADED 0x2
-#define VPMU_RUNNING 0x4
-#define VPMU_CONTEXT_SAVE 0x8 /* Force context save */
-#define VPMU_FROZEN 0x10 /* Stop counters while VCPU is not running */
-#define VPMU_PASSIVE_DOMAIN_ALLOCATED 0x20
+#define VPMU_INITIALIZED 0x0001
+#define VPMU_CONTEXT_ALLOCATED 0x0002
+#define VPMU_CONTEXT_LOADED 0x0004
+#define VPMU_RUNNING 0x0008
+#define VPMU_CONTEXT_SAVE 0x0010 /* Force context save */
+#define VPMU_FROZEN 0x0020 /* Stop counters while VCPU is not running */
+#define VPMU_PASSIVE_DOMAIN_ALLOCATED 0x0040
/* PV(H) guests: VPMU registers are accessed by guest from shared page */
-#define VPMU_CACHED 0x40
-#define VPMU_AVAILABLE 0x80
+#define VPMU_CACHED 0x0080
+#define VPMU_AVAILABLE 0x0100
/* Intel-specific VPMU features */
-#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
-#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
+#define VPMU_CPU_HAS_DS 0x1000 /* Has Debug Store */
+#define VPMU_CPU_HAS_BTS 0x2000 /* Has Branch Trace Store */
static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
{