#include <asm/apic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/vpmu.h>
-
-#define F10H_NUM_COUNTERS 4
-#define F15H_NUM_COUNTERS 6
-#define MAX_NUM_COUNTERS F15H_NUM_COUNTERS
+#include <public/pmu.h>
#define MSR_F10H_EVNTSEL_GO_SHIFT 40
#define MSR_F10H_EVNTSEL_EN_SHIFT 22
static const u32 __read_mostly *ctrls;
static bool_t __read_mostly k7_counters_mirrored;
+#define F10H_NUM_COUNTERS 4
+#define F15H_NUM_COUNTERS 6
+
/* PMU Counter MSRs. */
static const u32 AMD_F10H_COUNTERS[] = {
MSR_K7_PERFCTR0,
MSR_AMD_FAM15H_EVNTSEL5
};
-/* storage for context switching */
-struct amd_vpmu_context {
- u64 counters[MAX_NUM_COUNTERS];
- u64 ctrls[MAX_NUM_COUNTERS];
- bool_t msr_bitmap_set;
-};
+/* Use private context as a flag for MSR bitmap */
+#define msr_bitmap_on(vpmu) do { \
+ (vpmu)->priv_context = (void *)-1L; \
+ } while (0)
+#define msr_bitmap_off(vpmu) do { \
+ (vpmu)->priv_context = NULL; \
+ } while (0)
+#define is_msr_bitmap_on(vpmu) ((vpmu)->priv_context != NULL)
static inline int get_pmu_reg_type(u32 addr)
{
{
unsigned int i;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct amd_vpmu_context *ctxt = vpmu->context;
for ( i = 0; i < num_counters; i++ )
{
svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
}
- ctxt->msr_bitmap_set = 1;
+ msr_bitmap_on(vpmu);
}
static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
{
unsigned int i;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct amd_vpmu_context *ctxt = vpmu->context;
for ( i = 0; i < num_counters; i++ )
{
svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
}
- ctxt->msr_bitmap_set = 0;
+ msr_bitmap_off(vpmu);
}
static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
{
unsigned int i;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct amd_vpmu_context *ctxt = vpmu->context;
+ struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
+ uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
+ uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
for ( i = 0; i < num_counters; i++ )
{
- wrmsrl(counters[i], ctxt->counters[i]);
- wrmsrl(ctrls[i], ctxt->ctrls[i]);
+ wrmsrl(counters[i], counter_regs[i]);
+ wrmsrl(ctrls[i], ctrl_regs[i]);
}
}
static void amd_vpmu_load(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct amd_vpmu_context *ctxt = vpmu->context;
+ struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
+ uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
vpmu_reset(vpmu, VPMU_FROZEN);
unsigned int i;
for ( i = 0; i < num_counters; i++ )
- wrmsrl(ctrls[i], ctxt->ctrls[i]);
+ wrmsrl(ctrls[i], ctrl_regs[i]);
return;
}
{
unsigned int i;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct amd_vpmu_context *ctxt = vpmu->context;
+ struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
+ uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
/* No need to save controls -- they are saved in amd_vpmu_do_wrmsr */
for ( i = 0; i < num_counters; i++ )
- rdmsrl(counters[i], ctxt->counters[i]);
+ rdmsrl(counters[i], counter_regs[i]);
}
static int amd_vpmu_save(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct amd_vpmu_context *ctx = vpmu->context;
unsigned int i;
/*
context_save(v);
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
- has_hvm_container_vcpu(v) && ctx->msr_bitmap_set )
+ has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
return 1;
unsigned int i;
struct vcpu *v = current;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct amd_vpmu_context *ctxt = vpmu->context;
+ struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
+ uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
+ uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
if ( k7_counters_mirrored &&
((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
{
if ( msr == ctrls[i] )
{
- ctxt->ctrls[i] = msr_content;
+ ctrl_regs[i] = msr_content;
return;
}
else if (msr == counters[i] )
{
- ctxt->counters[i] = msr_content;
+ counter_regs[i] = msr_content;
return;
}
}
return 1;
vpmu_set(vpmu, VPMU_RUNNING);
- if ( has_hvm_container_vcpu(v) &&
- !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+        if ( has_hvm_container_vcpu(v) && !is_msr_bitmap_on(vpmu) )
amd_vpmu_set_msr_bitmap(v);
}
(is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
{
vpmu_reset(vpmu, VPMU_RUNNING);
- if ( has_hvm_container_vcpu(v) &&
- ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
release_pmu_ownship(PMU_OWNER_HVM);
}
static int amd_vpmu_initialise(struct vcpu *v)
{
- struct amd_vpmu_context *ctxt;
+ struct xen_pmu_amd_ctxt *ctxt;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
uint8_t family = current_cpu_data.x86;
}
}
- ctxt = xzalloc(struct amd_vpmu_context);
+ ctxt = xzalloc_bytes(sizeof(*ctxt) +
+ 2 * sizeof(uint64_t) * num_counters);
if ( !ctxt )
{
gdprintk(XENLOG_WARNING, "Insufficient memory for PMU, "
return -ENOMEM;
}
+ ctxt->counters = sizeof(*ctxt);
+ ctxt->ctrls = ctxt->counters + sizeof(uint64_t) * num_counters;
+
vpmu->context = ctxt;
+ vpmu->priv_context = NULL;
vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
return 0;
}
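/*
 * Illustrative sketch, not part of this patch: with the offsets initialised
 * above, the single allocation is laid out as
 *
 *   [ struct xen_pmu_amd_ctxt ][ counters[num_counters] ][ ctrls[num_counters] ]
 *   ^                          ^                          ^
 *   ctxt                       ctxt + ctxt->counters      ctxt + ctxt->ctrls
 *
 * so vpmu_reg_pointer(ctxt, counters) and vpmu_reg_pointer(ctxt, ctrls)
 * resolve to the two uint64_t banks that follow the fixed-size header.
 */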
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( has_hvm_container_vcpu(v) &&
- ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
xfree(vpmu->context);
static void amd_vpmu_dump(const struct vcpu *v)
{
const struct vpmu_struct *vpmu = vcpu_vpmu(v);
- const struct amd_vpmu_context *ctxt = vpmu->context;
+ const struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
+ const uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
+ const uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
unsigned int i;
printk(" VPMU state: 0x%x ", vpmu->flags);
rdmsrl(ctrls[i], ctrl);
rdmsrl(counters[i], cntr);
printk(" %#x: %#lx (%#lx in HW) %#x: %#lx (%#lx in HW)\n",
- ctrls[i], ctxt->ctrls[i], ctrl,
- counters[i], ctxt->counters[i], cntr);
+ ctrls[i], ctrl_regs[i], ctrl,
+ counters[i], counter_regs[i], cntr);
}
}
#include <asm/hvm/vmx/vmcs.h>
#include <public/sched.h>
#include <public/hvm/save.h>
+#include <public/pmu.h>
#include <asm/hvm/vpmu.h>
-#include <asm/hvm/vmx/vpmu_core2.h>
/*
* See Intel SDM Vol 2a Instruction Set Reference chapter 3 for CPUID
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
static bool_t __read_mostly full_width_write;
+/* Intel-specific VPMU features */
+#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
+#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
+
/*
* MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
* counters. 4 bits for every counter.
#define FIXED_CTR_CTRL_BITS 4
#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
-#define VPMU_CORE2_MAX_FIXED_PMCS 4
-struct core2_vpmu_context {
- u64 fixed_ctrl;
- u64 ds_area;
- u64 pebs_enable;
- u64 global_ovf_status;
- u64 enabled_cntrs; /* Follows PERF_GLOBAL_CTRL MSR format */
- u64 fix_counters[VPMU_CORE2_MAX_FIXED_PMCS];
- struct arch_msr_pair arch_msr_pair[1];
-};
-
/* Number of general-purpose and fixed performance counters */
static unsigned int __read_mostly arch_pmc_cnt, fixed_pmc_cnt;
}
}
+static inline int msraddr_to_bitpos(int x)
+{
+ ASSERT(x == (x & 0x1fff));
+ return x;
+}
+
static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
{
int i;
static inline void __core2_vpmu_save(struct vcpu *v)
{
int i;
- struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+ struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+ uint64_t *fixed_counters = vpmu_reg_pointer(core2_vpmu_cxt, fixed_counters);
+ struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
+ vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
for ( i = 0; i < fixed_pmc_cnt; i++ )
- rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, core2_vpmu_cxt->fix_counters[i]);
+ rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, fixed_counters[i]);
for ( i = 0; i < arch_pmc_cnt; i++ )
- rdmsrl(MSR_IA32_PERFCTR0 + i, core2_vpmu_cxt->arch_msr_pair[i].counter);
+ rdmsrl(MSR_IA32_PERFCTR0 + i, xen_pmu_cntr_pair[i].counter);
}
static int core2_vpmu_save(struct vcpu *v)
static inline void __core2_vpmu_load(struct vcpu *v)
{
unsigned int i, pmc_start;
- struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+ struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+ uint64_t *fixed_counters = vpmu_reg_pointer(core2_vpmu_cxt, fixed_counters);
+ struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
+ vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
for ( i = 0; i < fixed_pmc_cnt; i++ )
- wrmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, core2_vpmu_cxt->fix_counters[i]);
+ wrmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, fixed_counters[i]);
if ( full_width_write )
pmc_start = MSR_IA32_A_PERFCTR0;
pmc_start = MSR_IA32_PERFCTR0;
for ( i = 0; i < arch_pmc_cnt; i++ )
{
- wrmsrl(pmc_start + i, core2_vpmu_cxt->arch_msr_pair[i].counter);
- wrmsrl(MSR_P6_EVNTSEL(i), core2_vpmu_cxt->arch_msr_pair[i].control);
+ wrmsrl(pmc_start + i, xen_pmu_cntr_pair[i].counter);
+ wrmsrl(MSR_P6_EVNTSEL(i), xen_pmu_cntr_pair[i].control);
}
wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, core2_vpmu_cxt->fixed_ctrl);
static int core2_vpmu_alloc_resource(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct core2_vpmu_context *core2_vpmu_cxt;
+ struct xen_pmu_intel_ctxt *core2_vpmu_cxt = NULL;
+ uint64_t *p = NULL;
if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
return 0;
goto out_err;
vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
- core2_vpmu_cxt = xzalloc_bytes(sizeof(struct core2_vpmu_context) +
- (arch_pmc_cnt-1)*sizeof(struct arch_msr_pair));
- if ( !core2_vpmu_cxt )
+ core2_vpmu_cxt = xzalloc_bytes(sizeof(*core2_vpmu_cxt) +
+ sizeof(uint64_t) * fixed_pmc_cnt +
+ sizeof(struct xen_pmu_cntr_pair) *
+ arch_pmc_cnt);
+ p = xzalloc(uint64_t);
+ if ( !core2_vpmu_cxt || !p )
goto out_err;
- vpmu->context = (void *)core2_vpmu_cxt;
+ core2_vpmu_cxt->fixed_counters = sizeof(*core2_vpmu_cxt);
+ core2_vpmu_cxt->arch_counters = core2_vpmu_cxt->fixed_counters +
+ sizeof(uint64_t) * fixed_pmc_cnt;
+
+ vpmu->context = core2_vpmu_cxt;
+ vpmu->priv_context = p;
vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
out_err:
release_pmu_ownship(PMU_OWNER_HVM);
+ xfree(core2_vpmu_cxt);
+ xfree(p);
+
printk("Failed to allocate VPMU resources for domain %u vcpu %u\n",
           v->domain->domain_id, v->vcpu_id);
int type = -1, index = -1;
struct vcpu *v = current;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct core2_vpmu_context *core2_vpmu_cxt = NULL;
+ struct xen_pmu_intel_ctxt *core2_vpmu_cxt;
+ uint64_t *enabled_cntrs;
if ( !core2_vpmu_msr_common_check(msr, &type, &index) )
{
ASSERT(!supported);
core2_vpmu_cxt = vpmu->context;
+ enabled_cntrs = vpmu->priv_context;
switch ( msr )
{
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- core2_vpmu_cxt->global_ovf_status &= ~msr_content;
+ core2_vpmu_cxt->global_status &= ~msr_content;
return 1;
case MSR_CORE_PERF_GLOBAL_STATUS:
gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
break;
case MSR_CORE_PERF_FIXED_CTR_CTRL:
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
- core2_vpmu_cxt->enabled_cntrs &=
- ~(((1ULL << VPMU_CORE2_MAX_FIXED_PMCS) - 1) << 32);
+ *enabled_cntrs &= ~(((1ULL << fixed_pmc_cnt) - 1) << 32);
if ( msr_content != 0 )
{
u64 val = msr_content;
for ( i = 0; i < fixed_pmc_cnt; i++ )
{
if ( val & 3 )
- core2_vpmu_cxt->enabled_cntrs |= (1ULL << 32) << i;
+ *enabled_cntrs |= (1ULL << 32) << i;
val >>= FIXED_CTR_CTRL_BITS;
}
}
tmp = msr - MSR_P6_EVNTSEL(0);
if ( tmp >= 0 && tmp < arch_pmc_cnt )
{
+ struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
+ vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
+
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
if ( msr_content & (1ULL << 22) )
- core2_vpmu_cxt->enabled_cntrs |= 1ULL << tmp;
+ *enabled_cntrs |= 1ULL << tmp;
else
- core2_vpmu_cxt->enabled_cntrs &= ~(1ULL << tmp);
+ *enabled_cntrs &= ~(1ULL << tmp);
- core2_vpmu_cxt->arch_msr_pair[tmp].control = msr_content;
+ xen_pmu_cntr_pair[tmp].control = msr_content;
}
}
- if ( (global_ctrl & core2_vpmu_cxt->enabled_cntrs) ||
- (core2_vpmu_cxt->ds_area != 0) )
+ if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
vpmu_set(vpmu, VPMU_RUNNING);
else
vpmu_reset(vpmu, VPMU_RUNNING);
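/*
 * Illustrative note, not part of this patch: the per-vcpu 'enabled_cntrs'
 * word kept in priv_context follows the PERF_GLOBAL_CTRL layout, e.g. with
 * four architectural and three fixed counters:
 *
 *   bits 0-3   - architectural counters whose EVNTSEL enable bit (22) is set
 *   bits 32-34 - fixed counters with a non-zero enable field in FIXED_CTR_CTRL
 *
 * which is why it can be ANDed directly with the guest's GLOBAL_CTRL value
 * above to decide whether the vPMU is running.
 */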
int type = -1, index = -1;
struct vcpu *v = current;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct core2_vpmu_context *core2_vpmu_cxt = NULL;
+ struct xen_pmu_intel_ctxt *core2_vpmu_cxt;
if ( core2_vpmu_msr_common_check(msr, &type, &index) )
{
*msr_content = 0;
break;
case MSR_CORE_PERF_GLOBAL_STATUS:
- *msr_content = core2_vpmu_cxt->global_ovf_status;
+ *msr_content = core2_vpmu_cxt->global_status;
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
{
const struct vpmu_struct *vpmu = vcpu_vpmu(v);
unsigned int i;
- const struct core2_vpmu_context *core2_vpmu_cxt = NULL;
+ const struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vpmu->context;
u64 val;
+ uint64_t *fixed_counters;
+ struct xen_pmu_cntr_pair *cntr_pair;
- if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
+ if ( !core2_vpmu_cxt || !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) )
}
printk(" vPMU running\n");
- core2_vpmu_cxt = vpmu->context;
+
+ cntr_pair = vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
+ fixed_counters = vpmu_reg_pointer(core2_vpmu_cxt, fixed_counters);
/* Print the contents of the counter and its configuration msr. */
for ( i = 0; i < arch_pmc_cnt; i++ )
- {
- const struct arch_msr_pair *msr_pair = core2_vpmu_cxt->arch_msr_pair;
-
printk(" general_%d: 0x%016lx ctrl: 0x%016lx\n",
- i, msr_pair[i].counter, msr_pair[i].control);
- }
+ i, cntr_pair[i].counter, cntr_pair[i].control);
+
/*
     * The configuration of each fixed counter occupies 4 bits in
     * MSR_CORE_PERF_FIXED_CTR_CTRL.
for ( i = 0; i < fixed_pmc_cnt; i++ )
{
printk(" fixed_%d: 0x%016lx ctrl: %#lx\n",
- i, core2_vpmu_cxt->fix_counters[i],
+ i, fixed_counters[i],
val & FIXED_CTR_CTRL_MASK);
val >>= FIXED_CTR_CTRL_BITS;
}
struct vcpu *v = current;
u64 msr_content;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
+ struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vpmu->context;
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content);
if ( msr_content )
{
if ( is_pmc_quirk )
handle_pmc_quirk(msr_content);
- core2_vpmu_cxt->global_ovf_status |= msr_content;
+ core2_vpmu_cxt->global_status |= msr_content;
msr_content = 0xC000000700000000 | ((1 << arch_pmc_cnt) - 1);
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
}
arch_pmc_cnt = core2_get_arch_pmc_count();
fixed_pmc_cnt = core2_get_fixed_pmc_count();
- if ( fixed_pmc_cnt > VPMU_CORE2_MAX_FIXED_PMCS )
- {
- fixed_pmc_cnt = VPMU_CORE2_MAX_FIXED_PMCS;
- printk(XENLOG_G_WARNING "Limiting number of fixed counters to %d\n",
- fixed_pmc_cnt);
- }
-
check_pmc_quirk();
return 0;
}
struct vpmu_struct *vpmu = vcpu_vpmu(v);
xfree(vpmu->context);
+ xfree(vpmu->priv_context);
if ( has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
release_pmu_ownship(PMU_OWNER_HVM);
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/vmcb.h>
#include <asm/apic.h>
+#include <public/pmu.h>
+
+#include <compat/pmu.h>
+CHECK_pmu_cntr_pair;
+CHECK_pmu_data;
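/*
 * Note: these CHECK_* invocations come from compat/xlat.h, generated from the
 * '?' entries added to xen/include/xlat.lst below; each expands to build-time
 * assertions that the native and compat layouts of the corresponding public
 * PMU structures stay identical.
 */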
/*
* "vpmu" : vpmu generally enabled
uint8_t vendor = current_cpu_data.x86_vendor;
int ret;
+ BUILD_BUG_ON(sizeof(struct xen_pmu_intel_ctxt) > XENPMU_CTXT_PAD_SZ);
+ BUILD_BUG_ON(sizeof(struct xen_pmu_amd_ctxt) > XENPMU_CTXT_PAD_SZ);
+
if ( is_pvh_vcpu(v) )
return;
#include <asm/regs.h>
#include <asm/current.h>
#include <asm/hvm/vpmu.h>
-#include <asm/hvm/vmx/vpmu_core2.h>
#include "op_x86_model.h"
#include "op_counter.h"
+struct arch_msr_pair {
+ u64 counter;
+ u64 control;
+};
+
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:
compat/version.h \
compat/xen.h \
compat/xenoprof.h
+headers-$(CONFIG_X86) += compat/arch-x86/pmu.h
headers-$(CONFIG_X86) += compat/arch-x86/xen-mca.h
headers-$(CONFIG_X86) += compat/arch-x86/xen.h
headers-$(CONFIG_X86) += compat/arch-x86/xen-$(compat-arch-y).h
-headers-y += compat/arch-$(compat-arch-y).h compat/xlat.h
+headers-y += compat/arch-$(compat-arch-y).h compat/pmu.h compat/xlat.h
headers-$(FLASK_ENABLE) += compat/xsm/flask_op.h
cppflags-y := -include public/xen-compat.h
+++ /dev/null
-
-/*
- * vpmu_core2.h: CORE 2 specific PMU virtualization for HVM domain.
- *
- * Copyright (c) 2007, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Author: Haitao Shan <haitao.shan@intel.com>
- */
-
-#ifndef __ASM_X86_HVM_VPMU_CORE_H_
-#define __ASM_X86_HVM_VPMU_CORE_H_
-
-struct arch_msr_pair {
- u64 counter;
- u64 control;
-};
-
-#endif /* __ASM_X86_HVM_VPMU_CORE_H_ */
-
#ifndef __ASM_X86_HVM_VPMU_H_
#define __ASM_X86_HVM_VPMU_H_
+#include <public/pmu.h>
+
/*
* Flag bits given as a string on the hypervisor boot parameter 'vpmu'.
* See arch/x86/hvm/vpmu.c.
#define VPMU_BOOT_ENABLED 0x1 /* vpmu generally enabled. */
#define VPMU_BOOT_BTS 0x2 /* Intel BTS feature wanted. */
-
-#define msraddr_to_bitpos(x) (((x)&0xffff) + ((x)>>31)*0x2000)
#define vcpu_vpmu(vcpu) (&((vcpu)->arch.hvm_vcpu.vpmu))
#define vpmu_vcpu(vpmu) (container_of((vpmu), struct vcpu, \
arch.hvm_vcpu.vpmu))
-#define vpmu_domain(vpmu) (vpmu_vcpu(vpmu)->domain)
#define MSR_TYPE_COUNTER 0
#define MSR_TYPE_CTRL 1
#define MSR_TYPE_ARCH_COUNTER 3
#define MSR_TYPE_ARCH_CTRL 4
+/* Start of PMU register bank */
+#define vpmu_reg_pointer(ctxt, offset) ((void *)((uintptr_t)ctxt + \
+ (uintptr_t)ctxt->offset))
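/*
 * Illustrative use (not part of this patch): given a vendor context whose
 * 'counters' member holds a byte offset into the same allocation,
 *
 *     struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
 *     uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
 *
 * counter_regs then points at the first saved counter MSR image; the macro
 * simply adds the stored byte offset to the context's base address.
 */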
/* Arch specific operations shared by all vpmus */
struct arch_vpmu_ops {
u32 flags;
u32 last_pcpu;
u32 hw_lapic_lvtpc;
- void *context;
+ void *context; /* May be shared with PV guest */
+ void *priv_context; /* hypervisor-only */
struct arch_vpmu_ops *arch_vpmu_ops;
};
#define VPMU_FROZEN 0x10 /* Stop counters while VCPU is not running */
#define VPMU_PASSIVE_DOMAIN_ALLOCATED 0x20
-/* VPMU features */
-#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
-#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
-
-
static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
{
vpmu->flags |= mask;
#endif
+#ifndef __ASSEMBLY__
+/* Stub definition of PMU structure */
+typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
+#endif
+
#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */
/*
--- /dev/null
+#ifndef __XEN_PUBLIC_ARCH_X86_PMU_H__
+#define __XEN_PUBLIC_ARCH_X86_PMU_H__
+
+/* x86-specific PMU definitions */
+
+/* AMD PMU registers and structures */
+struct xen_pmu_amd_ctxt {
+ /* Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd) */
+ uint32_t counters;
+ uint32_t ctrls;
+
+ /* Counter MSRs */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint64_t regs[];
+#elif defined(__GNUC__)
+ uint64_t regs[0];
+#endif
+};
+typedef struct xen_pmu_amd_ctxt xen_pmu_amd_ctxt_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_amd_ctxt_t);
+
+/* Intel PMU registers and structures */
+struct xen_pmu_cntr_pair {
+ uint64_t counter;
+ uint64_t control;
+};
+typedef struct xen_pmu_cntr_pair xen_pmu_cntr_pair_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_cntr_pair_t);
+
+struct xen_pmu_intel_ctxt {
+ /*
+ * Offsets to fixed and architectural counter MSRs (relative to
+ * xen_pmu_arch.c.intel)
+ */
+ uint32_t fixed_counters;
+ uint32_t arch_counters;
+
+ /* PMU registers */
+ uint64_t global_ctrl;
+ uint64_t global_ovf_ctrl;
+ uint64_t global_status;
+ uint64_t fixed_ctrl;
+ uint64_t ds_area;
+ uint64_t pebs_enable;
+ uint64_t debugctl;
+
+ /* Fixed and architectural counter MSRs */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint64_t regs[];
+#elif defined(__GNUC__)
+ uint64_t regs[0];
+#endif
+};
+typedef struct xen_pmu_intel_ctxt xen_pmu_intel_ctxt_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_intel_ctxt_t);
+
+/* Sampled domain's registers */
+struct xen_pmu_regs {
+ uint64_t ip;
+ uint64_t sp;
+ uint64_t flags;
+ uint16_t cs;
+ uint16_t ss;
+ uint8_t cpl;
+ uint8_t pad[3];
+};
+typedef struct xen_pmu_regs xen_pmu_regs_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_regs_t);
+
+/* PMU flags */
+#define PMU_CACHED (1<<0) /* PMU MSRs are cached in the context */
+
+/*
+ * Architecture-specific information describing state of the processor at
+ * the time of PMU interrupt.
+ * Fields of this structure marked as RW for the guest should only be
+ * written by the guest when the PMU_CACHED bit in pmu_flags is set (which
+ * is done by the hypervisor during a PMU interrupt). The hypervisor will
+ * read the updated data in the XENPMU_flush hypercall and clear the
+ * PMU_CACHED bit.
+ */
+struct xen_pmu_arch {
+ union {
+ /*
+ * Processor's registers at the time of interrupt.
+ * WO for hypervisor, RO for guests.
+ */
+ struct xen_pmu_regs regs;
+ /* Padding for adding new registers to xen_pmu_regs in the future */
+#define XENPMU_REGS_PAD_SZ 64
+ uint8_t pad[XENPMU_REGS_PAD_SZ];
+ } r;
+
+ /* WO for hypervisor, RO for guest */
+ uint64_t pmu_flags;
+
+ /* Placeholder for APIC LVTPC register */
+ uint64_t lvtpc_pad;
+
+ /* Placeholder for vendor-specific PMU registers */
+#define XENPMU_CTXT_PAD_SZ 128
+ uint64_t pmu_regs_pad[XENPMU_CTXT_PAD_SZ / 8];
+};
+typedef struct xen_pmu_arch xen_pmu_arch_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pmu_arch_t);
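/*
 * Illustrative sketch, not part of this interface: a guest interrupt handler
 * with 'xa' pointing at its shared architectural state (how the page is
 * registered and mapped is outside the scope of this header) would check
 *
 *     if ( xa->pmu_flags & PMU_CACHED )
 *     {
 *         uint64_t ip = xa->r.regs.ip;
 *         uint8_t cpl = xa->r.regs.cpl;
 *         ...
 *     }
 *
 * consume the sample (updating any RW fields as needed), and then issue the
 * XENPMU_flush hypercall so the hypervisor picks up the updates and clears
 * PMU_CACHED.
 */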
+
+#endif /* __XEN_PUBLIC_ARCH_X86_PMU_H__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
+
--- /dev/null
+#ifndef __XEN_PUBLIC_PMU_H__
+#define __XEN_PUBLIC_PMU_H__
+
+#include "xen.h"
+#if defined(__i386__) || defined(__x86_64__)
+#include "arch-x86/pmu.h"
+#elif defined (__arm__) || defined (__aarch64__)
+#include "arch-arm.h"
+#else
+#error "Unsupported architecture"
+#endif
+
+#define XENPMU_VER_MAJ 0
+#define XENPMU_VER_MIN 1
+
+/*
+ * Shared PMU data between hypervisor and PV(H) domains.
+ *
+ * The hypervisor fills out this structure during a PMU interrupt and sends
+ * an interrupt to the appropriate VCPU.
+ * Architecture-independent fields of xen_pmu_data are WO for the hypervisor
+ * and RO for the guest, but some fields in xen_pmu_arch can be written by
+ * both the hypervisor and the guest (see arch-$arch/pmu.h).
+ */
+struct xen_pmu_data {
+ /* Interrupted VCPU */
+ uint32_t vcpu_id;
+
+ /*
+ * Physical processor on which the interrupt occurred. On non-privileged
+     * guests this is set to vcpu_id.
+ */
+ uint32_t pcpu_id;
+
+ /*
+ * Domain that was interrupted. On non-privileged guests set to DOMID_SELF.
+     * Domain that was interrupted. On non-privileged guests this is set to
+     * DOMID_SELF. On privileged guests it can be DOMID_SELF, DOMID_XEN, or,
+     * when in XENPMU_MODE_ALL mode, the domain ID of another domain.
+ */
+ domid_t domain_id;
+
+ uint8_t pad[6];
+
+ /* Architecture-specific information */
+ struct xen_pmu_arch pmu;
+};
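/*
 * Illustrative example, not part of this interface: a privileged profiler
 * handed a struct xen_pmu_data pointer 'pmu' would typically look at
 *
 *     pmu->domain_id  - DOMID_SELF, DOMID_XEN or, in XENPMU_MODE_ALL,
 *                       the ID of another domain
 *     pmu->pcpu_id    - physical CPU on which the PMU interrupt occurred
 *     pmu->vcpu_id    - VCPU that was interrupted
 *
 * before decoding the architecture-specific state in pmu->pmu.
 */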
+
+#endif /* __XEN_PUBLIC_PMU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
! mmuext_op xen.h
! start_info xen.h
? vcpu_time_info xen.h
+? pmu_amd_ctxt arch-x86/pmu.h
+? pmu_arch arch-x86/pmu.h
+? pmu_cntr_pair arch-x86/pmu.h
+? pmu_intel_ctxt arch-x86/pmu.h
+? pmu_regs arch-x86/pmu.h
! cpu_user_regs arch-x86/xen-@arch@.h
! trap_info arch-x86/xen.h
? cpu_offline_action arch-x86/xen-mca.h
? xenpf_pcpuinfo platform.h
? xenpf_pcpu_version platform.h
? xenpf_resource_entry platform.h
+? pmu_data pmu.h
! sched_poll sched.h
? sched_remote_shutdown sched.h
? sched_shutdown sched.h