const struct arch_vpmu_ops *ops;
int ret = 0;
- if ( likely(vpmu_mode == XENPMU_MODE_OFF) )
- goto nop;
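+ /*
+ * Nothing to do when the vPMU is off or, in XENPMU_MODE_ALL, when
+ * the access does not come from the hardware domain.
+ */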
+ if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
+ ((vpmu_mode & XENPMU_MODE_ALL) &&
+ !is_hardware_domain(current->domain)) )
+ goto nop;
vpmu = vcpu_vpmu(curr);
ops = vpmu->arch_vpmu_ops;
struct vlapic *vlapic;
u32 vlapic_lvtpc;
- /* dom0 will handle interrupt for special domains (e.g. idle domain) */
- if ( sampled->domain->domain_id >= DOMID_FIRST_RESERVED )
+ /*
+ * dom0 will handle interrupt for special domains (e.g. idle domain) or,
+ * in XENPMU_MODE_ALL, for everyone.
+ */
+ if ( (vpmu_mode & XENPMU_MODE_ALL) ||
+ (sampled->domain->domain_id >= DOMID_FIRST_RESERVED) )
{
sampling = choose_hwdom_vcpu();
if ( !sampling )
return;
- /* PV(H) guest */
+ /* PV(H) guest, or any sample while in XENPMU_MODE_ALL */
- if ( !is_hvm_vcpu(sampling) )
+ if ( !is_hvm_vcpu(sampling) || (vpmu_mode & XENPMU_MODE_ALL) )
{
const struct cpu_user_regs *cur_regs;
uint64_t *flags = &vpmu->xenpmu_data->pmu.pmu_flags;
- domid_t domid = DOMID_SELF;
+ domid_t domid;
if ( !vpmu->xenpmu_data )
return;
if ( is_pvh_vcpu(sampling) &&
+ !(vpmu_mode & XENPMU_MODE_ALL) &&
!vpmu->arch_vpmu_ops->do_interrupt(regs) )
return;
else
*flags = PMU_SAMPLE_PV;
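+ /* Tell the handling domain whose context was sampled */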
+ if ( sampled == sampling )
+ domid = DOMID_SELF;
+ else
+ domid = sampled->domain->domain_id;
+
/* Store appropriate registers in xenpmu_data */
/* FIXME: 32-bit PVH should go here as well */
if ( is_pv_32bit_vcpu(sampling) )
if ( (vpmu_mode & XENPMU_MODE_SELF) )
cur_regs = guest_cpu_user_regs();
- else if ( !guest_mode(regs) && is_hardware_domain(sampling->domain) )
+ else if ( !guest_mode(regs) &&
+ is_hardware_domain(sampling->domain) )
{
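+ /* The interrupt hit Xen itself; attribute the sample to the hypervisor */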
cur_regs = regs;
domid = DOMID_XEN;
printk(XENLOG_G_WARNING "VPMU: Initialization failed for %pv\n", v);
/* Intel needs to initialize VPMU ops even if VPMU is not in use */
- if ( !is_priv_vpmu && (ret || (vpmu_mode == XENPMU_MODE_OFF)) )
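+ /* In XENPMU_MODE_ALL only the privileged vPMU remains in use */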
+ if ( !is_priv_vpmu &&
+ (ret || (vpmu_mode == XENPMU_MODE_OFF) ||
+ (vpmu_mode == XENPMU_MODE_ALL)) )
{
spin_lock(&vpmu_lock);
vpmu_count--;
struct page_info *page;
uint64_t gfn = params->val;
- if ( vpmu_mode == XENPMU_MODE_OFF )
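+ /* In XENPMU_MODE_ALL only the hardware domain may register PMU pages */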
+ if ( (vpmu_mode == XENPMU_MODE_OFF) ||
+ ((vpmu_mode & XENPMU_MODE_ALL) && !is_hardware_domain(d)) )
return -EINVAL;
if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
{
case XENPMU_mode_set:
{
- if ( (pmu_params.val & ~(XENPMU_MODE_SELF | XENPMU_MODE_HV)) ||
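+ /* At most one of XENPMU_MODE_SELF/HV/ALL may be set */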
+ if ( (pmu_params.val &
+ ~(XENPMU_MODE_SELF | XENPMU_MODE_HV | XENPMU_MODE_ALL)) ||
(hweight64(pmu_params.val) > 1) )
return -EINVAL;
/* 32-bit dom0 can only sample itself. */
- if ( is_pv_32bit_vcpu(current) && (pmu_params.val & XENPMU_MODE_HV) )
+ if ( is_pv_32bit_vcpu(current) &&
+ (pmu_params.val & (XENPMU_MODE_HV | XENPMU_MODE_ALL)) )
return -EINVAL;
spin_lock(&vpmu_lock);
case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
{
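+ /* In XENPMU_MODE_ALL, silently ignore PMU MSR writes from non-hardware domains */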
+ if ( (vpmu_mode & XENPMU_MODE_ALL) &&
+ !is_hardware_domain(v->domain) )
+ break;
+
if ( vpmu_do_wrmsr(regs->ecx, msr_content, 0) )
goto fail;
}
case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
{
+ if ( (vpmu_mode & XENPMU_MODE_ALL) &&
+ !is_hardware_domain(v->domain) )
+ {
+ /* Don't leak PMU MSRs to unprivileged domains */
+ regs->eax = regs->edx = 0;
+ break;
+ }
+
if ( vpmu_do_rdmsr(regs->ecx, &val) )
goto fail;