core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
values in Xen.
With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the
last remaining hook.  Since core2_no_vpmu_ops's introduction in c/s 25250ed7
"vpmu intel: Add cpuid handling when vpmu disabled", a lot of work has been
done and the nop path in vpmu_do_msr() now suffices.
vpmu_do_msr() also falls into the nop path for unconfigured or unprivileged
domains, which enables the removal of the duplicate logic in
priv_op_read_msr().
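For reference, the behaviour the nop path provides is, in essence, the
following (a simplified sketch, not the exact code in vpmu_do_msr();
vpmu_nop_msr() is a hypothetical stand-in for its tail):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Sketch only: PMU MSR reads are satisfied with zero and writes are
     * silently dropped, which is all core2_no_vpmu_do_rdmsr() ever did.
     */
    static int vpmu_nop_msr(uint64_t *msr_content, bool is_write)
    {
        if ( !is_write )
            *msr_content = 0;  /* hide real PMU state from the guest */

        return 0;              /* treated as handled; no fault raised */
    }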
Finally, make all arch_vpmu_ops structures const as they are never modified,
and make them static as they are not referred to externally.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
const struct arch_vpmu_ops *ops;
int ret = 0;
+ /*
+ * Hide the PMU MSRs if vpmu is not configured, or the hardware domain is
+ * profiling the whole system.
+ */
if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
((vpmu_mode & XENPMU_MODE_ALL) &&
- !is_hardware_domain(current->domain)) )
+ !is_hardware_domain(curr->domain)) )
goto nop;
vpmu = vcpu_vpmu(curr);
}
}
-struct arch_vpmu_ops amd_vpmu_ops = {
+static const struct arch_vpmu_ops amd_vpmu_ops = {
.do_wrmsr = amd_vpmu_do_wrmsr,
.do_rdmsr = amd_vpmu_do_rdmsr,
.do_interrupt = amd_vpmu_do_interrupt,
vpmu_clear(vpmu);
}
-struct arch_vpmu_ops core2_vpmu_ops = {
+static const struct arch_vpmu_ops core2_vpmu_ops = {
.do_wrmsr = core2_vpmu_do_wrmsr,
.do_rdmsr = core2_vpmu_do_rdmsr,
.do_interrupt = core2_vpmu_do_interrupt,
.arch_vpmu_dump = core2_vpmu_dump
};
-/*
- * If its a vpmu msr set it to 0.
- */
-static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
- int type = -1, index = -1;
- if ( !is_core2_vpmu_msr(msr, &type, &index) )
- return -EINVAL;
- *msr_content = 0;
- return 0;
-}
-
-/*
- * These functions are used in case vpmu is not enabled.
- */
-struct arch_vpmu_ops core2_no_vpmu_ops = {
- .do_rdmsr = core2_no_vpmu_do_rdmsr,
-};
-
int vmx_vpmu_initialise(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
u64 msr_content;
static bool_t ds_warned;
- vpmu->arch_vpmu_ops = &core2_no_vpmu_ops;
if ( vpmu_mode == XENPMU_MODE_OFF )
return 0;
case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
{
- /* Don't leak PMU MSRs to unprivileged domains. */
- if ( (vpmu_mode & XENPMU_MODE_ALL) &&
- !is_hardware_domain(currd) )
- *val = 0;
- else if ( vpmu_do_rdmsr(reg, val) )
+ if ( vpmu_do_rdmsr(reg, val) )
break;
return X86EMUL_OKAY;
}
u32 hw_lapic_lvtpc;
void *context; /* May be shared with PV guest */
void *priv_context; /* hypervisor-only */
- struct arch_vpmu_ops *arch_vpmu_ops;
+ const struct arch_vpmu_ops *arch_vpmu_ops;
struct xen_pmu_data *xenpmu_data;
spinlock_t vpmu_lock;
};