vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED);
/* Postpone APIC updates for PV(H) guests if PMU interrupt is pending */
- if ( is_hvm_vcpu(curr) || !vpmu->xenpmu_data ||
+ if ( has_vlapic(curr->domain) || !vpmu->xenpmu_data ||
!vpmu_is_set(vpmu, VPMU_CACHED) )
apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
}
* and since do_wr/rdmsr may load VPMU context we should save
* (and unload) it again.
*/
- if ( !is_hvm_vcpu(curr) && vpmu->xenpmu_data &&
+ if ( !has_vlapic(curr->domain) && vpmu->xenpmu_data &&
vpmu_is_set(vpmu, VPMU_CACHED) )
{
vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
return;
/* PV(H) guest */
- if ( !is_hvm_vcpu(sampling) || (vpmu_mode & XENPMU_MODE_ALL) )
+ if ( !has_vlapic(sampling->domain) || (vpmu_mode & XENPMU_MODE_ALL) )
{
const struct cpu_user_regs *cur_regs;
uint64_t *flags = &vpmu->xenpmu_data->pmu.pmu_flags;
/* Only when PMU is counting, we load PMU context immediately. */
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ||
- (!is_hvm_vcpu(vpmu_vcpu(vpmu)) && vpmu_is_set(vpmu, VPMU_CACHED)) )
+ (!has_vlapic(vpmu_vcpu(vpmu)->domain) &&
+ vpmu_is_set(vpmu, VPMU_CACHED)) )
return 0;
if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
struct xen_pmu_data *xenpmu_data;
struct vpmu_struct *vpmu;
- if ( !opt_vpmu_enabled )
+ if ( !opt_vpmu_enabled || has_vlapic(current->domain) )
return -EOPNOTSUPP;
ret = xsm_pmu_op(XSM_OTHER, current->domain, op);
bool_t is_running = 0;
struct xen_pmu_amd_ctxt *guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;
- ASSERT(!is_hvm_vcpu(v));
+ ASSERT(!has_vlapic(v->domain));
ctxt = vpmu->context;
ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
{
struct xen_pmu_amd_ctxt *guest_ctxt, *ctxt;
- ASSERT(!is_hvm_vcpu(v));
+ ASSERT(!has_vlapic(v->domain));
ctxt = vpmu->context;
guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;
memcpy(&guest_ctxt->regs[0], &ctxt->regs[0], regs_sz);
vpmu->context = ctxt;
vpmu->priv_context = NULL;
- if ( !is_hvm_vcpu(v) )
+ if ( !has_vlapic(v->domain) )
{
/* Copy register offsets to shared area */
ASSERT(vpmu->xenpmu_data);
if ( to_guest )
{
- ASSERT(!is_hvm_vcpu(v));
+ ASSERT(!has_vlapic(v->domain));
memcpy((void *)(&vpmu->xenpmu_data->pmu.c.intel) + regs_off,
vpmu->context + regs_off, regs_sz);
}
{
int ret;
- ASSERT(!is_hvm_vcpu(v));
+ ASSERT(!has_vlapic(v->domain));
memcpy(vpmu->context + regs_off,
(void *)&v->arch.vpmu.xenpmu_data->pmu.c.intel + regs_off,
regs_sz);
vpmu->context = core2_vpmu_cxt;
vpmu->priv_context = p;
- if ( !is_hvm_vcpu(v) )
+ if ( !has_vlapic(v->domain) )
{
/* Copy fixed/arch register offsets to shared area */
ASSERT(vpmu->xenpmu_data);