u64 counters[MAX_NUM_COUNTERS];
u64 ctrls[MAX_NUM_COUNTERS];
u32 hw_lapic_lvtpc;
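+ /* Set while the counter MSR intercepts are disabled for this vCPU. */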
+ bool_t msr_bitmap_set;
};
static inline int get_pmu_reg_type(u32 addr)
...
return addr;
}
+static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
+{
+ unsigned int i;
+ struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ struct amd_vpmu_context *ctxt = vpmu->context;
+
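+ /* Give the guest direct access to the counter MSRs, but keep
+  * intercepting control MSR writes so counter (re)programming is seen. */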
+ for ( i = 0; i < num_counters; i++ )
+ {
+ svm_intercept_msr(v, counters[i], MSR_INTERCEPT_NONE);
+ svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
+ }
+
+ ctxt->msr_bitmap_set = 1;
+}
+
+static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
+{
+ unsigned int i;
+ struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ struct amd_vpmu_context *ctxt = vpmu->context;
+
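+ /* Re-enable full read/write interception of counter and control MSRs. */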
+ for ( i = 0; i < num_counters; i++ )
+ {
+ svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
+ svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
+ }
+
+ ctxt->msr_bitmap_set = 0;
+}
+
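/*
 * Not part of the patch: a minimal sketch of the per-family MSR tables
 * that the counters[]/ctrls[] pointers and num_counters used above are
 * assumed to refer to elsewhere in this file, using the K7-style
 * (Fam10h) MSRs as an example; the amd_f10h_* names are illustrative only.
 */
static const u32 amd_f10h_counters[] = {
    MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3
};
static const u32 amd_f10h_ctrls[] = {
    MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3
};
/* e.g. counters = amd_f10h_counters; ctrls = amd_f10h_ctrls;
 *      num_counters = ARRAY_SIZE(amd_f10h_counters); */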
static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
...
return;
context_save(v);
+
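+ /* Guest is no longer counting: restore the MSR intercepts. */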
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
+ amd_vpmu_unset_msr_bitmap(v);
+
ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
}
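/*
 * Not part of the patch: context_save(), called above, is assumed to just
 * snapshot the hardware counters into the shadow array of
 * struct amd_vpmu_context; a rough sketch under that assumption:
 */
static inline void context_save(struct vcpu *v)
{
    unsigned int i;
    struct amd_vpmu_context *ctxt = vcpu_vpmu(v)->context;

    for ( i = 0; i < num_counters; i++ )
        rdmsrl(counters[i], ctxt->counters[i]);
}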
...
return 1;
vpmu_set(vpmu, VPMU_RUNNING);
apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
+
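+ /* Guest started counting: drop the counter MSR intercepts. */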
+ if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ amd_vpmu_set_msr_bitmap(v);
}
/* stop saving & restore if guest stops first counter */
if ( ... )
{
apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
vpmu_reset(vpmu, VPMU_RUNNING);
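+ /* Guest stopped counting: restore the counter MSR intercepts. */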
+ if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ amd_vpmu_unset_msr_bitmap(v);
release_pmu_ownship(PMU_OWNER_HVM);
}
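/*
 * Not part of the patch: the guarded branches above presumably key off
 * whether the value the guest writes to a control MSR starts or stops a
 * counter; on AMD the enable bit of PerfEvtSel/PERF_CTL is bit 22. A
 * hypothetical helper for that test:
 */
#define PERF_CTL_EN (1ULL << 22)    /* AMD PerfEvtSel "En" bit */
static inline int perf_ctl_enabled(uint64_t msr_content)
{
    return !!(msr_content & PERF_CTL_EN);
}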
...
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
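+ /* Tear down the MSR bitmap changes before vpmu->context is freed. */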
+ if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ amd_vpmu_unset_msr_bitmap(v);
+
xfree(vpmu->context);
vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);