return 0;
}
-void vpmu_initialise(struct vcpu *v)
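+/* Arch-specific part of VPMU initialisation; a no-op if the VPMU is not available. */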
+static int vpmu_arch_initialise(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
uint8_t vendor = current_cpu_data.x86_vendor;
int ret;
- bool_t is_priv_vpmu = is_hardware_domain(v->domain);
BUILD_BUG_ON(sizeof(struct xen_pmu_intel_ctxt) > XENPMU_CTXT_PAD_SZ);
BUILD_BUG_ON(sizeof(struct xen_pmu_amd_ctxt) > XENPMU_CTXT_PAD_SZ);
BUILD_BUG_ON(sizeof(struct xen_pmu_regs) > XENPMU_REGS_PAD_SZ);
BUILD_BUG_ON(sizeof(struct compat_pmu_regs) > XENPMU_REGS_PAD_SZ);
- ASSERT(!vpmu->flags && !vpmu->context);
+ ASSERT(!(vpmu->flags & ~VPMU_AVAILABLE) && !vpmu->context);
- if ( !is_priv_vpmu )
- {
- /*
- * Count active VPMUs so that we won't try to change vpmu_mode while
- * they are in use.
- * vpmu_mode can be safely updated while dom0's VPMUs are active and
- * so we don't need to include it in the count.
- */
- spin_lock(&vpmu_lock);
- vpmu_count++;
- spin_unlock(&vpmu_lock);
- }
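+ /* Nothing to do unless get_vpmu() made this VPMU available. */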
+ if ( !vpmu_available(v) )
+ return 0;
switch ( vendor )
{
opt_vpmu_enabled = 0;
vpmu_mode = XENPMU_MODE_OFF;
}
- return; /* Don't bother restoring vpmu_count, VPMU is off forever */
+ return -EINVAL;
}
vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
if ( ret )
printk(XENLOG_G_WARNING "VPMU: Initialization failed for %pv\n", v);
- /* Intel needs to initialize VPMU ops even if VPMU is not in use */
- if ( !is_priv_vpmu &&
- (ret || (vpmu_mode == XENPMU_MODE_OFF) ||
- (vpmu_mode == XENPMU_MODE_ALL)) )
+ return ret;
+}
+
+static void get_vpmu(struct vcpu *v)
+{
+ spin_lock(&vpmu_lock);
+
+ /*
+ * Keep count of VPMUs in the system so that we won't try to change
+ * vpmu_mode while a guest might be using one.
+ * vpmu_mode can be safely updated while dom0's VPMUs are active and
+ * so we don't need to include it in the count.
+ */
+ if ( !is_hardware_domain(v->domain) &&
+ (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
+ {
+ vpmu_count++;
+ vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);
+ }
+ else if ( is_hardware_domain(v->domain) &&
+ (vpmu_mode != XENPMU_MODE_OFF) )
+ vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);
+
+ spin_unlock(&vpmu_lock);
+}
+
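+/*
+ * Undo get_vpmu(): drop the vpmu_count reference taken for non-dom0
+ * vCPUs and clear VPMU_AVAILABLE.
+ */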
+static void put_vpmu(struct vcpu *v)
+{
+ spin_lock(&vpmu_lock);
+
+ if ( !vpmu_available(v) )
+ goto out;
+
+ if ( !is_hardware_domain(v->domain) &&
+ (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
{
- spin_lock(&vpmu_lock);
vpmu_count--;
- spin_unlock(&vpmu_lock);
+ vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);
}
+ else if ( is_hardware_domain(v->domain) &&
+ (vpmu_mode != XENPMU_MODE_OFF) )
+ vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);
+
+ out:
+ spin_unlock(&vpmu_lock);
+}
+
+void vpmu_initialise(struct vcpu *v)
+{
+ get_vpmu(v);
+
+ /*
+ * Guests without LAPIC (i.e. PV) call vpmu_arch_initialise()
+ * from pvpmu_init().
+ */
+ if ( has_vlapic(v->domain) && vpmu_arch_initialise(v) )
+ put_vpmu(v);
}
static void vpmu_clear_last(void *arg)
this_cpu(last_vcpu) = NULL;
}
-void vpmu_destroy(struct vcpu *v)
+static void vpmu_arch_destroy(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
vpmu_save_force, v, 1);
vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
}
+}
- spin_lock(&vpmu_lock);
- if ( !is_hardware_domain(v->domain) )
- vpmu_count--;
- spin_unlock(&vpmu_lock);
+void vpmu_destroy(struct vcpu *v)
+{
+ vpmu_arch_destroy(v);
+
+ put_vpmu(v);
}
static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
struct page_info *page;
uint64_t gfn = params->val;
- if ( (vpmu_mode == XENPMU_MODE_OFF) ||
- ((vpmu_mode & XENPMU_MODE_ALL) && !is_hardware_domain(d)) )
- return -EINVAL;
-
if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
return -EINVAL;
+ v = d->vcpu[params->vcpu];
+ vpmu = vcpu_vpmu(v);
+
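+ /* get_vpmu() must have made the VPMU available for this vCPU. */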
+ if ( !vpmu_available(v) )
+ return -ENOENT;
+
page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page )
return -EINVAL;
return -EINVAL;
}
- v = d->vcpu[params->vcpu];
- vpmu = vcpu_vpmu(v);
-
spin_lock(&vpmu->vpmu_lock);
if ( v->arch.vpmu.xenpmu_data )
return -ENOMEM;
}
- vpmu_initialise(v);
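+ /* Release the VPMU again if the arch-specific initialisation fails. */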
+ if ( vpmu_arch_initialise(v) )
+ put_vpmu(v);
spin_unlock(&vpmu->vpmu_lock);
vpmu = vcpu_vpmu(v);
spin_lock(&vpmu->vpmu_lock);
- vpmu_destroy(v);
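+ /* Only tear down the arch state here; the VPMU is released by vpmu_destroy(). */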
+ vpmu_arch_destroy(v);
xenpmu_data = vpmu->xenpmu_data;
vpmu->xenpmu_data = NULL;
#define vcpu_vpmu(vcpu) (&(vcpu)->arch.vpmu)
#define vpmu_vcpu(vpmu) container_of((vpmu), struct vcpu, arch.vpmu)
-#define vpmu_enabled(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_CONTEXT_ALLOCATED)
+#define vpmu_available(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_AVAILABLE)
#define MSR_TYPE_COUNTER 0
#define MSR_TYPE_CTRL 1
#define VPMU_PASSIVE_DOMAIN_ALLOCATED 0x20
/* PV(H) guests: VPMU registers are accessed by guest from shared page */
#define VPMU_CACHED 0x40
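+/* VPMU is available for use on this vCPU (managed by get/put_vpmu()) */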
+#define VPMU_AVAILABLE 0x80
/* Intel-specific VPMU features */
#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
}
static inline void vpmu_clear(struct vpmu_struct *vpmu)
{
- vpmu->flags = 0;
+ /* VPMU_AVAILABLE should only be altered by get/put_vpmu(). */
+ vpmu->flags &= VPMU_AVAILABLE;
}
static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask)
{