vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
}
+
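+ /* The architectural context is gone; clear the allocation flag to match. */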
+ vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
}
-void vpmu_destroy(struct vcpu *v)
+static void vpmu_cleanup(struct vcpu *v)
{
+ struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ void *xenpmu_data;
+
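+ /* Serialise against a concurrent vpmu_cleanup() of the same vCPU. */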
+ spin_lock(&vpmu->vpmu_lock);
+
vpmu_arch_destroy(v);
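+ /* Detach the guest's PMU data page under the lock; it is unmapped below. */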
+ xenpmu_data = vpmu->xenpmu_data;
+ vpmu->xenpmu_data = NULL;
+
+ spin_unlock(&vpmu->vpmu_lock);
+
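+ /* Drop the global mapping and the page reference taken when the page was registered. */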
+ if ( xenpmu_data )
+ {
+ mfn_t mfn = domain_page_map_to_mfn(xenpmu_data);
+
+ ASSERT(mfn_valid(mfn));
+ unmap_domain_page_global(xenpmu_data);
+ put_page_and_type(mfn_to_page(mfn));
+ }
+}
+
+void vpmu_destroy(struct vcpu *v)
+{
+ vpmu_cleanup(v);
put_vpmu(v);
}
static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
{
struct vcpu *v;
- struct vpmu_struct *vpmu;
- mfn_t mfn;
- void *xenpmu_data;
if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
    return;

v = d->vcpu[params->vcpu];
if ( v != current )
    vcpu_pause(v);
- vpmu = vcpu_vpmu(v);
- spin_lock(&vpmu->vpmu_lock);
-
- vpmu_arch_destroy(v);
- xenpmu_data = vpmu->xenpmu_data;
- vpmu->xenpmu_data = NULL;
-
- spin_unlock(&vpmu->vpmu_lock);
-
- if ( xenpmu_data )
- {
- mfn = domain_page_map_to_mfn(xenpmu_data);
- ASSERT(mfn_valid(mfn));
- unmap_domain_page_global(xenpmu_data);
- put_page_and_type(mfn_to_page(mfn));
- }
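+ /* v is either current or paused above, so its vPMU state can be torn down. */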
+ vpmu_cleanup(v);
if ( v != current )
vcpu_unpause(v);
xfree(v->arch.msrs);
v->arch.msrs = NULL;
- if ( !is_idle_domain(v->domain) )
- vpmu_destroy(v);
-
if ( is_hvm_vcpu(v) )
hvm_vcpu_destroy(v);
else
PROGRESS(vcpu_pagetables):
- /* Drop the in-use references to page-table bases. */
+ /*
+ * Drop the in-use references to page-table bases and clean
+ * up vPMU instances.
+ */
for_each_vcpu ( d, v )
{
ret = vcpu_destroy_pagetables(v);
if ( ret )
return ret;
+
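+ /* Free the vPMU state, unmapping the PMU data page if the guest registered one. */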
+ vpmu_destroy(v);
}
if ( altp2m_active(d) )