void arch_domain_destroy(struct domain *d)
{
/* IOMMU page table is shared with P2M, always call
- * iommu_domain_destroy() before p2m_teardown().
+ * iommu_domain_destroy() before p2m_final_teardown().
*/
iommu_domain_destroy(d);
- p2m_teardown(d);
+ p2m_final_teardown(d);
domain_vgic_free(d);
domain_vuart_free(d);
free_xenheap_page(d->shared_info);
if ( ret )
return ret;
+ d->arch.relmem = RELMEM_p2m;
+ /* Fallthrough */
+
+ case RELMEM_p2m:
+ ret = p2m_teardown(d);
+ if ( ret )
+ return ret;
+
d->arch.relmem = RELMEM_done;
/* Fallthrough */
spin_unlock(&vmid_alloc_lock);
}
-void p2m_teardown(struct domain *d)
+/*
+ * Relinquish-time teardown of the host p2m: free the intermediate
+ * page-tables queued on p2m->pages, checking for preemption as the
+ * list may be very long.
+ *
+ * Returns 0 on completion, or -ERESTART when preempted so the caller
+ * can restart the operation later (freeing is idempotent: pages are
+ * removed from the list before being freed).
+ */
+int p2m_teardown(struct domain *d)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ unsigned long count = 0;
struct page_info *pg;
+ unsigned int i;
+ int rc = 0;
+
+ p2m_write_lock(p2m);
+
+ /*
+ * We are about to free the intermediate page-tables, so clear the
+ * root to prevent any walk from using them.
+ */
+ for ( i = 0; i < P2M_ROOT_PAGES; i++ )
+ clear_and_clean_page(p2m->root + i);
+
+ /*
+ * The domain will not be scheduled anymore, so in theory we should
+ * not need to flush the TLBs. Do it for safety purposes.
+ *
+ * Note that all the devices have already been de-assigned. So we don't
+ * need to flush the IOMMU TLB here.
+ */
+ p2m_force_tlb_flush_sync(p2m);
+
+ while ( (pg = page_list_remove_head(&p2m->pages)) )
+ {
+ free_domheap_page(pg);
+ count++;
+ /* Arbitrarily preempt every 512 iterations */
+ if ( !(count % 512) && hypercall_preempt_check() )
+ {
+ rc = -ERESTART;
+ break;
+ }
+ }
+
+ p2m_write_unlock(p2m);
+
+ return rc;
+}
+
+void p2m_final_teardown(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* p2m not actually initialized */
if ( !p2m->domain )
return;
- while ( (pg = page_list_remove_head(&p2m->pages)) )
- free_domheap_page(pg);
+ ASSERT(page_list_empty(&p2m->pages));
if ( p2m->root )
free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
/* Init the datastructures for later use by the p2m code */
int p2m_init(struct domain *d);
-/* Return all the p2m resources to Xen. */
-void p2m_teardown(struct domain *d);
+/*
+ * The P2M resources are freed in two parts:
+ * - p2m_teardown() will be called while relinquishing the resources. It
+ * frees the large resources (e.g. intermediate page-tables), and so
+ * may require preemption.
+ * - p2m_final_teardown() will be called when the domain struct is being
+ * freed. This *cannot* be preempted, and therefore only small
+ * resources should be freed here.
+ */
+int p2m_teardown(struct domain *d);
+void p2m_final_teardown(struct domain *d);
/*
* Remove mapping refcount on each mapping page in the p2m