Add a new parameter to p2m_teardown() in order to select whether the
root page table should also be freed. Note that all users are
adjusted to pass the parameter to remove the root page tables, so
behavior is not modified.
No functional change intended.
This is part of CVE-2022-33746 / XSA-410.
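For illustration, the call-site change reduces to the following (a minimal
sketch; passing 'true' simply preserves the prior behaviour of also freeing
the root):

    /* Before: the root page table was always freed. */
    p2m_teardown(p2m);

    /* After: callers state the intent explicitly; passing false
     * would keep the (scrubbed) root page allocated. */
    p2m_teardown(p2m, true);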
Suggested-by: Julien Grall <julien@xen.org>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
master commit: 1df52a270225527ae27bfa2fc40347bf93b78357
master date: 2022-10-11 14:21:23 +0200
}
for ( i = 0; i < MAX_ALTP2M; i++ )
- p2m_teardown(d->arch.altp2m_p2m[i]);
+ p2m_teardown(d->arch.altp2m_p2m[i], true);
}
/* Destroy nestedp2m's first */
for (i = 0; i < MAX_NESTEDP2M; i++) {
- p2m_teardown(d->arch.nested_p2m[i]);
+ p2m_teardown(d->arch.nested_p2m[i], true);
}
if ( d->arch.paging.hap.total_pages != 0 )
hap_teardown(d, NULL);
- p2m_teardown(p2m_get_hostp2m(d));
+ p2m_teardown(p2m_get_hostp2m(d), true);
/* Free any memory that the p2m teardown released */
paging_lock(d);
hap_set_allocation(d, 0, NULL);
* hvm fixme: when adding support for pvh non-hardware domains, this path must
* cleanup any foreign p2m types (release refcnts on them).
*/
-void p2m_teardown(struct p2m_domain *p2m)
+void p2m_teardown(struct p2m_domain *p2m, bool remove_root)
/* Return all the p2m pages to Xen.
* We know we don't have any extra mappings to these pages */
{
- struct page_info *pg;
+ struct page_info *pg, *root_pg = NULL;
struct domain *d;
if (p2m == NULL)
    return;

d = p2m->domain;

p2m_lock(p2m);
ASSERT(atomic_read(&d->shr_pages) == 0);
- p2m->phys_table = pagetable_null();
+
+ if ( remove_root )
+ p2m->phys_table = pagetable_null();
+ else if ( !pagetable_is_null(p2m->phys_table) )
+ {
+ root_pg = pagetable_get_page(p2m->phys_table);
+ clear_domain_page(pagetable_get_mfn(p2m->phys_table));
+ }
while ( (pg = page_list_remove_head(&p2m->pages)) )
- d->arch.paging.free_page(d, pg);
+ if ( pg != root_pg )
+ d->arch.paging.free_page(d, pg);
+
+ if ( root_pg )
+ page_list_add(root_pg, &p2m->pages);
+
p2m_unlock(p2m);
}
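The remove_root=false path above scrubs the root page, skips it while
draining p2m->pages, and then re-queues it so that a later teardown with
remove_root=true (or the final allocation teardown) releases it. Below is a
self-contained analogue of that list pattern, using a hypothetical plain
linked list in place of Xen's page_list_*/free_page() API:

    #include <stdio.h>
    #include <string.h>

    struct node { int id; char data[16]; struct node *next; };

    static struct node *remove_head(struct node **head)
    {
        struct node *n = *head;

        if ( n )
            *head = n->next;
        return n;
    }

    static void add_head(struct node *n, struct node **head)
    {
        n->next = *head;
        *head = n;
    }

    /* Drain the list, freeing everything except (optionally) 'root'. */
    static void teardown(struct node **head, struct node *root, int remove_root)
    {
        struct node *n, *kept = NULL;

        if ( !remove_root )
        {
            kept = root;                                /* keep the root... */
            memset(root->data, 0, sizeof(root->data));  /* ...but scrub it */
        }

        while ( (n = remove_head(head)) != NULL )
            if ( n != kept )
                printf("freeing node %d\n", n->id);     /* free_page() stand-in */

        if ( kept )
            add_head(kept, head);       /* re-queue so a later pass frees it */
    }

    int main(void)
    {
        struct node root = { 1, "root", NULL };
        struct node leaf = { 2, "leaf", &root };
        struct node *pages = &leaf;     /* pages: leaf -> root */

        teardown(&pages, &root, 0);     /* frees 'leaf', keeps scrubbed root */
        teardown(&pages, &root, 1);     /* now frees the root as well */
        return 0;
    }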
paging_unlock(d);
out_unlocked:
if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
- p2m_teardown(p2m);
+ p2m_teardown(p2m, true);
if ( rv != 0 && pg != NULL )
{
pg->count_info &= ~PGC_count_mask;
shadow_teardown(d, NULL);
/* It is now safe to pull down the p2m map. */
- p2m_teardown(p2m_get_hostp2m(d));
+ p2m_teardown(p2m_get_hostp2m(d), true);
/* Free any shadow memory that the p2m teardown released */
paging_lock(d);
shadow_set_allocation(d, 0, NULL);
int p2m_alloc_table(struct p2m_domain *p2m);
/* Return all the p2m resources to Xen. */
-void p2m_teardown(struct p2m_domain *p2m);
+void p2m_teardown(struct p2m_domain *p2m, bool remove_root);
void p2m_final_teardown(struct domain *d);
/* Add a page to a domain's p2m table */