#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
+#include <asm/altp2m.h>
#include <asm/event.h>
#include <asm/page.h>
#include <asm/current.h>

void hap_final_teardown(struct domain *d)
{
    unsigned int i;

if ( hvm_altp2m_supported() )
- {
- d->arch.altp2m_active = 0;
-
- if ( d->arch.altp2m_eptp )
- {
- free_xenheap_page(d->arch.altp2m_eptp);
- d->arch.altp2m_eptp = NULL;
- }
-
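+        /* Final teardown: the "true" argument also frees each root page table. */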
for ( i = 0; i < MAX_ALTP2M; i++ )
p2m_teardown(d->arch.altp2m_p2m[i], true);
- }
    /* Destroy nestedp2m's first */
    for ( i = 0; i < MAX_NESTEDP2M; i++ )
        p2m_teardown(d->arch.nested_p2m[i], true);

    /* Free any memory that the p2m teardown released */
    paging_lock(d);
hap_set_allocation(d, 0, NULL);
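+    /* By this point the pool must be completely empty. */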
ASSERT(d->arch.paging.hap.p2m_pages == 0);
+ ASSERT(d->arch.paging.hap.free_pages == 0);
+ ASSERT(d->arch.paging.hap.total_pages == 0);
paging_unlock(d);
}

void hap_teardown(struct domain *d, bool *preempted)
{
struct vcpu *v;
mfn_t mfn;
+ unsigned int i;
ASSERT(d->is_dying);
ASSERT(d != current->domain);
}
}
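+    /*
+     * Drop the paging lock before tearing down the p2ms: freeing a p2m
+     * page returns it to the pool, which itself takes the paging lock.
+     */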
+ paging_unlock(d);
+
+ /* Leave the root pt in case we get further attempts to modify the p2m. */
+ if ( hvm_altp2m_supported() )
+ {
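+        /* Disable #VE on every vCPU before the altp2m views are freed. */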
+ if ( altp2m_active(d) )
+ for_each_vcpu ( d, v )
+ altp2m_vcpu_disable_ve(v);
+
+ d->arch.altp2m_active = 0;
+
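+        /* Free the page backing the EPTP list; the macro also NULLs the pointer. */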
+ FREE_XENHEAP_PAGE(d->arch.altp2m_eptp);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ p2m_teardown(d->arch.altp2m_p2m[i], false);
+ }
+
+ /* Destroy nestedp2m's after altp2m. */
+ for ( i = 0; i < MAX_NESTEDP2M; i++ )
+ p2m_teardown(d->arch.nested_p2m[i], false);
+
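+    /* Finally the host p2m, again leaving its root page table in place. */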
+ p2m_teardown(p2m_get_hostp2m(d), false);
+
+ paging_lock(d);
+
if ( d->arch.paging.hap.total_pages != 0 )
{
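+        /* Preemptible: hap_set_allocation() may set *preempted for a continuation. */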
hap_set_allocation(d, 0, preempted);
}
}
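+    /* As in hap_teardown(): tear down the p2m with the paging lock dropped. */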
+ paging_unlock(d);
+
+ p2m_teardown(p2m_get_hostp2m(d), false);
+
+ paging_lock(d);
+
+ /*
+ * Reclaim all shadow memory so that shadow_set_allocation() doesn't find
+ * in-use pages, as _shadow_prealloc() will no longer try to reclaim pages
+ * because the domain is dying.
+ */
+ shadow_blow_tables(d);
+
#if (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC))
/* Free the virtual-TLB array attached to each vcpu */
for_each_vcpu(d, v)
d->arch.paging.shadow.total_pages,
d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
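+    /* Nothing may be left in the pool by the time the domain is destroyed. */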
+ ASSERT(!d->arch.paging.shadow.total_pages);
+ ASSERT(!d->arch.paging.shadow.free_pages);
+ ASSERT(!d->arch.paging.shadow.p2m_pages);
paging_unlock(d);
}