}
if ( d->arch.paging.hap.total_pages != 0 )
- hap_teardown(d);
+ hap_teardown(d, NULL);
p2m_teardown(p2m_get_hostp2m(d));
/* Free any memory that the p2m teardown released */
paging_unlock(d);
}
-void hap_teardown(struct domain *d)
+void hap_teardown(struct domain *d, int *preempted)
{
struct vcpu *v;
mfn_t mfn;
if ( d->arch.paging.hap.total_pages != 0 )
{
- HAP_PRINTK("teardown of domain %u starts."
- " pages total = %u, free = %u, p2m=%u\n",
- d->domain_id,
- d->arch.paging.hap.total_pages,
- d->arch.paging.hap.free_pages,
- d->arch.paging.hap.p2m_pages);
- hap_set_allocation(d, 0, NULL);
- HAP_PRINTK("teardown done."
- " pages total = %u, free = %u, p2m=%u\n",
- d->arch.paging.hap.total_pages,
- d->arch.paging.hap.free_pages,
- d->arch.paging.hap.p2m_pages);
+ hap_set_allocation(d, 0, preempted);
+
+ if ( preempted && *preempted )
+ goto out;
+
ASSERT(d->arch.paging.hap.total_pages == 0);
}
xfree(d->arch.hvm_domain.dirty_vram);
d->arch.hvm_domain.dirty_vram = NULL;
+out:
paging_unlock(d);
}
/* Call when destroying a domain */
int paging_teardown(struct domain *d)
{
- int rc;
+ int rc, preempted = 0;
if ( hap_enabled(d) )
- hap_teardown(d);
+ hap_teardown(d, &preempted);
else
- shadow_teardown(d);
+ shadow_teardown(d, &preempted);
+
+ if ( preempted )
+ return -ERESTART;
/* clean up log dirty resources. */
rc = paging_free_log_dirty_bitmap(d, 0);
    return rc;
}
-void shadow_teardown(struct domain *d)
+void shadow_teardown(struct domain *d, int *preempted)
/* Destroy the shadow pagetables of this domain and free its shadow memory.
* Should only be called for dying domains. */
{
if ( d->arch.paging.shadow.total_pages != 0 )
{
- SHADOW_PRINTK("teardown of domain %u starts."
- " Shadow pages total = %u, free = %u, p2m=%u\n",
- d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
- d->arch.paging.shadow.p2m_pages);
/* Destroy all the shadows and release memory to domheap */
- sh_set_allocation(d, 0, NULL);
+ sh_set_allocation(d, 0, preempted);
+
+ if ( preempted && *preempted )
+ goto out;
+
/* Release the hash table back to xenheap */
if (d->arch.paging.shadow.hash_table)
shadow_hash_teardown(d);
- /* Should not have any more memory held */
- SHADOW_PRINTK("teardown done."
- " Shadow pages total = %u, free = %u, p2m=%u\n",
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
- d->arch.paging.shadow.p2m_pages);
+
ASSERT(d->arch.paging.shadow.total_pages == 0);
}
d->arch.hvm_domain.dirty_vram = NULL;
}
+out:
paging_unlock(d);
/* Must be called outside the lock */
    /* It is possible for a domain that never got domain_kill()ed
     * to get here with its shadow allocation intact. */
if ( d->arch.paging.shadow.total_pages != 0 )
- shadow_teardown(d);
+ shadow_teardown(d, NULL);
/* It is now safe to pull down the p2m map. */
p2m_teardown(p2m_get_hostp2m(d));
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
int hap_enable(struct domain *d, u32 mode);
void hap_final_teardown(struct domain *d);
-void hap_teardown(struct domain *d);
+void hap_teardown(struct domain *d, int *preempted);
void hap_vcpu_init(struct vcpu *v);
int hap_track_dirty_vram(struct domain *d,
unsigned long begin_pfn,
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
/* Call when destroying a domain */
-void shadow_teardown(struct domain *d);
+void shadow_teardown(struct domain *d, int *preempted);
/* Call once all of the references to the domain have gone away */
void shadow_final_teardown(struct domain *d);
#else /* !CONFIG_SHADOW_PAGING */
-#define shadow_teardown(d) ASSERT(is_pv_domain(d))
+#define shadow_teardown(d, p) ASSERT(is_pv_domain(d))
#define shadow_final_teardown(d) ASSERT(is_pv_domain(d))
#define shadow_enable(d, mode) \
({ ASSERT(is_pv_domain(d)); -EOPNOTSUPP; })