[IA64] Prevent soft lockup when destroying VTi domain
author    Alex Williamson <alex.williamson@hp.com>
          Tue, 23 Oct 2007 16:21:31 +0000 (10:21 -0600)
committer Alex Williamson <alex.williamson@hp.com>
          Tue, 23 Oct 2007 16:21:31 +0000 (10:21 -0600)
Prevent a soft lockup during VTi domain destruction by making
relinquish_memory() continuable.  It was assumed that
mm_teardown() frees most of the page_list, so the list passed to
relinquish_memory() would be short.  That assumption does not
hold for VTi domains, however, because qemu-dm maps all of the
domain's pages.  To avoid the soft lockup message, make
relinquish_memory() check for pending preemption and return
-EAGAIN so the operation can be continued later.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
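
As an illustration of the continuation pattern this patch applies (a
minimal standalone sketch, not Xen code; all names are hypothetical),
the program below has a long-running loop record its progress in a
static variable, poll a stand-in for hypercall_preempt_check(), and
return -EAGAIN so the caller can simply re-invoke it until the work
completes:

    #include <errno.h>
    #include <stdio.h>

    #define NITEMS        1000   /* pages to release */
    #define PREEMPT_EVERY  256   /* pretend preemption is pending this often */

    static int next_item;        /* continuation state, lives across calls */

    /* Release items until done or "preempted"; mirrors the -EAGAIN protocol. */
    static int relinquish_items(void)
    {
        int processed = 0;

        while (next_item < NITEMS) {
            /* ... the put_page()-style work would happen here ... */
            next_item++;

            /* Stand-in for hypercall_preempt_check(). */
            if (++processed == PREEMPT_EVERY)
                return -EAGAIN;  /* ask the caller to re-invoke us */
        }
        return 0;                /* everything released */
    }

    int main(void)
    {
        int rc, continuations = 0;

        /* The caller just retries until the worker reports completion. */
        while ((rc = relinquish_items()) == -EAGAIN)
            continuations++;

        printf("finished after %d continuation(s), rc=%d\n", continuations, rc);
        return 0;
    }

In the patch itself the retry is driven by the hypercall continuation
machinery rather than a plain loop, and the resume point is kept in
d->arch (relres and relmem_list) rather than a static variable.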
xen/arch/ia64/xen/domain.c
xen/include/asm-ia64/domain.h

diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index f8e3bff5ebcd3e34bdd8afb0864383eca84f4a29..5d41234cc80a17cd67a8295d3b4c5c5f010356a8 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -584,7 +584,9 @@ int arch_domain_create(struct domain *d)
                goto fail_nomem;
 
        memset(&d->arch.mm, 0, sizeof(d->arch.mm));
+       d->arch.relres = RELRES_not_started;
        d->arch.mm_teardown_offset = 0;
+       INIT_LIST_HEAD(&d->arch.relmem_list);
 
        if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
            goto fail_nomem;
@@ -1495,13 +1497,14 @@ int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
        return rc;
 }
 
-static void relinquish_memory(struct domain *d, struct list_head *list)
+static int relinquish_memory(struct domain *d, struct list_head *list)
 {
     struct list_head *ent;
     struct page_info *page;
 #ifndef __ia64__
     unsigned long     x, y;
 #endif
+    int               ret = 0;
 
     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
     spin_lock_recursive(&d->page_alloc_lock);
@@ -1514,6 +1517,7 @@ static void relinquish_memory(struct domain *d, struct list_head *list)
         {
             /* Couldn't get a reference -- someone is freeing this page. */
             ent = ent->next;
+            list_move_tail(&page->list, &d->arch.relmem_list);
             continue;
         }
 
@@ -1550,35 +1554,72 @@ static void relinquish_memory(struct domain *d, struct list_head *list)
         /* Follow the list chain and /then/ potentially free the page. */
         ent = ent->next;
         BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
+        list_move_tail(&page->list, &d->arch.relmem_list);
         put_page(page);
+
+        if (hypercall_preempt_check()) {
+                ret = -EAGAIN;
+                goto out;
+        }
     }
 
+    list_splice_init(&d->arch.relmem_list, list);
+
+ out:
     spin_unlock_recursive(&d->page_alloc_lock);
+    return ret;
 }
 
 int domain_relinquish_resources(struct domain *d)
 {
-    int ret;
-    /* Relinquish guest resources for VT-i domain. */
-    if (d->arch.is_vti)
-           vmx_relinquish_guest_resources(d);
+       int ret = 0;
 
-    /* Tear down shadow mode stuff. */
-    ret = mm_teardown(d);
-    if (ret != 0)
-        return ret;
+       switch (d->arch.relres) {
+       case RELRES_not_started:
+               /* Relinquish guest resources for VT-i domain. */
+               if (d->arch.is_vti)
+                       vmx_relinquish_guest_resources(d);
+               d->arch.relres = RELRES_mm_teardown;
+               /*fallthrough*/
+
+       case RELRES_mm_teardown:
+               /* Tear down shadow mode stuff. */
+               ret = mm_teardown(d);
+               if (ret != 0)
+                       return ret;
+               d->arch.relres = RELRES_xen;
+               /* fallthrough */
+
+       case RELRES_xen:
+               /* Relinquish every xen page of memory. */
+               ret = relinquish_memory(d, &d->xenpage_list);
+               if (ret != 0)
+                       return ret;
+               d->arch.relres = RELRES_dom;
+               /* fallthrough */
+
+       case RELRES_dom:
+               /* Relinquish every domain page of memory. */
+               ret = relinquish_memory(d, &d->page_list);
+               if (ret != 0)
+                       return ret;
+               d->arch.relres = RELRES_done;
+               /* fallthrough */
+
+       case RELRES_done:
+               break;
 
-    /* Relinquish every page of memory. */
-    relinquish_memory(d, &d->xenpage_list);
-    relinquish_memory(d, &d->page_list);
+       default:
+               BUG();
+       }
 
-    if (d->arch.is_vti && d->arch.sal_data)
-           xfree(d->arch.sal_data);
+       if (d->arch.is_vti && d->arch.sal_data)
+               xfree(d->arch.sal_data);
 
-    /* Free page used by xen oprofile buffer */
-    free_xenoprof_pages(d);
+       /* Free page used by xen oprofile buffer */
+       free_xenoprof_pages(d);
 
-    return 0;
+       return 0;
 }
 
 unsigned long
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
index 68e5256f048af72fd61b80d3d3b29b554e57bb48..e259402dda3835163d69616e70959451b6b933ac 100644
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -192,7 +192,17 @@ struct arch_domain {
 #endif
 
     /* for domctl_destroy_domain continuation */
+    enum {
+        RELRES_not_started,
+        RELRES_mm_teardown,
+        RELRES_xen,
+        RELRES_dom,
+        RELRES_done,
+    } relres;
+    /* Continuable mm_teardown() */
     unsigned long mm_teardown_offset;
+    /* Continuable domain_relinquish_resources() */
+    struct list_head relmem_list;
 };
 #define INT_ENABLE_OFFSET(v)             \
     (sizeof(vcpu_info_t) * (v)->vcpu_id + \
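
The relmem_list added above implements a two-list trick: pages already
put are moved to a side list so that a resumed pass never revisits
them, and once the main list drains the side list is spliced back so
later teardown code still sees every page.  Below is a standalone
sketch of just that control flow (hypothetical list helpers, not Xen's
list primitives; the real list_move_tail() preserves ordering and the
real code runs under d->page_alloc_lock):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; int id; };

    static struct node *done_list;   /* analogue of d->arch.relmem_list */

    /* Pop the head of 'src' and push it onto 'dst' (order not preserved). */
    static void move_head(struct node **src, struct node **dst)
    {
        struct node *n = *src;

        *src = n->next;
        n->next = *dst;
        *dst = n;
    }

    /* Drain 'list', yielding after 'budget' items; -EAGAIN means call again. */
    static int relinquish(struct node **list, int budget)
    {
        while (*list != NULL) {
            /* ... the put_page() equivalent would go here ... */
            move_head(list, &done_list);
            if (--budget == 0)
                return -EAGAIN;      /* preempted: resume on the next call */
        }

        /* Analogue of list_splice_init(): hand every node back to 'list'. */
        *list = done_list;
        done_list = NULL;
        return 0;
    }

    int main(void)
    {
        struct node n[5] = { {&n[1], 0}, {&n[2], 1}, {&n[3], 2},
                             {&n[4], 3}, {NULL, 4} };
        struct node *list = &n[0];
        int rc;

        while ((rc = relinquish(&list, 2)) == -EAGAIN)
            ;                        /* caller retries until completion */

        for (struct node *p = list; p != NULL; p = p->next)
            printf("%d ", p->id);    /* prints: 4 3 2 1 0 */
        printf("(rc=%d)\n", rc);
        return 0;
    }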