x86/PV: fix unintended dependency of m2p-strict mode on migration-v2
author     Jan Beulich <jbeulich@suse.com>
           Wed, 3 Feb 2016 13:12:00 +0000 (14:12 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Wed, 3 Feb 2016 13:12:00 +0000 (14:12 +0100)
This went unnoticed until a backport of the m2p-strict support to an
older Xen (still using migration v1) got used: there, page table pinning
precedes vCPU context loading, so L4 tables got initialized for the
wrong mode, and migration of guests enabling this VM assist failed. Fix
this by post-processing the domain's L4 tables when setting the intended
VM assist flags for the guest.

Note that this leaves in place a dependency on vCPU 0 getting its guest
context restored first, but afaict the logic here is not the only thing
depending on that.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
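
For context on the VM assist involved: a 64-bit PV guest opts into strict
M2P handling via the vm_assist hypercall. A minimal guest-side sketch
follows (the VMASST_* constants come from Xen's public xen.h; the
HYPERVISOR_vm_assist() wrapper is assumed to be whatever the guest
environment provides, but this is its usual signature):

    /* Hypercall wrapper; the exact definition is environment-specific. */
    extern int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);

    /*
     * Ask Xen to hide the read-only M2P mapping from guest user mode.
     * With the fix below, L4 tables already pinned during a (v1) restore
     * are brought in line with this setting afterwards.
     */
    static int enable_m2p_strict(void)
    {
        return HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_m2p_strict);
    }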
xen/arch/x86/domain.c
xen/arch/x86/mm.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/mm.h

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index b513028640fba8f281776c700a90d80ca19f87a9..9d43f7b1be385a823222f9d63af596c900d9346e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1075,7 +1075,48 @@ int arch_set_info_guest(
         goto out;
 
     if ( v->vcpu_id == 0 )
+    {
+        /*
+         * In the restore case we need to deal with L4 pages which got
+         * initialized with m2p_strict still clear (and which hence lack the
+         * correct initial RO_MPT_VIRT_{START,END} L4 entry).
+         */
+        if ( d != current->domain && !VM_ASSIST(d, m2p_strict) &&
+             is_pv_domain(d) && !is_pv_32bit_domain(d) &&
+             test_bit(VMASST_TYPE_m2p_strict, &c.nat->vm_assist) &&
+             atomic_read(&d->arch.pv_domain.nr_l4_pages) )
+        {
+            bool_t done = 0;
+
+            spin_lock_recursive(&d->page_alloc_lock);
+
+            for ( i = 0; ; )
+            {
+                struct page_info *page = page_list_remove_head(&d->page_list);
+
+                if ( page_lock(page) )
+                {
+                    if ( (page->u.inuse.type_info & PGT_type_mask) ==
+                         PGT_l4_page_table )
+                        done = !fill_ro_mpt(page_to_mfn(page));
+
+                    page_unlock(page);
+                }
+
+                page_list_add_tail(page, &d->page_list);
+
+                if ( done || (!(++i & 0xff) && hypercall_preempt_check()) )
+                    break;
+            }
+
+            spin_unlock_recursive(&d->page_alloc_lock);
+
+            if ( !done )
+                return -ERESTART;
+        }
+
         d->vm_assist = c(vm_assist);
+    }
 
     rc = put_old_guest_table(current);
     if ( rc )
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 1ee431e60f510bce74ec69e73360658c75fc3999..6fcd2c269e72ae1638bfb6ce7a241a895e91e4ee 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1463,13 +1463,20 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
 }
 
-void fill_ro_mpt(unsigned long mfn)
+bool_t fill_ro_mpt(unsigned long mfn)
 {
     l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+    bool_t ret = 0;
 
-    l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
-        idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
+    if ( !l4e_get_intpte(l4tab[l4_table_offset(RO_MPT_VIRT_START)]) )
+    {
+        l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
+            idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
+        ret = 1;
+    }
     unmap_domain_page(l4tab);
+
+    return ret;
 }
 
 void zap_ro_mpt(unsigned long mfn)
@@ -1527,10 +1534,15 @@ static int alloc_l4_table(struct page_info *page)
         adjust_guest_l4e(pl4e[i], d);
     }
 
-    init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+    if ( rc >= 0 )
+    {
+        init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+        atomic_inc(&d->arch.pv_domain.nr_l4_pages);
+        rc = 0;
+    }
     unmap_domain_page(pl4e);
 
-    return rc > 0 ? 0 : rc;
+    return rc;
 }
 
 static void free_l1_table(struct page_info *page)
@@ -1648,7 +1660,13 @@ static int free_l4_table(struct page_info *page)
 
     unmap_domain_page(pl4e);
 
-    return rc > 0 ? 0 : rc;
+    if ( rc >= 0 )
+    {
+        atomic_dec(&d->arch.pv_domain.nr_l4_pages);
+        rc = 0;
+    }
+
+    return rc;
 }
 
 int page_lock(struct page_info *page)
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 405adef24394508dd99c3c679374352b46afed3d..4072e273ae2d56142d1bf2759434f3506f93c109 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -248,6 +248,8 @@ struct pv_domain
 {
     l1_pgentry_t **gdt_ldt_l1tab;
 
+    atomic_t nr_l4_pages;
+
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
 };
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 4560debcf9fa50545f07076d645da973575491f9..a09738265768ff36d848abd4934f5633aafed7e6 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -323,7 +323,7 @@ int free_page_type(struct page_info *page, unsigned long type,
 
 void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
                          bool_t zap_ro_mpt);
-void fill_ro_mpt(unsigned long mfn);
+bool_t fill_ro_mpt(unsigned long mfn);
 void zap_ro_mpt(unsigned long mfn);
 
 int is_iomem_page(unsigned long mfn);
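
A general note on the loop added to arch_set_info_guest() above: it
follows Xen's usual preemptible list-walk idiom. Each visited page is
rotated to the tail of d->page_list, so that when the hypercall is
aborted with -ERESTART the continuation resumes with the pages not yet
visited; hitting an L4 whose RO-MPT slot is already populated
(fill_ro_mpt() returning 0) signals that the walk has wrapped around and
is complete. A stripped-down sketch of the idiom, with a hypothetical
fixup_page() standing in for the L4-specific work:

    /*
     * Preemptible walk over a domain's page list.  Rotating each page to
     * the tail makes the walk restartable: a continuation simply picks
     * up at the current list head.
     */
    static int walk_domain_pages(struct domain *d)
    {
        unsigned int i;
        bool_t done = 0;

        spin_lock_recursive(&d->page_alloc_lock);

        for ( i = 0; ; )
        {
            struct page_info *page = page_list_remove_head(&d->page_list);

            done = fixup_page(page);          /* hypothetical per-page work */
            page_list_add_tail(page, &d->page_list);

            /* Only poll for preemption every 256 pages. */
            if ( done || (!(++i & 0xff) && hypercall_preempt_check()) )
                break;
        }

        spin_unlock_recursive(&d->page_alloc_lock);

        return done ? 0 : -ERESTART;
    }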