x86/pv: refactor how building dom0 in PV handles domheap mappings
author Hongyan Xia <hongyax@amazon.com>
Fri, 13 Sep 2019 14:23:52 +0000 (15:23 +0100)
committer Hongyan Xia <hongyax@amazon.com>
Wed, 16 Oct 2019 12:25:06 +0000 (13:25 +0100)
Building a PV dom0 allocates page-table pages from the domheap but then
uses them as if they came from the xenheap, i.e. as if they were
permanently covered by the direct map. This is clearly wrong. Fix it by
mapping each page-table page explicitly before use and unmapping it
afterwards, tracking the machine addresses separately for writing
page-table entries.

Signed-off-by: Hongyan Xia <hongyax@amazon.com>
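
Distilled below is the pattern the patch applies throughout
dom0_construct_pv(), as a minimal before/after sketch. It is illustrative
only, not runnable on its own: it relies on the map_xen_pagetable() /
UNMAP_XEN_PAGETABLE() helpers the patch uses, which map and unmap a
page-table MFN on demand.

    /* Before: a domheap page used as if it were xenheap, i.e. as if the
     * direct map always covered it. */
    l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
    clear_page(l1tab);
    *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);

    /* After: map the page on demand, remember its machine address for
     * building page-table entries, and unmap once the tables are wired
     * up. */
    UNMAP_XEN_PAGETABLE(l1start);
    l1start = l1tab = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
    l1start_maddr = mpt_alloc;
    mpt_alloc += PAGE_SIZE;
    clear_page(l1tab);
    *l2tab = l2e_from_paddr(l1start_maddr, L2_PROT);
    UNMAP_XEN_PAGETABLE(l1start);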
xen/arch/x86/pv/dom0_build.c

index 4617f31278baf35a8e3563e74e91b8673f77d107..c8e59c121e40f3415ff68e6a669c2a1c6e29ad04 100644
@@ -317,6 +317,7 @@ int __init dom0_construct_pv(struct domain *d,
     l3_pgentry_t *l3tab = NULL, *l3start = NULL;
     l2_pgentry_t *l2tab = NULL, *l2start = NULL;
     l1_pgentry_t *l1tab = NULL, *l1start = NULL;
+    paddr_t l3start_maddr = 0, l2start_maddr = 0, l1start_maddr = 0;
 
     /*
      * This fully describes the memory layout of the initial domain. All
@@ -623,19 +624,23 @@ int __init dom0_construct_pv(struct domain *d,
     if ( !is_pv_32bit_domain(d) )
     {
         maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
-        l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+        l4start = l4tab = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
         clear_page(l4tab);
-        init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
+        init_xen_l4_slots(l4tab, maddr_to_mfn(mpt_alloc),
                           d, INVALID_MFN, true);
-        v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
+        v->arch.guest_table = pagetable_from_paddr(mpt_alloc);
+        mpt_alloc += PAGE_SIZE;
     }
     else
     {
         /* Monitor table already created by switch_compat(). */
-        l4start = l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
+        l4start = l4tab =
+                map_xen_pagetable(pagetable_get_mfn(v->arch.guest_table));
         /* See public/xen.h on why the following is needed. */
         maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table;
-        l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+        l3start = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+        l3start_maddr = mpt_alloc;
+        mpt_alloc += PAGE_SIZE;
     }
 
     l4tab += l4_table_offset(v_start);
@@ -645,14 +650,20 @@ int __init dom0_construct_pv(struct domain *d,
         if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
         {
             maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
-            l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+            UNMAP_XEN_PAGETABLE(l1start);
+            l1start = l1tab = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+            l1start_maddr = mpt_alloc;
+            mpt_alloc += PAGE_SIZE;
             clear_page(l1tab);
             if ( count == 0 )
                 l1tab += l1_table_offset(v_start);
             if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) )
             {
                 maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
-                l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+                UNMAP_XEN_PAGETABLE(l2start);
+                l2start = l2tab = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+                l2start_maddr = mpt_alloc;
+                mpt_alloc += PAGE_SIZE;
                 clear_page(l2tab);
                 if ( count == 0 )
                     l2tab += l2_table_offset(v_start);
@@ -662,19 +673,22 @@ int __init dom0_construct_pv(struct domain *d,
                     {
                         maddr_to_page(mpt_alloc)->u.inuse.type_info =
                             PGT_l3_page_table;
-                        l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+                        UNMAP_XEN_PAGETABLE(l3start);
+                        l3start = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+                        l3start_maddr = mpt_alloc;
+                        mpt_alloc += PAGE_SIZE;
                     }
                     l3tab = l3start;
                     clear_page(l3tab);
                     if ( count == 0 )
                         l3tab += l3_table_offset(v_start);
-                    *l4tab = l4e_from_paddr(__pa(l3start), L4_PROT);
+                    *l4tab = l4e_from_paddr(l3start_maddr, L4_PROT);
                     l4tab++;
                 }
-                *l3tab = l3e_from_paddr(__pa(l2start), L3_PROT);
+                *l3tab = l3e_from_paddr(l2start_maddr, L3_PROT);
                 l3tab++;
             }
-            *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
+            *l2tab = l2e_from_paddr(l1start_maddr, L2_PROT);
             l2tab++;
         }
         if ( count < initrd_pfn || count >= initrd_pfn + PFN_UP(initrd_len) )
@@ -701,9 +715,12 @@ int __init dom0_construct_pv(struct domain *d,
             if ( !l3e_get_intpte(*l3tab) )
             {
                 maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
-                l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
-                clear_page(l2tab);
-                *l3tab = l3e_from_paddr(__pa(l2tab), L3_PROT);
+                UNMAP_XEN_PAGETABLE(l2start);
+                l2start = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+                l2start_maddr = mpt_alloc;
+                mpt_alloc += PAGE_SIZE;
+                clear_page(l2start);
+                *l3tab = l3e_from_paddr(l2start_maddr, L3_PROT);
             }
             if ( i == 3 )
                 l3e_get_page(*l3tab)->u.inuse.type_info |= PGT_pae_xen_l2;
@@ -714,6 +731,10 @@ int __init dom0_construct_pv(struct domain *d,
         UNMAP_XEN_PAGETABLE(l2t);
     }
 
+    UNMAP_XEN_PAGETABLE(l1start);
+    UNMAP_XEN_PAGETABLE(l2start);
+    UNMAP_XEN_PAGETABLE(l3start);
+
     /* Pages that are part of page tables must be read only. */
     mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);
 
@@ -889,6 +910,9 @@ int __init dom0_construct_pv(struct domain *d,
     mapcache_override_current(NULL);
     switch_cr3_cr4(current->arch.cr3, read_cr4());
 
+    /* Careful. Be sure to unmap this after returning to idle's mapcache. */
+    UNMAP_XEN_PAGETABLE(l4start);
+
     update_domain_wallclock_time(d);
 
     /*