x86/setup: destroy mappings in the direct map when directmap=no
author     Hongyan Xia <hongyxia@amazon.com>
           Wed, 4 Mar 2020 10:20:38 +0000 (10:20 +0000)
committer  Hongyan Xia <hongyxia@amazon.com>
           Fri, 14 Aug 2020 21:06:32 +0000 (22:06 +0100)
Detailed comments are added to explain why the mappings are created and
then destroyed, rather than simply not created in the first place.

Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
xen/arch/x86/setup.c

index 06e51d76d343cb127d9ee6f59dd491a7cef68e23..67bca188a7c2ad9a411b1a28411672af57a1a490 100644
@@ -1499,8 +1499,27 @@ void __init noreturn __start_xen(unsigned long mbi_p)
         map_e = min_t(uint64_t, e,
                       ARRAY_SIZE(l2_directmap) << L2_PAGETABLE_SHIFT);
 
+        /*
+         * When directmap=no, we destroy the 1:1 mappings as we go through
+         * the e820. Why create them at all instead of simply not creating
+         * them in the first place? Although we do not want any mappings in
+         * the direct map region, we do want the L3 tables for that region
+         * to be populated, so that mappings later created in the direct map
+         * for the idle domain are visible to all domains, which share the
+         * direct map's L4 entries and hence its L3 tables. This is needed
+         * for the on-demand xenheap, which is mapped and unmapped in the
+         * direct map and must be visible to everyone.
+         *
+         * destroy_xen_mappings() neither frees the L3 tables nor clears the
+         * L4 entries, which is exactly the behaviour we want.
+         */
+
         /* Pass mapped memory to allocator /before/ creating new mappings. */
         init_boot_pages(s, min(map_s, e));
+        if ( !arch_has_directmap() && s < min(map_s, e) )
+            destroy_xen_mappings((unsigned long)__va(s),
+                                 (unsigned long)__va(min(map_s, e)));
+
         s = map_s;
         if ( s < map_e )
         {
@@ -1509,6 +1528,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             map_s = (s + mask) & ~mask;
             map_e &= ~mask;
             init_boot_pages(map_s, map_e);
+            if ( !arch_has_directmap() && map_s < map_e )
+                destroy_xen_mappings((unsigned long)__va(map_s),
+                                     (unsigned long)__va(map_e));
         }
 
         if ( map_s > map_e )
@@ -1525,6 +1547,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
                 map_pages_to_xen((unsigned long)__va(map_e), maddr_to_mfn(map_e),
                                  PFN_DOWN(end - map_e), PAGE_HYPERVISOR);
                 init_boot_pages(map_e, end);
+                if ( !arch_has_directmap() )
+                    destroy_xen_mappings((unsigned long)__va(map_e),
+                                         (unsigned long)__va(end));
                 map_e = end;
             }
         }
@@ -1534,12 +1559,18 @@ void __init noreturn __start_xen(unsigned long mbi_p)
              * must also not be mapped with _PAGE_GLOBAL. */
             map_pages_to_xen((unsigned long)__va(map_e), maddr_to_mfn(map_e),
                              PFN_DOWN(e - map_e), __PAGE_HYPERVISOR_RW);
+            if ( !arch_has_directmap() )
+                destroy_xen_mappings((unsigned long)__va(map_e),
+                                     (unsigned long)__va(e));
         }
         if ( s < map_s )
         {
             map_pages_to_xen((unsigned long)__va(s), maddr_to_mfn(s),
                              PFN_DOWN(map_s - s), PAGE_HYPERVISOR);
             init_boot_pages(s, map_s);
+            if ( !arch_has_directmap() )
+                destroy_xen_mappings((unsigned long)__va(s),
+                                     (unsigned long)__va(map_s));
         }
     }
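
As a side note on the pattern the later hunks repeat, below is a minimal sketch
(not part of the patch) of handling a single e820 range: map it so the direct
map's L3 tables get populated, hand it to the boot allocator, then unmap it
again when directmap=no. The helper name map_and_release() is hypothetical;
the calls themselves (map_pages_to_xen(), init_boot_pages(),
destroy_xen_mappings(), arch_has_directmap()) are the same ones used in the
diff above.

/*
 * Illustrative sketch only; the helper name is hypothetical.  It mirrors
 * what the later hunks do for one e820 range.
 */
static void __init map_and_release(paddr_t s, paddr_t e)
{
    /* Map the range so the direct map's L3 tables get allocated. */
    map_pages_to_xen((unsigned long)__va(s), maddr_to_mfn(s),
                     PFN_DOWN(e - s), PAGE_HYPERVISOR);

    /* Hand the now-mapped range to the boot allocator. */
    init_boot_pages(s, e);

    /*
     * Tear the 1:1 mapping back down when directmap=no.
     * destroy_xen_mappings() keeps the L3 tables and L4 entries intact, so
     * later on-demand mappings in the idle domain's direct map remain
     * visible to all domains.
     */
    if ( !arch_has_directmap() )
        destroy_xen_mappings((unsigned long)__va(s),
                             (unsigned long)__va(e));
}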