xenbits.xensource.com Git - people/royger/xen.git/commitdiff
x86/dom0: take alignment into account when populating p2m in PVH mode pvh-dom0-fixes-v1 gitlab/pvh-dom0-fixes-v1
authorRoger Pau Monne <roger.pau@citrix.com>
Thu, 27 Dec 2018 14:48:42 +0000 (15:48 +0100)
committerRoger Pau Monne <roger.pau@citrix.com>
Thu, 27 Dec 2018 14:48:42 +0000 (15:48 +0100)
Current code that allocates memory and populates the p2m for PVH Dom0
doesn't take the address alignment into account; this can lead to
high-order allocations that start on a non-aligned address being broken
down into lower-order entries in the p2m page tables.

Fix this by taking into account the p2m page sizes and alignment
requirements when allocating the memory and populating the p2m.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
xen/arch/x86/hvm/dom0_build.c

index 7ea29c443a29966432e01b49a5e5aa31b0dd4d15..2d85e808b62529bdc56309ee85f22e54089e05a1 100644 (file)
@@ -91,32 +91,61 @@ static int __init pvh_populate_memory_range(struct domain *d,
                                             unsigned long start,
                                             unsigned long nr_pages)
 {
-    unsigned int order = MAX_ORDER, i = 0;
+    struct {
+        unsigned long align;
+        unsigned int order;
+    } static const __initconst orders[] = {
+        /* NB: must be sorted by decreasing size. */
+        { .align = PFN_DOWN(GB(1)), .order = PAGE_ORDER_1G },
+        { .align = PFN_DOWN(MB(2)), .order = PAGE_ORDER_2M },
+        { .align = PFN_DOWN(KB(4)), .order = PAGE_ORDER_4K },
+    };
+    unsigned int max_order = orders[0].order, i = 0;
     struct page_info *page;
     int rc;
 #define MAP_MAX_ITER 64
 
     while ( nr_pages != 0 )
     {
-        unsigned int range_order = get_order_from_pages(nr_pages + 1);
+        unsigned int order, j;
 
-        order = min(range_order ? range_order - 1 : 0, order);
+        for ( j = 0; j < ARRAY_SIZE(orders); j++ )
+            if ( IS_ALIGNED(start, orders[j].align) &&
+                 nr_pages >= (1UL << orders[j].order) )
+            {
+                order = orders[j].order;
+                break;
+            }
+
+        if ( j == ARRAY_SIZE(orders) )
+        {
+           printk("Unable to find allocation order for [%#lx,%#lx)\n",
+                  start, start + nr_pages);
+           return -EINVAL;
+        }
+
+        order = min(order, max_order);
         page = alloc_domheap_pages(d, order, dom0_memflags | MEMF_no_scrub);
         if ( page == NULL )
         {
-            if ( order == 0 && dom0_memflags )
-            {
-                /* Try again without any dom0_memflags. */
-                dom0_memflags = 0;
-                order = MAX_ORDER;
-                continue;
-            }
-            if ( order == 0 )
+            if ( order == orders[ARRAY_SIZE(orders) - 1].order )
             {
+                if ( dom0_memflags )
+                {
+                    /* Try again without any dom0_memflags. */
+                    max_order = orders[0].order;
+                    dom0_memflags = 0;
+                    continue;
+                }
                 printk("Unable to allocate memory with order 0!\n");
                 return -ENOMEM;
             }
-            order--;
+            for ( j = 0; j < ARRAY_SIZE(orders) - 1; j++ )
+                if ( order == orders[j].order )
+                {
+                    max_order = orders[j + 1].order;
+                    break;
+                }
             continue;
         }