xen/x86: use dynamically adjusted upper bound for contiguous regions
author     Jan Beulich <jbeulich@novell.com>
           Mon, 25 Jul 2011 12:49:25 +0000 (13:49 +0100)
committer  Jan Beulich <jbeulich@novell.com>
           Mon, 25 Jul 2011 12:49:25 +0000 (13:49 +0100)
After years of not causing problems, the 2MB (order 9) limit on
contiguous regions' size was recently found to prevent booting of
certain systems - at least the FC variant of the MPT driver can
(possibly only for out-of-tree and/or post-2.6.18 versions) require
allocation of a buffer quite a bit larger than 2MB.

Rather than increasing the limit on the order to 10, make the whole
logic dynamic - start out with a static order and respective argument
buffers large enough to allow initialization up to the point where
core_initcall()-s get processed, and from then on dynamically allocate
(and resize) argument buffers on demand.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
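
The scheme described above is a classic grow-and-swap under a lock. The
following minimal user-space sketch illustrates the same pattern, with
malloc and a pthread mutex standing in for __vmalloc and balloon_lock;
every name in it is invented for the example and is not part of the patch.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int max_order = 6;	/* like max_contig_order */
static unsigned long *frames;		/* like discontig_frames */

/* Grow the shared buffer so it can hold 1UL << order entries. */
static int ensure_order(unsigned int order)
{
	unsigned long *nbuf, *loser = NULL;

	/* Unlocked fast path, as in the kernel; the re-check under
	 * the lock below is what is authoritative. */
	if (order <= max_order)
		return 0;

	/* Allocate before taking the lock so the critical section
	 * stays short. */
	nbuf = malloc(sizeof(*frames) << order);
	if (!nbuf)
		return -1;

	pthread_mutex_lock(&lock);
	if (order > max_order) {	/* re-check: nobody beat us to it */
		loser = frames;
		frames = nbuf;
		max_order = order;	/* publish only after the swap */
	} else {
		loser = nbuf;		/* lost the race; discard our copy */
	}
	pthread_mutex_unlock(&lock);

	free(loser);			/* free(NULL) is a no-op */
	return 0;
}

int main(void)
{
	return ensure_order(9);		/* grow from 64 to 512 entries */
}

Only one racing caller's swap wins; the loser's buffer, or the outgrown
old one, is freed after the lock is dropped, which mirrors the
unconditional vfree() at the end of check_contig_order() below.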
arch/i386/mm/hypervisor.c

index 295021fb597a406b5865b1c72d3e6b16c602d22e..dfa16e1f80ff9eb17b4e9119a6ec2bb807fb59c7 100644 (file)
@@ -190,17 +190,71 @@ void xen_set_ldt(const void *ptr, unsigned int ents)
 }
 
 /* Protected by balloon_lock. */
-#define MAX_CONTIG_ORDER 9 /* 2MB */
-static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
-static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
-static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
+#define INIT_CONTIG_ORDER 6 /* 256kB */
+static unsigned int __read_mostly max_contig_order = INIT_CONTIG_ORDER;
+static unsigned long __initdata init_df[1U << INIT_CONTIG_ORDER];
+static unsigned long *__read_mostly discontig_frames = init_df;
+static multicall_entry_t __initdata init_mc[1U << INIT_CONTIG_ORDER];
+static multicall_entry_t *__read_mostly cr_mcl = init_mc;
+
+static int __init init_contig_order(void)
+{
+       discontig_frames = vmalloc((sizeof(*discontig_frames)
+                                   + sizeof(*cr_mcl)) << INIT_CONTIG_ORDER);
+       BUG_ON(!discontig_frames);
+
+       cr_mcl = (void *)(discontig_frames + (1U << INIT_CONTIG_ORDER));
+
+       return 0;
+}
+core_initcall(init_contig_order);
+
+static int check_contig_order(unsigned int order)
+{
+#ifdef CONFIG_64BIT
+       if (unlikely(order >= 32))
+#else
+       if (unlikely(order > BITS_PER_LONG - fls(sizeof(*cr_mcl))))
+#endif
+               return -ENOMEM;
+
+       if (unlikely(order > max_contig_order))
+       {
+               unsigned long *df = __vmalloc((sizeof(*discontig_frames)
+                                              + sizeof(*cr_mcl)) << order,
+                                             GFP_ATOMIC, PAGE_KERNEL);
+               unsigned long flags;
+
+               if (!df) {
+                       /* nothing allocated on this path; just report it */
+                       return -ENOMEM;
+               }
+               balloon_lock(flags);
+               if (order > max_contig_order) {
+                       void *temp = discontig_frames;
+
+                       discontig_frames = df;
+                       cr_mcl = (void *)(df + (1U << order));
+                       df = temp;
+
+                       wmb();
+                       max_contig_order = order;
+               }
+               balloon_unlock(flags);
+               vfree(df);
+               printk(KERN_INFO
+                      "Adjusted maximum contiguous region order to %u\n",
+                      order);
+       }
+
+       return 0;
+}
 
 /* Ensure multi-page extents are contiguous in machine memory. */
 int xen_create_contiguous_region(
        unsigned long vstart, unsigned int order, unsigned int address_bits)
 {
-       unsigned long *in_frames = discontig_frames, out_frame;
-       unsigned long  frame, flags;
+       unsigned long *in_frames, out_frame, frame, flags;
        unsigned int   i;
        int            rc, success;
        struct xen_memory_exchange exchange = {
@@ -225,16 +279,19 @@ int xen_create_contiguous_region(
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;
 
-       if (unlikely(order > MAX_CONTIG_ORDER))
-               return -ENOMEM;
+       rc = check_contig_order(order);
+       if (unlikely(rc))
+               return rc;
 
-       set_xen_guest_handle(exchange.in.extent_start, in_frames);
        set_xen_guest_handle(exchange.out.extent_start, &out_frame);
 
        scrub_pages((void *)vstart, 1 << order);
 
        balloon_lock(flags);
 
+       in_frames = discontig_frames;
+       set_xen_guest_handle(exchange.in.extent_start, in_frames);
+
        /* 1. Zap current PTEs, remembering MFNs. */
        for (i = 0; i < (1U<<order); i++) {
                in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
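
A note on the helper's arithmetic before the remaining hunks: a single
__vmalloc covers both the frame list and the multicall list, with cr_mcl
carved out right past the 1U << order frames, and that shared shift is
what the overflow guards in check_contig_order() protect. A stand-alone
C sketch of the same layout, with an illustrative stand-in struct rather
than the kernel's multicall_entry_t:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for multicall_entry_t; the real type is in the Xen headers. */
struct mc_entry { unsigned long args[8]; };

int main(void)
{
	unsigned int order = 10;	/* 1024 page frames */

	/* Guard the shifts, as check_contig_order() does: with a
	 * 32-bit unsigned literal, 1U << order is undefined for
	 * order >= 32, and the byte count below must not wrap. */
	if (order >= 32)
		return 1;

	/* One allocation holds both arrays, back to back. */
	unsigned long *frames = malloc((sizeof(*frames)
					+ sizeof(struct mc_entry)) << order);
	if (!frames)
		return 1;

	/* The multicall list starts right past the 1U << order frames. */
	struct mc_entry *mcl = (void *)(frames + (1U << order));

	printf("frames: %p, mcl: %p\n", (void *)frames, (void *)mcl);
	free(frames);
	return 0;
}
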
@@ -293,8 +350,7 @@ EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
 
 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
 {
-       unsigned long *out_frames = discontig_frames, in_frame;
-       unsigned long  frame, flags;
+       unsigned long *out_frames, in_frame, frame, flags;
        unsigned int   i;
        int            rc, success;
        struct xen_memory_exchange exchange = {
@@ -313,16 +369,18 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;
 
-       if (unlikely(order > MAX_CONTIG_ORDER))
+       if (unlikely(order > max_contig_order))
                return;
 
        set_xen_guest_handle(exchange.in.extent_start, &in_frame);
-       set_xen_guest_handle(exchange.out.extent_start, out_frames);
 
        scrub_pages((void *)vstart, 1 << order);
 
        balloon_lock(flags);
 
+       out_frames = discontig_frames;
+       set_xen_guest_handle(exchange.out.extent_start, out_frames);
+
        /* 1. Find start MFN of contiguous extent. */
        in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
 
@@ -376,7 +434,7 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 static void undo_limit_pages(struct page *pages, unsigned int order)
 {
        BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
-       BUG_ON(order > MAX_CONTIG_ORDER);
+       BUG_ON(order > max_contig_order);
        xen_limit_pages_to_max_mfn(pages, order, 0);
        ClearPageForeign(pages);
        __free_pages(pages, order);
@@ -385,12 +443,11 @@ static void undo_limit_pages(struct page *pages, unsigned int order)
 int xen_limit_pages_to_max_mfn(
        struct page *pages, unsigned int order, unsigned int address_bits)
 {
-       unsigned long flags, frame;
-       unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
+       unsigned long flags, frame, *limit_map, _limit_map;
+       unsigned long *in_frames, *out_frames;
        struct page *page;
        unsigned int i, n, nr_mcl;
        int rc, success;
-       DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
 
        struct xen_memory_exchange exchange = {
                .in = {
@@ -407,23 +464,30 @@ int xen_limit_pages_to_max_mfn(
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;
 
-       if (unlikely(order > MAX_CONTIG_ORDER))
-               return -ENOMEM;
+       if (address_bits && address_bits < PAGE_SHIFT)
+               return -EINVAL;
 
-       if (address_bits) {
-               if (address_bits < PAGE_SHIFT)
-                       return -EINVAL;
+       rc = check_contig_order(order + 1);
+       if (unlikely(rc))
+               return rc;
+
+       if ((1U << order) > BITS_PER_LONG) {
+               limit_map = kmalloc(BITS_TO_LONGS(1U << order)
+                                   * sizeof(*limit_map), GFP_ATOMIC);
+               if (unlikely(!limit_map))
+                       return -ENOMEM;
+       } else
+               limit_map = &_limit_map;
+
+       if (address_bits)
                bitmap_zero(limit_map, 1U << order);
-       } else if (order) {
+       else if (order) {
                BUILD_BUG_ON(sizeof(pages->index) != sizeof(*limit_map));
                for (i = 0; i < BITS_TO_LONGS(1U << order); ++i)
                        limit_map[i] = pages[i + 1].index;
        } else
                __set_bit(0, limit_map);
 
-       set_xen_guest_handle(exchange.in.extent_start, in_frames);
-       set_xen_guest_handle(exchange.out.extent_start, out_frames);
-
        /* 0. Scrub the pages. */
        for (i = 0, n = 0; i < 1U<<order ; i++) {
                page = &pages[i];
@@ -443,14 +507,22 @@ int xen_limit_pages_to_max_mfn(
                }
 #endif
        }
-       if (bitmap_empty(limit_map, 1U << order))
+       if (bitmap_empty(limit_map, 1U << order)) {
+               if (limit_map != &_limit_map)
+                       kfree(limit_map);
                return 0;
+       }
 
        if (n)
                kmap_flush_unused();
 
        balloon_lock(flags);
 
+       in_frames = discontig_frames;
+       set_xen_guest_handle(exchange.in.extent_start, in_frames);
+       out_frames = in_frames + (1U << order);
+       set_xen_guest_handle(exchange.out.extent_start, out_frames);
+
        /* 1. Zap current PTEs (if any), remembering MFNs. */
        for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
                if(!test_bit(i, limit_map))
@@ -517,10 +589,7 @@ int xen_limit_pages_to_max_mfn(
 
        balloon_unlock(flags);
 
-       if (!success)
-               return -ENOMEM;
-
-       if (address_bits) {
+       if (success && address_bits) {
                if (order) {
                        BUILD_BUG_ON(sizeof(*limit_map) != sizeof(pages->index));
                        for (i = 0; i < BITS_TO_LONGS(1U << order); ++i)
@@ -529,7 +598,10 @@ int xen_limit_pages_to_max_mfn(
                SetPageForeign(pages, undo_limit_pages);
        }
 
-       return 0;
+       if (limit_map != &_limit_map)
+               kfree(limit_map);
+
+       return success ? 0 : -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
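
The reworked xen_limit_pages_to_max_mfn() also picks its bitmap storage
at run time: a single unsigned long on the stack when 1U << order bits
fit in one word, a kmalloc()'d array otherwise, freed only in the heap
case. A user-space sketch of that choice, with BITS_TO_LONGS expanded by
hand and all helper names invented for the example:

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Pick storage for a 1UL << order bit map: the caller's single word
 * when it suffices, a zeroed heap array when it does not. */
static unsigned long *get_limit_map(unsigned int order, unsigned long *word)
{
	if ((1UL << order) <= BITS_PER_LONG) {
		*word = 0;
		return word;	/* fits in one long, no allocation */
	}
	return calloc(BITS_TO_LONGS(1UL << order), sizeof(unsigned long));
}

static void put_limit_map(unsigned long *map, unsigned long *word)
{
	if (map != word)
		free(map);	/* only the heap case was allocated */
}

int main(void)
{
	unsigned long word;
	unsigned long *map = get_limit_map(10, &word);	/* 1024 bits: heap */

	if (!map)
		return 1;
	put_limit_map(map, &word);
	return 0;
}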