ia64/xen-unstable

changeset 14103:ee4850bc895b

xen memory allocator: remove bit width restrictions

Hide the (default or user-specified) DMA width from anything outside
the heap allocator. I/O-capable guests can now request any address
width for the memory they want exchanged/added.
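
For example (illustrative, not part of the patch): a guest needing
memory below 4GB for a 32-bit DMA device can now pass address_bits = 32
in its reservation, and the hypervisor forwards that width verbatim as
MEMF_bits(32) instead of collapsing it to the one pre-configured DMA
width. A minimal guest-side sketch, with frame_list assumed to be a
suitably sized xen_pfn_t array:

    struct xen_memory_reservation res = {
        .nr_extents   = 16,   /* illustrative count */
        .extent_order = 0,    /* single-page extents */
        .address_bits = 32,   /* any width > PAGE_SHIFT is now accepted */
        .domid        = DOMID_SELF,
    };
    set_xen_guest_handle(res.extent_start, frame_list);
    rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);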

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Fri Feb 23 17:02:58 2007 +0000 (2007-02-23)
parents 70098102f84d
children 5ba3037ea5e1
files xen/arch/x86/domain_build.c xen/common/memory.c xen/common/page_alloc.c xen/include/asm-ia64/config.h xen/include/asm-x86/config.h xen/include/xen/mm.h
diff
--- a/xen/arch/x86/domain_build.c	Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/arch/x86/domain_build.c	Fri Feb 23 17:02:58 2007 +0000
@@ -429,11 +429,14 @@ int construct_dom0(struct domain *d,
     if ( (1UL << order) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image.\n");
 
-    /*
-     * Allocate from DMA pool: on i386 this ensures that our low-memory 1:1
-     * mapping covers the allocation.
-     */
-    if ( (page = alloc_domheap_pages(d, order, MEMF_dma)) == NULL )
+#ifdef __i386__
+    /* Ensure that our low-memory 1:1 mapping covers the allocation. */
+    page = alloc_domheap_pages(d, order,
+                               MEMF_bits(30 + (v_start >> 31)));
+#else
+    page = alloc_domheap_pages(d, order, 0);
+#endif
+    if ( page == NULL )
         panic("Not enough RAM for domain 0 allocation.\n");
     alloc_spfn = page_to_mfn(page);
     alloc_epfn = alloc_spfn + d->tot_pages;
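
Note: the i386 branch above derives the requested width from dom0's
virtual start address. A short sketch of the arithmetic (illustrative,
not part of the patch; dom0_alloc_width is a made-up name):

    /* v_start >> 31 is 0 for a virtual start below 2GB and 1 above,
     * so the allocation is requested below 2^30 (1GB) or 2^31 (2GB);
     * per the in-code comment, the intent is to keep it within the
     * range the low-memory 1:1 mapping covers. */
    unsigned int dom0_alloc_width = 30 + (v_start >> 31);  /* 30 or 31 */
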
--- a/xen/common/memory.c	Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/common/memory.c	Fri Feb 23 17:02:58 2007 +0000
@@ -324,12 +324,12 @@ static long memory_exchange(XEN_GUEST_HA
          (exch.out.address_bits <
           (get_order_from_pages(max_page) + PAGE_SHIFT)) )
     {
-        if ( exch.out.address_bits < dma_bitsize )
+        if ( exch.out.address_bits <= PAGE_SHIFT )
         {
             rc = -ENOMEM;
             goto fail_early;
         }
-        memflags = MEMF_dma;
+        memflags = MEMF_bits(exch.out.address_bits);
     }
 
     if ( exch.in.extent_order <= exch.out.extent_order )
@@ -537,9 +537,9 @@ long do_memory_op(unsigned long cmd, XEN
              (reservation.address_bits <
               (get_order_from_pages(max_page) + PAGE_SHIFT)) )
         {
-            if ( reservation.address_bits < dma_bitsize )
+            if ( reservation.address_bits <= PAGE_SHIFT )
                 return start_extent;
-            args.memflags = MEMF_dma;
+            args.memflags = MEMF_bits(reservation.address_bits);
        }
 
        if ( likely(reservation.domid == DOMID_SELF) )
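
Note: with the x86 value PAGE_SHIFT == 12, both hunks above now reject
only a width that cannot hold a single 4kB page and pass anything else
straight through to the allocator. A worked fragment mirroring the
first hunk (illustrative, not part of the patch):

    unsigned int address_bits = 20;      /* 1MB limit, illustrative */
    if ( address_bits <= PAGE_SHIFT )    /* 12 with 4kB pages: no    */
        return -ENOMEM;                  /* page fits below 2^12     */
    memflags = MEMF_bits(address_bits);  /* 20 << 24; a width of 20
                                            was formerly refused for
                                            being below dma_bitsize  */
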
--- a/xen/common/page_alloc.c	Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/common/page_alloc.c	Fri Feb 23 17:02:58 2007 +0000
@@ -48,8 +48,8 @@ string_param("badpage", opt_badpage);
 /*
  * Bit width of the DMA heap.
  */
-unsigned int  dma_bitsize = CONFIG_DMA_BITSIZE;
-unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
+static unsigned int  dma_bitsize = CONFIG_DMA_BITSIZE;
+static unsigned long max_dma_mfn = (1UL << (CONFIG_DMA_BITSIZE - PAGE_SHIFT)) - 1;
 static void parse_dma_bits(char *s)
 {
     unsigned int v = simple_strtol(s, NULL, 0);
@@ -58,7 +58,7 @@ static void parse_dma_bits(char *s)
         dma_bitsize = BITS_PER_LONG + PAGE_SHIFT;
         max_dma_mfn = ~0UL;
     }
-    else if ( v > PAGE_SHIFT )
+    else if ( v > PAGE_SHIFT + 1 )
     {
         dma_bitsize = v;
         max_dma_mfn = (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
@@ -741,12 +741,22 @@ struct page_info *__alloc_domheap_pages(
     struct page_info *pg = NULL;
     cpumask_t mask;
     unsigned long i;
+    unsigned int bits = memflags >> _MEMF_bits, zone_hi;
 
     ASSERT(!in_irq());
 
-    if ( !(memflags & MEMF_dma) )
+    if ( bits && bits <= PAGE_SHIFT + 1 )
+        return NULL;
+
+    zone_hi = bits - PAGE_SHIFT - 1;
+    if ( zone_hi >= NR_ZONES )
+        zone_hi = NR_ZONES - 1;
+
+    if ( NR_ZONES + PAGE_SHIFT > dma_bitsize &&
+         (!bits || bits > dma_bitsize) )
     {
-        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, NR_ZONES - 1, cpu, order);
+        pg = alloc_heap_pages(dma_bitsize - PAGE_SHIFT, zone_hi, cpu, order);
+
         /* Failure? Then check if we can fall back to the DMA pool. */
         if ( unlikely(pg == NULL) &&
              ((order > MAX_ORDER) ||
@@ -759,7 +769,7 @@ struct page_info *__alloc_domheap_pages(
 
     if ( pg == NULL )
         if ( (pg = alloc_heap_pages(MEMZONE_XEN + 1,
-                                    dma_bitsize - PAGE_SHIFT - 1,
+                                    zone_hi,
                                     cpu, order)) == NULL )
             return NULL;
 
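
Note: the zone arithmetic above maps a requested width to the highest
usable heap zone. A worked example with PAGE_SHIFT == 12, assuming (as
the old zone_hi argument of dma_bitsize - PAGE_SHIFT - 1 implies) that
zone z holds frames whose MFN has its top set bit at position z:

    unsigned int bits    = 32;            /* caller passed MEMF_bits(32) */
    unsigned int zone_hi = bits - 12 - 1; /* = 19                        */
    /* Zone 19 covers MFNs in [2^19, 2^20), so zones 0..19 together hold
     * exactly the frames below 2^20, i.e. below the caller's 4GB limit.
     * For bits == 0 (no restriction) the subtraction wraps around
     * (unsigned), and the clamp sets zone_hi = NR_ZONES - 1, making the
     * whole heap eligible. */
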
--- a/xen/include/asm-ia64/config.h	Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/include/asm-ia64/config.h	Fri Feb 23 17:02:58 2007 +0000
@@ -42,7 +42,7 @@
 #define CONFIG_IOSAPIC
 #define supervisor_mode_kernel (0)
 
-#define CONFIG_DMA_BITSIZE 30
+#define CONFIG_DMA_BITSIZE 32
 
 /* If PERFC is used, include privop maps.  */
 #ifdef PERF_COUNTERS
--- a/xen/include/asm-x86/config.h	Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/include/asm-x86/config.h	Fri Feb 23 17:02:58 2007 +0000
@@ -82,7 +82,7 @@
 /* Debug stack is restricted to 8kB by guard pages. */
 #define DEBUG_STACK_SIZE 8192
 
-#define CONFIG_DMA_BITSIZE 30
+#define CONFIG_DMA_BITSIZE 32
 
 #if defined(__x86_64__)
 
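
Note: both architectures raise the default DMA heap width from 30 to
32 bits. Mirroring the max_dma_mfn formula in page_alloc.c, with 4kB
pages (PAGE_SHIFT == 12, the x86 case) the pool boundary moves as
follows:

    /* old: (1UL << (30 - 12)) - 1 == 0x3ffff -> frames below 1GB */
    /* new: (1UL << (32 - 12)) - 1 == 0xfffff -> frames below 4GB */
    unsigned long max_dma_mfn = (1UL << (32 - 12)) - 1;
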
--- a/xen/include/xen/mm.h	Fri Feb 23 17:01:38 2007 +0000
+++ b/xen/include/xen/mm.h	Fri Feb 23 17:02:58 2007 +0000
@@ -74,10 +74,10 @@ int assign_pages(
     unsigned int memflags);
 
 /* memflags: */
-#define _MEMF_dma         0
-#define  MEMF_dma         (1U<<_MEMF_dma)
-#define _MEMF_no_refcount 1
+#define _MEMF_no_refcount 0
 #define  MEMF_no_refcount (1U<<_MEMF_no_refcount)
+#define _MEMF_bits        24
+#define  MEMF_bits(n)     ((n)<<_MEMF_bits)
 
 #ifdef CONFIG_PAGEALLOC_MAX_ORDER
 #define MAX_ORDER CONFIG_PAGEALLOC_MAX_ORDER
@@ -85,10 +85,6 @@ int assign_pages(
 #define MAX_ORDER 20 /* 2^20 contiguous pages */
 #endif
 
-/* DMA heap parameters. */
-extern unsigned int  dma_bitsize;
-extern unsigned long max_dma_mfn;
-
 /* Automatic page scrubbing for dead domains. */
 extern struct list_head page_scrub_list;
 #define page_scrub_schedule_work()              \
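
Note: after the mm.h change, memflags packs ordinary flag bits and the
requested address width into one word. A small usage sketch of the new
encoding (illustrative):

    /* Bits 0..23 hold individual flags (currently just
     * MEMF_no_refcount at bit 0); bits 24 and up hold the width,
     * with 0 meaning "no restriction". */
    unsigned int memflags = MEMF_no_refcount | MEMF_bits(32);
    unsigned int bits = memflags >> _MEMF_bits;  /* recovers 32, decoded
                                                    exactly as
                                                    __alloc_domheap_pages()
                                                    does */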