ia64/xen-unstable

changeset 5929:51094fae410e

The attached patch adds a DMA zone to Xen, and also modifies
xen_contig_memory() to request DMA-capable pages.
Signed-off-by: srparish@us.ibm.com
author kaf24@firebug.cl.cam.ac.uk
date Fri Jul 29 10:31:22 2005 +0000 (2005-07-29)
parents 04d01b8fa219
children 8c1944538086
files linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c xen/arch/x86/domain_build.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/common/dom_mem_ops.c xen/common/page_alloc.c xen/include/xen/mm.h
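
Note: the patch passes the caller's addressability requirement in the high
byte of the extent_order argument to MEMOP_increase_reservation; bits 0-7
keep the allocation order and bits 8-15 carry the number of address bits
the caller can reach, which is why xen_contig_memory() below passes
"order | (32<<8)". Below is a minimal standalone sketch of that encoding
and of the matching decode in do_dom_mem_op(); the helper
encode_extent_order() is hypothetical and not part of the patch:

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical helper: pack the allocation order into bits 0-7 and
     * the required address width into bits 8-15, mirroring the
     * "order | (32<<8)" call in xen_contig_memory(). */
    static unsigned int encode_extent_order(unsigned int order,
                                            unsigned int address_bits)
    {
        return order | (address_bits << 8);
    }

    int main(void)
    {
        unsigned int extent_order = encode_extent_order(3, 32);

        /* Mirrors the decode added to do_dom_mem_op(). */
        unsigned int address_bits_order = (extent_order >> 8) & 0xff;
        extent_order &= 0xff;

        assert(extent_order == 3);
        assert(address_bits_order == 32);
        printf("order=%u address_bits=%u -> %s\n",
               extent_order, address_bits_order,
               (address_bits_order <= 32) ? "ALLOC_DOM_DMA" : "no flag");
        return 0;
    }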
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Fri Jul 29 10:27:12 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Fri Jul 29 10:31:22 2005 +0000
     1.3 @@ -296,7 +296,7 @@ void xen_contig_memory(unsigned long vst
     1.4  
     1.5      /* 2. Get a new contiguous memory extent. */
     1.6      BUG_ON(HYPERVISOR_dom_mem_op(
     1.7 -        MEMOP_increase_reservation, &mfn, 1, order) != 1);
     1.8 +        MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
     1.9  
    1.10      /* 3. Map the new extent in place of old pages. */
    1.11      for (i = 0; i < (1<<order); i++) {
     2.1 --- a/xen/arch/x86/domain_build.c	Fri Jul 29 10:27:12 2005 +0000
     2.2 +++ b/xen/arch/x86/domain_build.c	Fri Jul 29 10:31:22 2005 +0000
     2.3 @@ -63,7 +63,7 @@ static struct pfn_info *alloc_largest(st
     2.4      unsigned int order = get_order(max * PAGE_SIZE);
     2.5      if ( (max & (max-1)) != 0 )
     2.6          order--;
     2.7 -    while ( (page = alloc_domheap_pages(d, order)) == NULL )
     2.8 +    while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
     2.9          if ( order-- == 0 )
    2.10              break;
    2.11      return page;
     3.1 --- a/xen/arch/x86/x86_32/mm.c	Fri Jul 29 10:27:12 2005 +0000
     3.2 +++ b/xen/arch/x86/x86_32/mm.c	Fri Jul 29 10:31:22 2005 +0000
     3.3 @@ -102,7 +102,7 @@ void __init paging_init(void)
     3.4          mpt_size = 4*1024*1024;
     3.5      for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
     3.6      {
     3.7 -        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER)) == NULL )
     3.8 +        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
     3.9              panic("Not enough memory to bootstrap Xen.\n");
    3.10          idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i] =
    3.11              l2e_from_page(pg, PAGE_HYPERVISOR | _PAGE_PSE);
     4.1 --- a/xen/arch/x86/x86_64/mm.c	Fri Jul 29 10:27:12 2005 +0000
     4.2 +++ b/xen/arch/x86/x86_64/mm.c	Fri Jul 29 10:31:22 2005 +0000
     4.3 @@ -100,7 +100,7 @@ void __init paging_init(void)
     4.4       */
     4.5      for ( i = 0; i < max_page; i += ((1UL << L2_PAGETABLE_SHIFT) / 8) )
     4.6      {
     4.7 -        pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER);
     4.8 +        pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0);
     4.9          if ( pg == NULL )
    4.10              panic("Not enough memory for m2p table\n");
    4.11          map_pages_to_xen(
     5.1 --- a/xen/common/dom_mem_ops.c	Fri Jul 29 10:27:12 2005 +0000
     5.2 +++ b/xen/common/dom_mem_ops.c	Fri Jul 29 10:31:22 2005 +0000
     5.3 @@ -37,7 +37,8 @@ alloc_dom_mem(struct domain *d,
     5.4                unsigned long *extent_list, 
     5.5                unsigned long  start_extent,
     5.6                unsigned int   nr_extents,
     5.7 -              unsigned int   extent_order)
     5.8 +              unsigned int   extent_order,
     5.9 +              unsigned int   flags)
    5.10  {
    5.11      struct pfn_info *page;
    5.12      unsigned long    i;
    5.13 @@ -56,7 +57,8 @@ alloc_dom_mem(struct domain *d,
    5.14      {
    5.15          PREEMPT_CHECK(MEMOP_increase_reservation);
    5.16  
    5.17 -        if ( unlikely((page = alloc_domheap_pages(d, extent_order)) == NULL) )
    5.18 +        if ( unlikely((page = alloc_domheap_pages(d, extent_order,
    5.19 +                                                  flags)) == NULL) )
    5.20          {
    5.21              DPRINTK("Could not allocate a frame\n");
    5.22              return i;
    5.23 @@ -131,11 +133,16 @@ do_dom_mem_op(unsigned long  op,
    5.24  {
    5.25      struct domain *d;
    5.26      unsigned long  rc, start_extent;
    5.27 +    unsigned int   address_bits_order;
    5.28  
    5.29      /* Extract @start_extent from @op. */
    5.30      start_extent  = op >> START_EXTENT_SHIFT;
    5.31      op           &= (1 << START_EXTENT_SHIFT) - 1;
    5.32  
    5.33 +    /* Separate extent_order and address_bits_order. */
    5.34 +    address_bits_order = (extent_order >> 8) & 0xff;
    5.35 +    extent_order &= 0xff;
    5.36 +
    5.37      if ( unlikely(start_extent > nr_extents) )
    5.38          return -EINVAL;
    5.39  
    5.40 @@ -150,7 +157,8 @@ do_dom_mem_op(unsigned long  op,
    5.41      {
    5.42      case MEMOP_increase_reservation:
    5.43          rc = alloc_dom_mem(
    5.44 -            d, extent_list, start_extent, nr_extents, extent_order);
    5.45 +            d, extent_list, start_extent, nr_extents, extent_order,
    5.46 +            (address_bits_order <= 32) ? ALLOC_DOM_DMA : 0);
    5.47          break;
    5.48      case MEMOP_decrease_reservation:
    5.49          rc = free_dom_mem(
     6.1 --- a/xen/common/page_alloc.c	Fri Jul 29 10:27:12 2005 +0000
     6.2 +++ b/xen/common/page_alloc.c	Fri Jul 29 10:31:22 2005 +0000
     6.3 @@ -207,7 +207,13 @@ unsigned long alloc_boot_pages(unsigned 
     6.4  
     6.5  #define MEMZONE_XEN 0
     6.6  #define MEMZONE_DOM 1
     6.7 -#define NR_ZONES    2
     6.8 +#define MEMZONE_DMADOM 2
     6.9 +#define NR_ZONES    3
    6.10 +
    6.11 +
    6.12 +#define MAX_DMADOM_PFN 0xFFFFF
    6.13 +#define pfn_dom_zone_type(_pfn)                                 \
    6.14 +    (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
    6.15  
    6.16  /* Up to 2^20 pages can be allocated at once. */
    6.17  #define MAX_ORDER 20
    6.18 @@ -236,7 +242,7 @@ void end_boot_allocator(void)
    6.19          if ( next_free )
    6.20              map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
    6.21          if ( curr_free )
    6.22 -            free_heap_pages(MEMZONE_DOM, pfn_to_page(i), 0);
    6.23 +            free_heap_pages(pfn_dom_zone_type(i), pfn_to_page(i), 0);
    6.24      }
    6.25  }
    6.26  
    6.27 @@ -474,14 +480,21 @@ void init_domheap_pages(physaddr_t ps, p
    6.28  {
    6.29      ASSERT(!in_irq());
    6.30  
    6.31 -    ps = round_pgup(ps);
    6.32 -    pe = round_pgdown(pe);
    6.33 +    ps = round_pgup(ps) >> PAGE_SHIFT;
    6.34 +    pe = round_pgdown(pe) >> PAGE_SHIFT;
    6.35  
    6.36 -    init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
    6.37 +    if (ps < MAX_DMADOM_PFN && pe > MAX_DMADOM_PFN) {
    6.38 +        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
    6.39 +        init_heap_pages(MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN),
    6.40 +                        pe - MAX_DMADOM_PFN);
    6.41 +    }
    6.42 +    else
    6.43 +        init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
    6.44  }
    6.45  
    6.46  
    6.47 -struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
    6.48 +struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order,
    6.49 +                                     unsigned int flags)
    6.50  {
    6.51      struct pfn_info *pg;
    6.52      cpumask_t mask;
    6.53 @@ -489,8 +502,13 @@ struct pfn_info *alloc_domheap_pages(str
    6.54  
    6.55      ASSERT(!in_irq());
    6.56  
    6.57 -    if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
    6.58 -        return NULL;
    6.59 +    pg = NULL;
    6.60 +    if (! (flags & ALLOC_DOM_DMA))
    6.61 +        pg = alloc_heap_pages(MEMZONE_DOM, order);
    6.62 +    if (pg == NULL) {
    6.63 +        if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) )
    6.64 +            return NULL;
    6.65 +    }
    6.66  
    6.67      mask = pg->u.free.cpumask;
    6.68      tlbflush_filter(mask, pg->tlbflush_timestamp);
    6.69 @@ -531,7 +549,7 @@ struct pfn_info *alloc_domheap_pages(str
    6.70          DPRINTK("...or the domain is dying (%d)\n", 
    6.71                  !!test_bit(_DOMF_dying, &d->domain_flags));
    6.72          spin_unlock(&d->page_alloc_lock);
    6.73 -        free_heap_pages(MEMZONE_DOM, pg, order);
    6.74 +        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
    6.75          return NULL;
    6.76      }
    6.77  
    6.78 @@ -596,7 +614,7 @@ void free_domheap_pages(struct pfn_info 
    6.79  
    6.80          if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
    6.81          {
    6.82 -            free_heap_pages(MEMZONE_DOM, pg, order);
    6.83 +            free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
    6.84          }
    6.85          else
    6.86          {
    6.87 @@ -616,7 +634,7 @@ void free_domheap_pages(struct pfn_info 
    6.88      else
    6.89      {
    6.90          /* Freeing an anonymous domain-heap page. */
    6.91 -        free_heap_pages(MEMZONE_DOM, pg, order);
    6.92 +        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
    6.93          drop_dom_ref = 0;
    6.94      }
    6.95  
    6.96 @@ -627,7 +645,7 @@ void free_domheap_pages(struct pfn_info 
    6.97  
    6.98  unsigned long avail_domheap_pages(void)
    6.99  {
   6.100 -    return avail[MEMZONE_DOM];
   6.101 +    return avail[MEMZONE_DOM] + avail[MEMZONE_DMADOM];
   6.102  }
   6.103  
   6.104  
   6.105 @@ -676,7 +694,7 @@ static void page_scrub_softirq(void)
   6.106              p = map_domain_page(page_to_pfn(pg));
   6.107              clear_page(p);
   6.108              unmap_domain_page(p);
   6.109 -            free_heap_pages(MEMZONE_DOM, pg, 0);
   6.110 +            free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, 0);
   6.111          }
   6.112      } while ( (NOW() - start) < MILLISECS(1) );
   6.113  }
     7.1 --- a/xen/include/xen/mm.h	Fri Jul 29 10:27:12 2005 +0000
     7.2 +++ b/xen/include/xen/mm.h	Fri Jul 29 10:31:22 2005 +0000
     7.3 @@ -33,12 +33,15 @@ void free_xenheap_pages(void *v, unsigne
     7.4  
     7.5  /* Domain suballocator. These functions are *not* interrupt-safe.*/
     7.6  void init_domheap_pages(physaddr_t ps, physaddr_t pe);
     7.7 -struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
     7.8 +struct pfn_info *alloc_domheap_pages(
     7.9 +    struct domain *d, unsigned int order, unsigned int flags);
    7.10  void free_domheap_pages(struct pfn_info *pg, unsigned int order);
    7.11  unsigned long avail_domheap_pages(void);
    7.12 -#define alloc_domheap_page(d) (alloc_domheap_pages(d,0))
    7.13 +#define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
    7.14  #define free_domheap_page(p)  (free_domheap_pages(p,0))
    7.15  
    7.16 +#define ALLOC_DOM_DMA 1
    7.17 +
    7.18  /* Automatic page scrubbing for dead domains. */
    7.19  extern struct list_head page_scrub_list;
    7.20  #define page_scrub_schedule_work()              \
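
Note: MAX_DMADOM_PFN is 0xFFFFF, so with 4 KiB pages MEMZONE_DMADOM covers
exactly the page frames below 4 GiB, i.e. memory reachable with 32-bit DMA
addresses. Here is a standalone sketch of the zone classification, copying
the macro from the page_alloc.c hunk above and assuming x86's PAGE_SHIFT
of 12:

    #include <stdio.h>

    /* Constants as defined by the patch; PAGE_SHIFT of 12 is assumed. */
    #define PAGE_SHIFT      12
    #define MEMZONE_DOM     1
    #define MEMZONE_DMADOM  2
    #define MAX_DMADOM_PFN  0xFFFFF
    #define pfn_dom_zone_type(_pfn)                                 \
        (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)

    int main(void)
    {
        unsigned long pfns[] = { 0x0, MAX_DMADOM_PFN, MAX_DMADOM_PFN + 1 };
        for (int i = 0; i < 3; i++)
            printf("pfn 0x%lx (addr 0x%llx) -> zone %d\n",
                   pfns[i],
                   (unsigned long long)pfns[i] << PAGE_SHIFT,
                   pfn_dom_zone_type(pfns[i]));
        return 0;
    }

This prints zone 2 (MEMZONE_DMADOM) for every frame at or below 0xFFFFF and
zone 1 (MEMZONE_DOM) for the first frame at 4 GiB. Note the allocation
policy in alloc_domheap_pages(): unless ALLOC_DOM_DMA is passed, it tries
MEMZONE_DOM first and falls back to MEMZONE_DMADOM only when the normal
zone is exhausted, so DMA-capable memory is kept in reserve.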