direct-io.hg

changeset 8677:60beade30a0c

merge
author Ian.Campbell@xensource.com
date Fri Jan 27 11:51:57 2006 +0000 (2006-01-27)
parents 0eb38397e608 17dc21008351
children f1bfe32828a1 990c009015e8
files
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Fri Jan 27 11:31:14 2006 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Fri Jan 27 11:51:57 2006 +0000
     1.3 @@ -315,9 +315,9 @@ int xen_create_contiguous_region(
     1.4  	pud_t         *pud; 
     1.5  	pmd_t         *pmd;
     1.6  	pte_t         *pte;
     1.7 -	unsigned long  mfn, i, flags;
     1.8 +	unsigned long  frame, i, flags;
     1.9  	struct xen_memory_reservation reservation = {
    1.10 -		.extent_start = &mfn,
    1.11 +		.extent_start = &frame,
    1.12  		.nr_extents   = 1,
    1.13  		.extent_order = 0,
    1.14  		.domid        = DOMID_SELF
    1.15 @@ -333,7 +333,7 @@ int xen_create_contiguous_region(
    1.16  		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
    1.17  		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
    1.18  		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
    1.19 -		mfn = pte_mfn(*pte);
    1.20 +		frame = pte_mfn(*pte);
    1.21  		BUG_ON(HYPERVISOR_update_va_mapping(
    1.22  			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
    1.23  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
    1.24 @@ -345,7 +345,8 @@ int xen_create_contiguous_region(
    1.25  	/* 2. Get a new contiguous memory extent. */
    1.26  	reservation.extent_order = order;
    1.27  	reservation.address_bits = address_bits;
    1.28 -	if (HYPERVISOR_memory_op(XENMEM_increase_reservation,
    1.29 +	frame = __pa(vstart) >> PAGE_SHIFT;
    1.30 +	if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
    1.31  				 &reservation) != 1)
    1.32  		goto fail;
    1.33  
    1.34 @@ -353,9 +354,8 @@ int xen_create_contiguous_region(
    1.35  	for (i = 0; i < (1<<order); i++) {
    1.36  		BUG_ON(HYPERVISOR_update_va_mapping(
    1.37  			vstart + (i*PAGE_SIZE),
    1.38 -			pfn_pte_ma(mfn+i, PAGE_KERNEL), 0));
    1.39 -		xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
    1.40 -		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, mfn+i);
    1.41 +			pfn_pte_ma(frame+i, PAGE_KERNEL), 0));
    1.42 +		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i);
    1.43  	}
    1.44  
    1.45  	flush_tlb_all();
    1.46 @@ -371,13 +371,13 @@ int xen_create_contiguous_region(
    1.47  	reservation.address_bits = 0;
    1.48  
    1.49  	for (i = 0; i < (1<<order); i++) {
    1.50 +		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
    1.51  		BUG_ON(HYPERVISOR_memory_op(
    1.52 -			XENMEM_increase_reservation, &reservation) != 1);
    1.53 +			XENMEM_populate_physmap, &reservation) != 1);
    1.54  		BUG_ON(HYPERVISOR_update_va_mapping(
    1.55  			vstart + (i*PAGE_SIZE),
    1.56 -			pfn_pte_ma(mfn, PAGE_KERNEL), 0));
    1.57 -		xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
    1.58 -		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, mfn);
    1.59 +			pfn_pte_ma(frame, PAGE_KERNEL), 0));
    1.60 +		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
    1.61  	}
    1.62  
    1.63  	flush_tlb_all();
    1.64 @@ -393,9 +393,9 @@ void xen_destroy_contiguous_region(unsig
    1.65  	pud_t         *pud; 
    1.66  	pmd_t         *pmd;
    1.67  	pte_t         *pte;
    1.68 -	unsigned long  mfn, i, flags;
    1.69 +	unsigned long  frame, i, flags;
    1.70  	struct xen_memory_reservation reservation = {
    1.71 -		.extent_start = &mfn,
    1.72 +		.extent_start = &frame,
    1.73  		.nr_extents   = 1,
    1.74  		.extent_order = 0,
    1.75  		.domid        = DOMID_SELF
    1.76 @@ -413,7 +413,7 @@ void xen_destroy_contiguous_region(unsig
    1.77  		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
    1.78  		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
    1.79  		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
    1.80 -		mfn = pte_mfn(*pte);
    1.81 +		frame = pte_mfn(*pte);
    1.82  		BUG_ON(HYPERVISOR_update_va_mapping(
    1.83  			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
    1.84  		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
    1.85 @@ -424,13 +424,13 @@ void xen_destroy_contiguous_region(unsig
    1.86  
    1.87  	/* 2. Map new pages in place of old pages. */
    1.88  	for (i = 0; i < (1<<order); i++) {
    1.89 +		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
    1.90  		BUG_ON(HYPERVISOR_memory_op(
    1.91 -			XENMEM_increase_reservation, &reservation) != 1);
    1.92 +			XENMEM_populate_physmap, &reservation) != 1);
    1.93  		BUG_ON(HYPERVISOR_update_va_mapping(
    1.94  			vstart + (i*PAGE_SIZE),
    1.95 -			pfn_pte_ma(mfn, PAGE_KERNEL), 0));
    1.96 -		xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
    1.97 -		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, mfn);
    1.98 +			pfn_pte_ma(frame, PAGE_KERNEL), 0));
    1.99 +		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
   1.100  	}
   1.101  
   1.102  	flush_tlb_all();
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c	Fri Jan 27 11:31:14 2006 +0000
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c	Fri Jan 27 11:51:57 2006 +0000
     2.3 @@ -139,6 +139,21 @@ static struct page *balloon_retrieve(voi
     2.4  	return page;
     2.5  }
     2.6  
     2.7 +static struct page *balloon_first_page(void)
     2.8 +{
     2.9 +	if (list_empty(&ballooned_pages))
    2.10 +		return NULL;
    2.11 +	return LIST_TO_PAGE(ballooned_pages.next);
    2.12 +}
    2.13 +
    2.14 +static struct page *balloon_next_page(struct page *page)
    2.15 +{
    2.16 +	struct list_head *next = PAGE_TO_LIST(page)->next;
    2.17 +	if (next == &ballooned_pages)
    2.18 +		return NULL;
    2.19 +	return LIST_TO_PAGE(next);
    2.20 +}
    2.21 +
    2.22  static void balloon_alarm(unsigned long unused)
    2.23  {
    2.24  	schedule_work(&balloon_worker);
    2.25 @@ -154,7 +169,7 @@ static unsigned long current_target(void
    2.26  
    2.27  static int increase_reservation(unsigned long nr_pages)
    2.28  {
    2.29 -	unsigned long *mfn_list, pfn, i, flags;
    2.30 +	unsigned long *frame_list, pfn, i, flags;
    2.31  	struct page   *page;
    2.32  	long           rc;
    2.33  	struct xen_memory_reservation reservation = {
    2.34 @@ -166,20 +181,27 @@ static int increase_reservation(unsigned
    2.35  	if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
    2.36  		nr_pages = PAGE_SIZE / sizeof(unsigned long);
    2.37  
    2.38 -	mfn_list = (unsigned long *)__get_free_page(GFP_KERNEL);
    2.39 -	if (mfn_list == NULL)
    2.40 +	frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
    2.41 +	if (frame_list == NULL)
    2.42  		return -ENOMEM;
    2.43  
    2.44  	balloon_lock(flags);
    2.45  
    2.46 -	reservation.extent_start = mfn_list;
    2.47 +	page = balloon_first_page();
    2.48 +	for (i = 0; i < nr_pages; i++) {
    2.49 +		BUG_ON(page == NULL);
     2.50 +		frame_list[i] = page_to_pfn(page);
    2.51 +		page = balloon_next_page(page);
    2.52 +	}
    2.53 +
    2.54 +	reservation.extent_start = frame_list;
    2.55  	reservation.nr_extents   = nr_pages;
    2.56  	rc = HYPERVISOR_memory_op(
    2.57 -		XENMEM_increase_reservation, &reservation);
    2.58 +		XENMEM_populate_physmap, &reservation);
    2.59  	if (rc < nr_pages) {
    2.60  		int ret;
    2.61  		/* We hit the Xen hard limit: reprobe. */
    2.62 -		reservation.extent_start = mfn_list;
    2.63 +		reservation.extent_start = frame_list;
    2.64  		reservation.nr_extents   = rc;
    2.65  		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
    2.66  				&reservation);
    2.67 @@ -196,15 +218,15 @@ static int increase_reservation(unsigned
    2.68  		BUG_ON(phys_to_machine_mapping_valid(pfn));
    2.69  
    2.70  		/* Update P->M and M->P tables. */
    2.71 -		set_phys_to_machine(pfn, mfn_list[i]);
    2.72 -		xen_machphys_update(mfn_list[i], pfn);
    2.73 +		set_phys_to_machine(pfn, frame_list[i]);
    2.74 +		xen_machphys_update(frame_list[i], pfn);
    2.75              
    2.76  		/* Link back into the page tables if not highmem. */
    2.77  		if (pfn < max_low_pfn) {
    2.78  			int ret;
    2.79  			ret = HYPERVISOR_update_va_mapping(
    2.80  				(unsigned long)__va(pfn << PAGE_SHIFT),
    2.81 -				pfn_pte_ma(mfn_list[i], PAGE_KERNEL),
    2.82 +				pfn_pte_ma(frame_list[i], PAGE_KERNEL),
    2.83  				0);
    2.84  			BUG_ON(ret);
    2.85  		}
    2.86 @@ -221,14 +243,14 @@ static int increase_reservation(unsigned
    2.87   out:
    2.88  	balloon_unlock(flags);
    2.89  
    2.90 -	free_page((unsigned long)mfn_list);
    2.91 +	free_page((unsigned long)frame_list);
    2.92  
    2.93  	return 0;
    2.94  }
    2.95  
    2.96  static int decrease_reservation(unsigned long nr_pages)
    2.97  {
    2.98 -	unsigned long *mfn_list, pfn, i, flags;
    2.99 +	unsigned long *frame_list, pfn, i, flags;
   2.100  	struct page   *page;
   2.101  	void          *v;
   2.102  	int            need_sleep = 0;
   2.103 @@ -242,8 +264,8 @@ static int decrease_reservation(unsigned
   2.104  	if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
   2.105  		nr_pages = PAGE_SIZE / sizeof(unsigned long);
   2.106  
   2.107 -	mfn_list = (unsigned long *)__get_free_page(GFP_KERNEL);
   2.108 -	if (mfn_list == NULL)
   2.109 +	frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
   2.110 +	if (frame_list == NULL)
   2.111  		return -ENOMEM;
   2.112  
   2.113  	for (i = 0; i < nr_pages; i++) {
   2.114 @@ -254,7 +276,7 @@ static int decrease_reservation(unsigned
   2.115  		}
   2.116  
   2.117  		pfn = page_to_pfn(page);
   2.118 -		mfn_list[i] = pfn_to_mfn(pfn);
   2.119 +		frame_list[i] = pfn_to_mfn(pfn);
   2.120  
   2.121  		if (!PageHighMem(page)) {
   2.122  			v = phys_to_virt(pfn << PAGE_SHIFT);
   2.123 @@ -280,12 +302,12 @@ static int decrease_reservation(unsigned
   2.124  
   2.125  	/* No more mappings: invalidate P2M and add to balloon. */
   2.126  	for (i = 0; i < nr_pages; i++) {
   2.127 -		pfn = mfn_to_pfn(mfn_list[i]);
   2.128 +		pfn = mfn_to_pfn(frame_list[i]);
   2.129  		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
   2.130  		balloon_append(pfn_to_page(pfn));
   2.131  	}
   2.132  
   2.133 -	reservation.extent_start = mfn_list;
   2.134 +	reservation.extent_start = frame_list;
   2.135  	reservation.nr_extents   = nr_pages;
   2.136  	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
   2.137  	BUG_ON(ret != nr_pages);
   2.138 @@ -295,7 +317,7 @@ static int decrease_reservation(unsigned
   2.139  
   2.140  	balloon_unlock(flags);
   2.141  
   2.142 -	free_page((unsigned long)mfn_list);
   2.143 +	free_page((unsigned long)frame_list);
   2.144  
   2.145  	return need_sleep;
   2.146  }
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/util.c	Fri Jan 27 11:31:14 2006 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/util.c	Fri Jan 27 11:51:57 2006 +0000
     3.3 @@ -1,5 +1,6 @@
     3.4  #include <linux/config.h>
     3.5  #include <linux/mm.h>
     3.6 +#include <linux/module.h>
     3.7  #include <linux/slab.h>
     3.8  #include <linux/vmalloc.h>
     3.9  #include <asm/uaccess.h>
    3.10 @@ -31,7 +32,7 @@ struct vm_struct *alloc_vm_area(unsigned
    3.11  
    3.12  	return area;
    3.13  }
    3.14 -EXPORT_SYMBOL(alloc_vm_area);
    3.15 +EXPORT_SYMBOL_GPL(alloc_vm_area);
    3.16  
    3.17  void free_vm_area(struct vm_struct *area)
    3.18  {
    3.19 @@ -40,7 +41,7 @@ void free_vm_area(struct vm_struct *area
    3.20  	BUG_ON(ret != area);
    3.21  	kfree(area);
    3.22  }
    3.23 -EXPORT_SYMBOL(free_vm_area);
    3.24 +EXPORT_SYMBOL_GPL(free_vm_area);
    3.25  
    3.26  void lock_vm_area(struct vm_struct *area)
    3.27  {
    3.28 @@ -60,13 +61,13 @@ void lock_vm_area(struct vm_struct *area
    3.29  	for (i = 0; i < area->size; i += PAGE_SIZE)
    3.30  		(void)__get_user(c, (char __user *)area->addr + i);
    3.31  }
    3.32 -EXPORT_SYMBOL(lock_vm_area);
    3.33 +EXPORT_SYMBOL_GPL(lock_vm_area);
    3.34  
    3.35  void unlock_vm_area(struct vm_struct *area)
    3.36  {
    3.37  	preempt_enable();
    3.38  }
    3.39 -EXPORT_SYMBOL(unlock_vm_area);
    3.40 +EXPORT_SYMBOL_GPL(unlock_vm_area);
    3.41  
    3.42  /*
    3.43   * Local variables:
     4.1 --- a/xen/common/memory.c	Fri Jan 27 11:31:14 2006 +0000
     4.2 +++ b/xen/common/memory.c	Fri Jan 27 11:51:57 2006 +0000
     4.3 @@ -30,7 +30,7 @@ increase_reservation(
     4.4      int           *preempted)
     4.5  {
     4.6      struct pfn_info *page;
     4.7 -    unsigned int     i;
     4.8 +    unsigned long    i;
     4.9  
    4.10      if ( (extent_list != NULL) &&
    4.11           !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
    4.12 @@ -52,7 +52,7 @@ increase_reservation(
    4.13              d, extent_order, flags)) == NULL) )
    4.14          {
    4.15              DPRINTK("Could not allocate order=%d extent: "
    4.16 -                    "id=%d flags=%x (%d of %d)\n",
    4.17 +                    "id=%d flags=%x (%ld of %d)\n",
    4.18                      extent_order, d->domain_id, flags, i, nr_extents);
    4.19              return i;
    4.20          }
    4.21 @@ -67,6 +67,58 @@ increase_reservation(
    4.22  }
    4.23      
    4.24  static long
    4.25 +populate_physmap(
    4.26 +    struct domain *d, 
    4.27 +    unsigned long *extent_list, 
    4.28 +    unsigned int   nr_extents,
    4.29 +    unsigned int   extent_order,
    4.30 +    unsigned int   flags,
    4.31 +    int           *preempted)
    4.32 +{
    4.33 +    struct pfn_info *page;
    4.34 +    unsigned long    i, j, pfn, mfn;
    4.35 +
    4.36 +    if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
    4.37 +        return 0;
    4.38 +
    4.39 +    if ( (extent_order != 0) &&
    4.40 +         !multipage_allocation_permitted(current->domain) )
    4.41 +        return 0;
    4.42 +
    4.43 +    for ( i = 0; i < nr_extents; i++ )
    4.44 +    {
    4.45 +        if ( hypercall_preempt_check() )
    4.46 +        {
    4.47 +            *preempted = 1;
    4.48 +            return i;
    4.49 +        }
    4.50 +
    4.51 +        if ( unlikely((page = alloc_domheap_pages(
    4.52 +            d, extent_order, flags)) == NULL) )
    4.53 +        {
    4.54 +            DPRINTK("Could not allocate order=%d extent: "
    4.55 +                    "id=%d flags=%x (%ld of %d)\n",
    4.56 +                    extent_order, d->domain_id, flags, i, nr_extents);
    4.57 +            return i;
    4.58 +        }
    4.59 +
    4.60 +        mfn = page_to_pfn(page);
    4.61 +
    4.62 +        if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
    4.63 +            return i;
    4.64 +
    4.65 +        for ( j = 0; j < (1 << extent_order); j++ )
    4.66 +            set_pfn_from_mfn(mfn + j, pfn + j);
    4.67 +
    4.68 +        /* Inform the domain of the new page's machine address. */ 
    4.69 +        if ( __put_user(mfn, &extent_list[i]) != 0 )
    4.70 +            return i;
    4.71 +    }
    4.72 +
    4.73 +    return nr_extents;
    4.74 +}
    4.75 +    
    4.76 +static long
    4.77  decrease_reservation(
    4.78      struct domain *d, 
    4.79      unsigned long *extent_list, 
    4.80 @@ -76,7 +128,7 @@ decrease_reservation(
    4.81      int           *preempted)
    4.82  {
    4.83      struct pfn_info *page;
    4.84 -    unsigned long    i, j, mpfn;
    4.85 +    unsigned long    i, j, mfn;
    4.86  
    4.87      if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
    4.88          return 0;
    4.89 @@ -89,19 +141,19 @@ decrease_reservation(
    4.90              return i;
    4.91          }
    4.92  
    4.93 -        if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) )
    4.94 +        if ( unlikely(__get_user(mfn, &extent_list[i]) != 0) )
    4.95              return i;
    4.96  
    4.97          for ( j = 0; j < (1 << extent_order); j++ )
    4.98          {
    4.99 -            if ( unlikely((mpfn + j) >= max_page) )
   4.100 +            if ( unlikely((mfn + j) >= max_page) )
   4.101              {
   4.102                  DPRINTK("Domain %u page number out of range (%lx >= %lx)\n", 
   4.103 -                        d->domain_id, mpfn + j, max_page);
   4.104 +                        d->domain_id, mfn + j, max_page);
   4.105                  return i;
   4.106              }
   4.107              
   4.108 -            page = pfn_to_page(mpfn + j);
   4.109 +            page = pfn_to_page(mfn + j);
   4.110              if ( unlikely(!get_page(page, d)) )
   4.111              {
   4.112                  DPRINTK("Bad page free for domain %u\n", d->domain_id);
   4.113 @@ -143,6 +195,7 @@ long do_memory_op(int cmd, void *arg)
   4.114      {
   4.115      case XENMEM_increase_reservation:
   4.116      case XENMEM_decrease_reservation:
   4.117 +    case XENMEM_populate_physmap:
   4.118          if ( copy_from_user(&reservation, arg, sizeof(reservation)) )
   4.119              return -EFAULT;
   4.120  
   4.121 @@ -170,14 +223,37 @@ long do_memory_op(int cmd, void *arg)
   4.122          else if ( (d = find_domain_by_id(reservation.domid)) == NULL )
   4.123              return -ESRCH;
   4.124  
   4.125 -        rc = ((op == XENMEM_increase_reservation) ?
   4.126 -              increase_reservation : decrease_reservation)(
   4.127 -                  d,
   4.128 -                  reservation.extent_start,
   4.129 -                  reservation.nr_extents,
   4.130 -                  reservation.extent_order,
   4.131 -                  flags,
   4.132 -                  &preempted);
   4.133 +        switch ( op )
   4.134 +        {
   4.135 +        case XENMEM_increase_reservation:
   4.136 +            rc = increase_reservation(
   4.137 +                d,
   4.138 +                reservation.extent_start,
   4.139 +                reservation.nr_extents,
   4.140 +                reservation.extent_order,
   4.141 +                flags,
   4.142 +                &preempted);
   4.143 +            break;
   4.144 +        case XENMEM_decrease_reservation:
   4.145 +            rc = decrease_reservation(
   4.146 +                d,
   4.147 +                reservation.extent_start,
   4.148 +                reservation.nr_extents,
   4.149 +                reservation.extent_order,
   4.150 +                flags,
   4.151 +                &preempted);
   4.152 +            break;
   4.153 +        case XENMEM_populate_physmap:
   4.154 +        default:
   4.155 +            rc = populate_physmap(
   4.156 +                d,
   4.157 +                reservation.extent_start,
   4.158 +                reservation.nr_extents,
   4.159 +                reservation.extent_order,
   4.160 +                flags,
   4.161 +                &preempted);
   4.162 +            break;
   4.163 +        }
   4.164  
   4.165          if ( unlikely(reservation.domid != DOMID_SELF) )
   4.166              put_domain(d);
     5.1 --- a/xen/include/public/memory.h	Fri Jan 27 11:31:14 2006 +0000
     5.2 +++ b/xen/include/public/memory.h	Fri Jan 27 11:51:57 2006 +0000
     5.3 @@ -16,11 +16,18 @@
     5.4   */
     5.5  #define XENMEM_increase_reservation 0
     5.6  #define XENMEM_decrease_reservation 1
     5.7 +#define XENMEM_populate_physmap     6
     5.8  typedef struct xen_memory_reservation {
     5.9  
    5.10      /*
    5.11 -     * MFN bases of extents to free (XENMEM_decrease_reservation).
    5.12 -     * MFN bases of extents that were allocated (XENMEM_increase_reservation).
    5.13 +     * XENMEM_increase_reservation:
    5.14 +     *   OUT: MFN bases of extents that were allocated
    5.15 +     * XENMEM_decrease_reservation:
    5.16 +     *   IN:  MFN bases of extents to free
    5.17 +     * XENMEM_populate_physmap:
    5.18 +     *   IN:  PFN bases of extents to populate with memory
    5.19 +     *   OUT: MFN bases of extents that were allocated
    5.20 +     *   (NB. This command also updates the mach_to_phys translation table)
    5.21       */
    5.22      unsigned long *extent_start;
    5.23  
    5.24 @@ -29,11 +36,10 @@ typedef struct xen_memory_reservation {
    5.25      unsigned int   extent_order;
    5.26  
    5.27      /*
    5.28 -     * XENMEM_increase_reservation: maximum # bits addressable by the user
    5.29 -     * of the allocated region (e.g., I/O devices often have a 32-bit
    5.30 -     * limitation even in 64-bit systems). If zero then the user has no
    5.31 -     * addressing restriction.
    5.32 -     * XENMEM_decrease_reservation: unused.
     5.33 +     * Maximum # bits addressable by the user of the allocated region (e.g., 
    5.34 +     * I/O devices often have a 32-bit limitation even in 64-bit systems). If 
    5.35 +     * zero then the user has no addressing restriction.
    5.36 +     * This field is not used by XENMEM_decrease_reservation.
    5.37       */
    5.38      unsigned int   address_bits;
    5.39