direct-io.hg

changeset 6698:2704a88c3295

merge?
author cl349@firebug.cl.cam.ac.uk
date Fri Sep 09 08:56:38 2005 +0000 (2005-09-09)
parents 22c30df92b11 4cdf880c9463
children cdfa7dd00c44
files linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6-xen-sparse/drivers/xen/netback/interface.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c linux-2.6-xen-sparse/drivers/xen/usbback/interface.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h tools/libxc/xc_domain.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_private.c tools/libxc/xenctrl.h tools/python/xen/lowlevel/xc/xc.c tools/python/xen/xend/image.py xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/mm.c xen/common/grant_table.c xen/common/memory.c xen/common/page_alloc.c xen/common/trace.c xen/common/xmalloc.c xen/drivers/char/console.c xen/drivers/char/serial.c xen/include/asm-x86/page.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Fri Sep 09 08:56:14 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Fri Sep 09 08:56:38 2005 +0000
     1.3 @@ -2202,7 +2202,7 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
     1.4  CONFIG_PROC_FS=y
     1.5  CONFIG_PROC_KCORE=y
     1.6  CONFIG_SYSFS=y
     1.7 -# CONFIG_DEVFS_FS is not set
     1.8 +CONFIG_DEVFS_FS=y
     1.9  CONFIG_DEVPTS_FS_XATTR=y
    1.10  CONFIG_DEVPTS_FS_SECURITY=y
    1.11  CONFIG_TMPFS=y
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Sep 09 08:56:14 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Sep 09 08:56:38 2005 +0000
     2.3 @@ -22,13 +22,14 @@
     2.4  #define ISA_START_ADDRESS	0x0
     2.5  #define ISA_END_ADDRESS		0x100000
     2.6  
     2.7 +#if 0 /* not PAE safe */
     2.8  /* These hacky macros avoid phys->machine translations. */
     2.9  #define __direct_pte(x) ((pte_t) { (x) } )
    2.10  #define __direct_mk_pte(page_nr,pgprot) \
    2.11    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
    2.12  #define direct_mk_pte_phys(physpage, pgprot) \
    2.13    __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
    2.14 -
    2.15 +#endif
    2.16  
    2.17  static int direct_remap_area_pte_fn(pte_t *pte, 
    2.18  				    struct page *pte_page,
    2.19 @@ -37,16 +38,16 @@ static int direct_remap_area_pte_fn(pte_
    2.20  {
    2.21  	mmu_update_t **v = (mmu_update_t **)data;
    2.22  
    2.23 -	(*v)->ptr = ((maddr_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
    2.24 +	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
    2.25  		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
    2.26  	(*v)++;
    2.27  
    2.28  	return 0;
    2.29  }
    2.30  
    2.31 -int direct_remap_area_pages(struct mm_struct *mm,
    2.32 +int direct_remap_pfn_range(struct mm_struct *mm,
    2.33  			    unsigned long address, 
    2.34 -			    unsigned long machine_addr,
    2.35 +			    unsigned long mfn,
    2.36  			    unsigned long size, 
    2.37  			    pgprot_t prot,
    2.38  			    domid_t  domid)
    2.39 @@ -77,9 +78,9 @@ int direct_remap_area_pages(struct mm_st
    2.40  		 * Fill in the machine address: PTE ptr is done later by
    2.41  		 * __direct_remap_area_pages(). 
    2.42  		 */
    2.43 -		v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot));
    2.44 +		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
    2.45  
    2.46 -		machine_addr += PAGE_SIZE;
    2.47 +		mfn++;
    2.48  		address += PAGE_SIZE; 
    2.49  		v++;
    2.50  	}
    2.51 @@ -97,8 +98,10 @@ int direct_remap_area_pages(struct mm_st
    2.52  	return 0;
    2.53  }
    2.54  
    2.55 -EXPORT_SYMBOL(direct_remap_area_pages);
    2.56 +EXPORT_SYMBOL(direct_remap_pfn_range);
    2.57  
    2.58 +
    2.59 +/* FIXME: This is horribly broken on PAE */ 
    2.60  static int lookup_pte_fn(
    2.61  	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
    2.62  {
    2.63 @@ -218,7 +221,7 @@ void __iomem * __ioremap(unsigned long p
    2.64  #ifdef __x86_64__
    2.65  	flags |= _PAGE_USER;
    2.66  #endif
    2.67 -	if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
    2.68 +	if (direct_remap_pfn_range(&init_mm, (unsigned long) addr, phys_addr>>PAGE_SHIFT,
    2.69  				    size, __pgprot(flags), domid)) {
    2.70  		vunmap((void __force *) addr);
    2.71  		return NULL;
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Fri Sep 09 08:56:14 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Fri Sep 09 08:56:38 2005 +0000
     3.3 @@ -182,7 +182,7 @@ int blkif_ioctl(struct inode *inode, str
     3.4  static int blkif_queue_request(struct request *req)
     3.5  {
     3.6  	struct blkfront_info *info = req->rq_disk->private_data;
     3.7 -	unsigned long buffer_ma;
     3.8 +	unsigned long buffer_mfn;
     3.9  	blkif_request_t *ring_req;
    3.10  	struct bio *bio;
    3.11  	struct bio_vec *bvec;
    3.12 @@ -221,7 +221,7 @@ static int blkif_queue_request(struct re
    3.13  		bio_for_each_segment (bvec, bio, idx) {
    3.14  			BUG_ON(ring_req->nr_segments
    3.15  			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
    3.16 -			buffer_ma = page_to_phys(bvec->bv_page);
    3.17 +			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
    3.18  			fsect = bvec->bv_offset >> 9;
    3.19  			lsect = fsect + (bvec->bv_len >> 9) - 1;
    3.20  			/* install a grant reference. */
    3.21 @@ -231,11 +231,11 @@ static int blkif_queue_request(struct re
    3.22  			gnttab_grant_foreign_access_ref(
    3.23  				ref,
    3.24  				info->backend_id,
    3.25 -				buffer_ma >> PAGE_SHIFT,
    3.26 +				buffer_mfn,
    3.27  				rq_data_dir(req) );
    3.28  
    3.29  			info->shadow[id].frame[ring_req->nr_segments] =
    3.30 -				buffer_ma >> PAGE_SHIFT;
    3.31 +				buffer_mfn;
    3.32  
    3.33  			ring_req->frame_and_sects[ring_req->nr_segments] =
    3.34  				blkif_fas_from_gref(ref, fsect, lsect);
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Sep 09 08:56:14 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Sep 09 08:56:38 2005 +0000
     4.3 @@ -154,12 +154,12 @@ static int map_frontend_pages(netif_t *n
     4.4      pgprot_t      prot = __pgprot(_KERNPG_TABLE);
     4.5      int           err;
     4.6  
     4.7 -    err = direct_remap_area_pages(&init_mm, localaddr,
     4.8 -				  tx_ring_ref<<PAGE_SHIFT, PAGE_SIZE,
     4.9 +    err = direct_remap_pfn_range(&init_mm, localaddr,
    4.10 +				  tx_ring_ref, PAGE_SIZE,
    4.11  				  prot, netif->domid); 
    4.12      
    4.13 -    err |= direct_remap_area_pages(&init_mm, localaddr + PAGE_SIZE,
    4.14 -				  rx_ring_ref<<PAGE_SHIFT, PAGE_SIZE,
    4.15 +    err |= direct_remap_pfn_range(&init_mm, localaddr + PAGE_SIZE,
    4.16 +				  rx_ring_ref, PAGE_SIZE,
    4.17  				  prot, netif->domid);
    4.18  
    4.19      if (err)
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 09 08:56:14 2005 +0000
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 09 08:56:38 2005 +0000
     5.3 @@ -297,7 +297,7 @@ static void net_rx_action(unsigned long 
     5.4          mmuext->mfn = old_mfn;
     5.5          mmuext++;
     5.6  #endif
     5.7 -        mmu->ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
     5.8 +        mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
     5.9          mmu->val = __pa(vdata) >> PAGE_SHIFT;  
    5.10          mmu++;
    5.11  
     6.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 09 08:56:14 2005 +0000
     6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 09 08:56:38 2005 +0000
     6.3 @@ -618,7 +618,7 @@ static int netif_poll(struct net_device 
     6.4  
     6.5          /* Remap the page. */
     6.6  #ifdef CONFIG_XEN_NETDEV_GRANT
     6.7 -        mmu->ptr = mfn << PAGE_SHIFT | MMU_MACHPHYS_UPDATE;
     6.8 +        mmu->ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
     6.9  #else
    6.10          mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
    6.11  #endif
     7.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Fri Sep 09 08:56:14 2005 +0000
     7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Fri Sep 09 08:56:38 2005 +0000
     7.3 @@ -116,9 +116,9 @@ static int privcmd_ioctl(struct inode *i
     7.4                  if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
     7.5                      return -EINVAL;
     7.6  
     7.7 -                if ( (rc = direct_remap_area_pages(vma->vm_mm, 
     7.8 +                if ( (rc = direct_remap_pfn_range(vma->vm_mm, 
     7.9                                                     msg[j].va&PAGE_MASK, 
    7.10 -                                                   msg[j].mfn<<PAGE_SHIFT, 
    7.11 +                                                   msg[j].mfn, 
    7.12                                                     msg[j].npages<<PAGE_SHIFT, 
    7.13                                                     vma->vm_page_prot,
    7.14                                                     mmapcmd.dom)) < 0 )
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c	Fri Sep 09 08:56:14 2005 +0000
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c	Fri Sep 09 08:56:38 2005 +0000
     8.3 @@ -161,8 +161,8 @@ void usbif_connect(usbif_be_connect_t *c
     8.4      }
     8.5  
     8.6      prot = __pgprot(_KERNPG_TABLE);
     8.7 -    error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
     8.8 -                                    shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
     8.9 +    error = direct_remap_pfn_range(&init_mm, VMALLOC_VMADDR(vma->addr),
    8.10 +                                    shmem_frame, PAGE_SIZE,
    8.11                                      prot, domid);
    8.12      if ( error != 0 )
    8.13      {
     9.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Fri Sep 09 08:56:14 2005 +0000
     9.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Fri Sep 09 08:56:38 2005 +0000
     9.3 @@ -460,9 +460,9 @@ void make_pages_writable(void *va, unsig
     9.4  #define kern_addr_valid(addr)	(1)
     9.5  #endif /* !CONFIG_DISCONTIGMEM */
     9.6  
     9.7 -int direct_remap_area_pages(struct mm_struct *mm,
     9.8 +int direct_remap_pfn_range(struct mm_struct *mm,
     9.9                              unsigned long address, 
    9.10 -                            unsigned long machine_addr,
    9.11 +                            unsigned long mfn,
    9.12                              unsigned long size, 
    9.13                              pgprot_t prot,
    9.14                              domid_t  domid);
    9.15 @@ -474,10 +474,10 @@ int touch_pte_range(struct mm_struct *mm
    9.16                      unsigned long size);
    9.17  
    9.18  #define io_remap_page_range(vma,from,phys,size,prot) \
    9.19 -direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
    9.20 +direct_remap_pfn_range(vma->vm_mm,from,phys>>PAGE_SHIFT,size,prot,DOMID_IO)
    9.21  
    9.22  #define io_remap_pfn_range(vma,from,pfn,size,prot) \
    9.23 -direct_remap_area_pages(vma->vm_mm,from,pfn<<PAGE_SHIFT,size,prot,DOMID_IO)
    9.24 +direct_remap_pfn_range(vma->vm_mm,from,pfn,size,prot,DOMID_IO)
    9.25  
    9.26  #define MK_IOSPACE_PFN(space, pfn)	(pfn)
    9.27  #define GET_IOSPACE(pfn)		0
    10.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	Fri Sep 09 08:56:14 2005 +0000
    10.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	Fri Sep 09 08:56:38 2005 +0000
    10.3 @@ -526,28 +526,26 @@ extern int kern_addr_valid(unsigned long
    10.4  
    10.5  #define DOMID_LOCAL (0xFFFFU)
    10.6  
    10.7 -int direct_remap_area_pages(struct mm_struct *mm,
    10.8 +int direct_remap_pfn_range(struct mm_struct *mm,
    10.9                              unsigned long address,
   10.10 -                            unsigned long machine_addr,
   10.11 +                            unsigned long mfn,
   10.12                              unsigned long size,
   10.13                              pgprot_t prot,
   10.14                              domid_t  domid);
   10.15 -int __direct_remap_area_pages(struct mm_struct *mm,
   10.16 -                              unsigned long address,
   10.17 -                              unsigned long size,
   10.18 -                              mmu_update_t *v);
   10.19 +
   10.20  int create_lookup_pte_addr(struct mm_struct *mm,
   10.21                             unsigned long address,
   10.22                             unsigned long *ptep);
   10.23 +
   10.24  int touch_pte_range(struct mm_struct *mm,
   10.25                      unsigned long address,
   10.26                      unsigned long size);
   10.27  
   10.28  #define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
   10.29 -		direct_remap_area_pages((vma)->vm_mm,vaddr,paddr,size,prot,DOMID_IO)
   10.30 +		direct_remap_pfn_range((vma)->vm_mm,vaddr,paddr>>PAGE_SHIFT,size,prot,DOMID_IO)
   10.31  
   10.32  #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
   10.33 -		direct_remap_area_pages((vma)->vm_mm,vaddr,(pfn)<<PAGE_SHIFT,size,prot,DOMID_IO)
   10.34 +		direct_remap_pfn_range((vma)->vm_mm,vaddr,pfn,size,prot,DOMID_IO)
   10.35  
   10.36  #define MK_IOSPACE_PFN(space, pfn)	(pfn)
   10.37  #define GET_IOSPACE(pfn)		0
    11.1 --- a/tools/libxc/xc_domain.c	Fri Sep 09 08:56:14 2005 +0000
    11.2 +++ b/tools/libxc/xc_domain.c	Fri Sep 09 08:56:38 2005 +0000
    11.3 @@ -262,23 +262,66 @@ int xc_domain_setmaxmem(int xc_handle,
    11.4  
    11.5  int xc_domain_memory_increase_reservation(int xc_handle,
    11.6                                            u32 domid, 
    11.7 -                                          unsigned int mem_kb)
    11.8 +                                          unsigned long nr_extents,
    11.9 +                                          unsigned int extent_order,
   11.10 +                                          unsigned int address_bits,
   11.11 +					  unsigned long *extent_start)
   11.12  {
   11.13      int err;
   11.14 -    unsigned int npages = mem_kb / (PAGE_SIZE/1024);
   11.15      struct xen_memory_reservation reservation = {
   11.16 -        .nr_extents   = npages,
   11.17 -        .extent_order = 0,
   11.18 +        .extent_start = extent_start, /* may be NULL */
   11.19 +        .nr_extents   = nr_extents,
   11.20 +        .extent_order = extent_order,  
   11.21 +        .address_bits = address_bits,
   11.22          .domid        = domid
   11.23      };
   11.24  
   11.25      err = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation);
   11.26 -    if (err == npages)
   11.27 +    if (err == nr_extents)
   11.28          return 0;
   11.29  
   11.30      if (err > 0) {
    11.31 +        fprintf(stderr,"Failed allocation for dom %d : %ld pages order %d addr_bits %d\n",
   11.32 +                                 domid, nr_extents, extent_order, address_bits);
   11.33          errno = ENOMEM;
   11.34          err = -1;
   11.35      }
   11.36      return err;
   11.37  }
   11.38 +
   11.39 +int xc_domain_memory_decrease_reservation(int xc_handle,
   11.40 +                                          u32 domid, 
   11.41 +                                          unsigned long nr_extents,
   11.42 +                                          unsigned int extent_order,
   11.43 +					  unsigned long *extent_start)
   11.44 +{
   11.45 +    int err;
   11.46 +    struct xen_memory_reservation reservation = {
   11.47 +        .extent_start = extent_start, 
   11.48 +        .nr_extents   = nr_extents,
   11.49 +        .extent_order = extent_order,  
   11.50 +        .address_bits = 0,
   11.51 +        .domid        = domid
   11.52 +    };
   11.53 +
   11.54 +    if (extent_start == NULL)
   11.55 +    {
   11.56 +        fprintf(stderr,"decrease_reservation extent_start is NULL!\n");
   11.57 +        errno = EINVAL;
   11.58 +        err = -1;
   11.59 +	goto out;
   11.60 +    }
   11.61 +
    11.62 +    err = xc_memory_op(xc_handle, XENMEM_decrease_reservation, &reservation);
   11.63 +    if (err == nr_extents)
   11.64 +        return 0;
   11.65 +
   11.66 +    if (err > 0) {
    11.67 +        fprintf(stderr,"Failed de-allocation for dom %d : %ld pages order %d\n",
   11.68 +                                 domid, nr_extents, extent_order);
   11.69 +        errno = EBUSY;
   11.70 +        err = -1;
   11.71 +    }
   11.72 +out:
   11.73 +    return err;
   11.74 +}
    12.1 --- a/tools/libxc/xc_linux_build.c	Fri Sep 09 08:56:14 2005 +0000
    12.2 +++ b/tools/libxc/xc_linux_build.c	Fri Sep 09 08:56:38 2005 +0000
    12.3 @@ -57,7 +57,7 @@ static int probeimageformat(char *image,
    12.4  }
    12.5  
    12.6  #define alloc_pt(ltab, vltab) \
    12.7 -        ltab = page_array[ppt_alloc++] << PAGE_SHIFT; \
    12.8 +        ltab = (unsigned long long)(page_array[ppt_alloc++]) << PAGE_SHIFT; \
    12.9          if (vltab != NULL) { \
   12.10              munmap(vltab, PAGE_SIZE); \
   12.11          } \
   12.12 @@ -128,18 +128,37 @@ static int setup_pg_tables_pae(int xc_ha
   12.13      l1_pgentry_64_t *vl1tab=NULL, *vl1e=NULL;
   12.14      l2_pgentry_64_t *vl2tab=NULL, *vl2e=NULL;
   12.15      l3_pgentry_64_t *vl3tab=NULL, *vl3e=NULL;
   12.16 -    unsigned long l1tab = 0;
   12.17 -    unsigned long l2tab = 0;
   12.18 -    unsigned long l3tab = 0;
   12.19 +    unsigned long long l1tab = 0;
   12.20 +    unsigned long long l2tab = 0;
   12.21 +    unsigned long long l3tab = 0;
   12.22      unsigned long ppt_alloc;
   12.23      unsigned long count;
   12.24  
   12.25      /* First allocate page for page dir. */
   12.26      ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
   12.27 +
   12.28 +    if ( page_array[ppt_alloc] > 0xfffff )
   12.29 +    {
   12.30 +	unsigned long nmfn;
   12.31 +	nmfn = xc_make_page_below_4G( xc_handle, dom, page_array[ppt_alloc] );
   12.32 +	if ( nmfn == 0 )
   12.33 +	{
   12.34 +	    fprintf(stderr, "Couldn't get a page below 4GB :-(\n");
   12.35 +	    goto error_out;
   12.36 +	}
   12.37 +	page_array[ppt_alloc] = nmfn;
   12.38 +    }
   12.39 +
   12.40      alloc_pt(l3tab, vl3tab);
   12.41      vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
   12.42      ctxt->ctrlreg[3] = l3tab;
   12.43 -    
   12.44 +
   12.45 +    if(l3tab>0xfffff000ULL)
   12.46 +    {
   12.47 +        fprintf(stderr,"L3TAB = %llx above 4GB!\n",l3tab);
   12.48 +        goto error_out;
   12.49 +    }
   12.50 + 
   12.51      for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
   12.52      {
   12.53          if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
   12.54 @@ -525,12 +544,14 @@ static int setup_guest(int xc_handle,
   12.55      physmap = physmap_e = xc_map_foreign_range(
   12.56          xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
   12.57          page_array[physmap_pfn++]);
   12.58 +
   12.59      for ( count = 0; count < nr_pages; count++ )
   12.60      {
   12.61          if ( xc_add_mmu_update(xc_handle, mmu,
   12.62 -			       (page_array[count] << PAGE_SHIFT) | 
   12.63 +			       ((unsigned long long)page_array[count] << PAGE_SHIFT) | 
   12.64  			       MMU_MACHPHYS_UPDATE, count) )
   12.65          {
   12.66 +            fprintf(stderr,"m2p update failure p=%lx m=%lx\n",count,page_array[count] ); 
   12.67              munmap(physmap, PAGE_SIZE);
   12.68              goto error_out;
   12.69          }
    13.1 --- a/tools/libxc/xc_linux_restore.c	Fri Sep 09 08:56:14 2005 +0000
    13.2 +++ b/tools/libxc/xc_linux_restore.c	Fri Sep 09 08:56:38 2005 +0000
    13.3 @@ -149,9 +149,9 @@ int xc_linux_restore(int xc_handle, int 
    13.4      }
    13.5  
    13.6      err = xc_domain_memory_increase_reservation(xc_handle, dom,
    13.7 -                                                nr_pfns * PAGE_SIZE / 1024);
    13.8 +                                                nr_pfns, 0, 0, NULL);
    13.9      if (err != 0) {
   13.10 -        ERR("Failed to increate reservation by %lx\n", 
   13.11 +        ERR("Failed to increase reservation by %lx\n", 
   13.12              nr_pfns * PAGE_SIZE / 1024); 
   13.13          errno = ENOMEM;
   13.14          goto out;
    14.1 --- a/tools/libxc/xc_private.c	Fri Sep 09 08:56:14 2005 +0000
    14.2 +++ b/tools/libxc/xc_private.c	Fri Sep 09 08:56:38 2005 +0000
    14.3 @@ -116,7 +116,7 @@ int xc_mmuext_op(
    14.4  
    14.5      if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
    14.6      {
    14.7 -	fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to"
    14.8 +	fprintf(stderr, "Dom_mmuext operation failed (rc=%ld errno=%d)-- need to"
    14.9                      " rebuild the user-space tool set?\n",ret,errno);
   14.10      }
   14.11  
   14.12 @@ -172,7 +172,7 @@ xc_mmu_t *xc_init_mmu_updates(int xc_han
   14.13  }
   14.14  
   14.15  int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 
   14.16 -		      unsigned long ptr, unsigned long val)
   14.17 +		      unsigned long long ptr, unsigned long long val)
   14.18  {
   14.19      mmu->updates[mmu->idx].ptr = ptr;
   14.20      mmu->updates[mmu->idx].val = val;
   14.21 @@ -229,7 +229,7 @@ int xc_memory_op(int xc_handle,
   14.22  
   14.23      if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
   14.24      {
   14.25 -	fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to"
   14.26 +	fprintf(stderr, "hypercall failed (rc=%ld errno=%d)-- need to"
   14.27                  " rebuild the user-space tool set?\n",ret,errno);
   14.28      }
   14.29  
   14.30 @@ -427,3 +427,21 @@ int xc_version(int xc_handle, int cmd, v
   14.31  {
   14.32      return do_xen_version(xc_handle, cmd, arg);
   14.33  }
   14.34 +
   14.35 +unsigned long xc_make_page_below_4G(int xc_handle, u32 domid, 
   14.36 +				    unsigned long mfn)
   14.37 +{
   14.38 +    unsigned long new_mfn;
   14.39 +    if ( xc_domain_memory_decrease_reservation( 
    14.40 +	xc_handle, domid, 1, 0, &mfn ) != 0 )
   14.41 +    {
   14.42 +	fprintf(stderr,"xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
   14.43 +	return 0;
   14.44 +    }
    14.45 +    if ( xc_domain_memory_increase_reservation( xc_handle, domid, 1, 0, 32, &new_mfn ) != 0 )
   14.46 +    {
   14.47 +	fprintf(stderr,"xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
   14.48 +	return 0;
   14.49 +    }
   14.50 +    return new_mfn;
   14.51 +}
    15.1 --- a/tools/libxc/xenctrl.h	Fri Sep 09 08:56:14 2005 +0000
    15.2 +++ b/tools/libxc/xenctrl.h	Fri Sep 09 08:56:38 2005 +0000
    15.3 @@ -387,7 +387,19 @@ int xc_domain_setmaxmem(int xc_handle,
    15.4  
    15.5  int xc_domain_memory_increase_reservation(int xc_handle,
    15.6                                            u32 domid, 
    15.7 -                                          unsigned int mem_kb);
    15.8 +                                          unsigned long nr_extents,
    15.9 +                                          unsigned int extent_order,
   15.10 +                                          unsigned int address_bits,
   15.11 +					  unsigned long *extent_start);
   15.12 +
   15.13 +int xc_domain_memory_decrease_reservation(int xc_handle,
   15.14 +                                          u32 domid, 
   15.15 +                                          unsigned long nr_extents,
   15.16 +                                          unsigned int extent_order,
   15.17 +					  unsigned long *extent_start);
   15.18 +
   15.19 +unsigned long xc_make_page_below_4G(int xc_handle, u32 domid, 
   15.20 +				    unsigned long mfn);
   15.21  
   15.22  typedef dom0_perfc_desc_t xc_perfc_desc_t;
   15.23  /* IMPORTANT: The caller is responsible for mlock()'ing the @desc array. */
   15.24 @@ -521,7 +533,7 @@ struct xc_mmu {
   15.25  typedef struct xc_mmu xc_mmu_t;
   15.26  xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
   15.27  int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 
   15.28 -                   unsigned long ptr, unsigned long val);
   15.29 +                   unsigned long long ptr, unsigned long long val);
   15.30  int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
   15.31  
   15.32  #endif
    16.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Fri Sep 09 08:56:14 2005 +0000
    16.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Fri Sep 09 08:56:38 2005 +0000
    16.3 @@ -841,14 +841,21 @@ static PyObject *pyxc_domain_memory_incr
    16.4  
    16.5      u32 dom;
    16.6      unsigned long mem_kb;
    16.7 +    unsigned int extent_order = 0 , address_bits = 0;
    16.8 +    unsigned long nr_extents;
    16.9  
   16.10 -    static char *kwd_list[] = { "dom", "mem_kb", NULL };
   16.11 +    static char *kwd_list[] = { "dom", "mem_kb", "extent_order", "address_bits", NULL };
   16.12  
   16.13 -    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list, 
   16.14 -                                      &dom, &mem_kb) )
   16.15 +    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "il|ii", kwd_list, 
   16.16 +                                      &dom, &mem_kb, &extent_order, &address_bits) )
   16.17          return NULL;
   16.18  
   16.19 -    if ( xc_domain_memory_increase_reservation(xc->xc_handle, dom, mem_kb) )
   16.20 +    /* round down to nearest power of 2. Assume callers using extent_order>0
   16.21 +       know what they are doing */
   16.22 +    nr_extents = (mem_kb / (XC_PAGE_SIZE/1024)) >> extent_order;
   16.23 +    if ( xc_domain_memory_increase_reservation(xc->xc_handle, dom, 
   16.24 +					       nr_extents, extent_order, 
   16.25 +					       address_bits, NULL) )
   16.26          return PyErr_SetFromErrno(xc_error);
   16.27      
   16.28      Py_INCREF(zero);
    17.1 --- a/tools/python/xen/xend/image.py	Fri Sep 09 08:56:14 2005 +0000
    17.2 +++ b/tools/python/xen/xend/image.py	Fri Sep 09 08:56:38 2005 +0000
    17.3 @@ -159,7 +159,12 @@ class ImageHandler:
    17.4          xc.domain_setmaxmem(dom, mem_kb)
    17.5  
    17.6          try:
    17.7 -            xc.domain_memory_increase_reservation(dom, mem_kb)
    17.8 +            # Give the domain some memory below 4GB
    17.9 +            lmem_kb = 0
   17.10 +            if lmem_kb > 0:
   17.11 +                xc.domain_memory_increase_reservation(dom, min(lmem_kb,mem_kb), 0, 32)
   17.12 +            if mem_kb > lmem_kb:
   17.13 +                xc.domain_memory_increase_reservation(dom, mem_kb-lmem_kb, 0, 0)
   17.14          except:
   17.15              xc.domain_destroy(dom)
   17.16              raise
    18.1 --- a/xen/arch/x86/domain.c	Fri Sep 09 08:56:14 2005 +0000
    18.2 +++ b/xen/arch/x86/domain.c	Fri Sep 09 08:56:38 2005 +0000
    18.3 @@ -381,11 +381,13 @@ static int vmx_final_setup_guest(
    18.4  out:
    18.5      free_vmcs(vmcs);
    18.6      if(v->arch.arch_vmx.io_bitmap_a != 0) {
    18.7 -        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
    18.8 +        free_xenheap_pages(
    18.9 +            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
   18.10          v->arch.arch_vmx.io_bitmap_a = 0;
   18.11      }
   18.12      if(v->arch.arch_vmx.io_bitmap_b != 0) {
   18.13 -        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
   18.14 +        free_xenheap_pages(
   18.15 +            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
   18.16          v->arch.arch_vmx.io_bitmap_b = 0;
   18.17      }
   18.18      v->arch.arch_vmx.vmcs = 0;
   18.19 @@ -972,11 +974,13 @@ static void vmx_relinquish_resources(str
   18.20      BUG_ON(v->arch.arch_vmx.vmcs == NULL);
   18.21      free_vmcs(v->arch.arch_vmx.vmcs);
   18.22      if(v->arch.arch_vmx.io_bitmap_a != 0) {
   18.23 -        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
   18.24 +        free_xenheap_pages(
   18.25 +            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
   18.26          v->arch.arch_vmx.io_bitmap_a = 0;
   18.27      }
   18.28      if(v->arch.arch_vmx.io_bitmap_b != 0) {
   18.29 -        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
   18.30 +        free_xenheap_pages(
   18.31 +            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
   18.32          v->arch.arch_vmx.io_bitmap_b = 0;
   18.33      }
   18.34      v->arch.arch_vmx.vmcs = 0;
    19.1 --- a/xen/arch/x86/domain_build.c	Fri Sep 09 08:56:14 2005 +0000
    19.2 +++ b/xen/arch/x86/domain_build.c	Fri Sep 09 08:56:38 2005 +0000
    19.3 @@ -75,15 +75,12 @@ static struct pfn_info *alloc_chunk(stru
    19.4      struct pfn_info *page;
    19.5      unsigned int order;
    19.6      /*
    19.7 -     * Allocate up to 2MB at a time:
    19.8 -     *  1. This prevents overflow of get_order() when allocating more than
    19.9 -     *     4GB to domain 0 on a PAE machine.
   19.10 -     *  2. It prevents allocating very large chunks from DMA pools before
   19.11 -     *     the >4GB pool is fully depleted.
   19.12 +     * Allocate up to 2MB at a time: It prevents allocating very large chunks
   19.13 +     * from DMA pools before the >4GB pool is fully depleted.
   19.14       */
   19.15      if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
   19.16          max_pages = 2UL << (20 - PAGE_SHIFT);
   19.17 -    order = get_order(max_pages << PAGE_SHIFT);
   19.18 +    order = get_order_from_pages(max_pages);
   19.19      if ( (max_pages & (max_pages-1)) != 0 )
   19.20          order--;
   19.21      while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
   19.22 @@ -252,7 +249,7 @@ int construct_dom0(struct domain *d,
   19.23  #endif
   19.24      }
   19.25  
   19.26 -    order = get_order(v_end - dsi.v_start);
   19.27 +    order = get_order_from_bytes(v_end - dsi.v_start);
   19.28      if ( (1UL << order) > nr_pages )
   19.29          panic("Domain 0 allocation is too small for kernel image.\n");
   19.30  
    20.1 --- a/xen/arch/x86/vmx_vmcs.c	Fri Sep 09 08:56:14 2005 +0000
    20.2 +++ b/xen/arch/x86/vmx_vmcs.c	Fri Sep 09 08:56:38 2005 +0000
    20.3 @@ -44,7 +44,7 @@ struct vmcs_struct *alloc_vmcs(void)
    20.4  
    20.5      rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
    20.6      vmcs_size = vmx_msr_high & 0x1fff;
    20.7 -    vmcs = alloc_xenheap_pages(get_order(vmcs_size)); 
    20.8 +    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size)); 
    20.9      memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
   20.10  
   20.11      vmcs->vmcs_revision_id = vmx_msr_low;
   20.12 @@ -55,7 +55,7 @@ void free_vmcs(struct vmcs_struct *vmcs)
   20.13  {
   20.14      int order;
   20.15  
   20.16 -    order = get_order(vmcs_size);
   20.17 +    order = get_order_from_bytes(vmcs_size);
   20.18      free_xenheap_pages(vmcs, order);
   20.19  }
   20.20  
   20.21 @@ -76,8 +76,8 @@ static inline int construct_vmcs_control
   20.22      error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
   20.23  
   20.24      /* need to use 0x1000 instead of PAGE_SIZE */
   20.25 -    io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000)); 
   20.26 -    io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000)); 
   20.27 +    io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 
   20.28 +    io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 
   20.29      memset(io_bitmap_a, 0xff, 0x1000);
   20.30      /* don't bother debug port access */
   20.31      clear_bit(PC_DEBUG_PORT, io_bitmap_a);
    21.1 --- a/xen/arch/x86/x86_32/mm.c	Fri Sep 09 08:56:14 2005 +0000
    21.2 +++ b/xen/arch/x86/x86_32/mm.c	Fri Sep 09 08:56:38 2005 +0000
    21.3 @@ -118,7 +118,8 @@ void __init paging_init(void)
    21.4      }
    21.5  
    21.6      /* Set up mapping cache for domain pages. */
    21.7 -    mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
    21.8 +    mapcache_order = get_order_from_bytes(
    21.9 +        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
   21.10      mapcache = alloc_xenheap_pages(mapcache_order);
   21.11      memset(mapcache, 0, PAGE_SIZE << mapcache_order);
   21.12      for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
    22.1 --- a/xen/common/grant_table.c	Fri Sep 09 08:56:14 2005 +0000
    22.2 +++ b/xen/common/grant_table.c	Fri Sep 09 08:56:38 2005 +0000
    22.3 @@ -399,7 +399,7 @@ static int
    22.4      {
    22.5          int              i;
    22.6          grant_mapping_t *new_mt;
    22.7 -        grant_table_t   *lgt      = ld->grant_table;
    22.8 +        grant_table_t   *lgt = ld->grant_table;
    22.9  
   22.10          if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
   22.11          {
   22.12 @@ -437,9 +437,8 @@ static int
   22.13              ref, dom, dev_hst_ro_flags);
   22.14  #endif
   22.15  
   22.16 -    if ( 0 <= ( rc = __gnttab_activate_grant_ref( ld, led, rd, ref,
   22.17 -                                                  dev_hst_ro_flags,
   22.18 -                                                  addr, &frame)))
   22.19 +    if ( (rc = __gnttab_activate_grant_ref(ld, led, rd, ref, dev_hst_ro_flags,
   22.20 +                                           addr, &frame)) >= 0 )
   22.21      {
   22.22          /*
   22.23           * Only make the maptrack live _after_ writing the pte, in case we 
   22.24 @@ -807,7 +806,8 @@ gnttab_donate(gnttab_donate_t *uop, unsi
   22.25      int i;
   22.26      int result = GNTST_okay;
   22.27  
   22.28 -    for (i = 0; i < count; i++) {
   22.29 +    for ( i = 0; i < count; i++ )
   22.30 +    {
   22.31          gnttab_donate_t *gop = &uop[i];
   22.32  #if GRANT_DEBUG
   22.33          printk("gnttab_donate: i=%d mfn=%lx domid=%d gref=%08x\n",
   22.34 @@ -815,19 +815,24 @@ gnttab_donate(gnttab_donate_t *uop, unsi
   22.35  #endif
   22.36          page = &frame_table[gop->mfn];
   22.37          
   22.38 -        if (unlikely(IS_XEN_HEAP_FRAME(page))) { 
   22.39 +        if ( unlikely(IS_XEN_HEAP_FRAME(page)))
   22.40 +        { 
   22.41              printk("gnttab_donate: xen heap frame mfn=%lx\n", 
   22.42                     (unsigned long) gop->mfn);
   22.43              gop->status = GNTST_bad_virt_addr;
   22.44              continue;
   22.45          }
   22.46 -        if (unlikely(!pfn_valid(page_to_pfn(page)))) {
   22.47 +        
   22.48 +        if ( unlikely(!pfn_valid(page_to_pfn(page))) )
   22.49 +        {
   22.50              printk("gnttab_donate: invalid pfn for mfn=%lx\n", 
   22.51                     (unsigned long) gop->mfn);
   22.52              gop->status = GNTST_bad_virt_addr;
   22.53              continue;
   22.54          }
   22.55 -        if (unlikely((e = find_domain_by_id(gop->domid)) == NULL)) {
   22.56 +
   22.57 +        if ( unlikely((e = find_domain_by_id(gop->domid)) == NULL) )
   22.58 +        {
   22.59              printk("gnttab_donate: can't find domain %d\n", gop->domid);
   22.60              gop->status = GNTST_bad_domain;
   22.61              continue;
   22.62 @@ -881,48 +886,23 @@ gnttab_donate(gnttab_donate_t *uop, unsi
   22.63           * headroom.  Also, a domain mustn't have PGC_allocated
   22.64           * pages when it is dying.
   22.65           */
   22.66 -#ifdef GRANT_DEBUG
   22.67 -        if (unlikely(e->tot_pages >= e->max_pages)) {
   22.68 -            printk("gnttab_dontate: no headroom tot_pages=%d max_pages=%d\n",
   22.69 -                   e->tot_pages, e->max_pages);
   22.70 -            spin_unlock(&e->page_alloc_lock);
   22.71 -            put_domain(e);
   22.72 -            gop->status = result = GNTST_general_error;
   22.73 -            break;
   22.74 -        }
   22.75 -        if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags))) {
   22.76 -            printk("gnttab_donate: target domain is dying\n");
   22.77 +        if ( unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags)) ||
   22.78 +             unlikely(e->tot_pages >= e->max_pages) ||
   22.79 +             unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle)) )
   22.80 +        {
   22.81 +            DPRINTK("gnttab_donate: Transferee has no reservation headroom "
   22.82 +                    "(%d,%d) or provided a bad grant ref (%08x) or "
   22.83 +                    "is dying (%lx)\n",
   22.84 +                    e->tot_pages, e->max_pages, gop->handle, e->domain_flags);
   22.85              spin_unlock(&e->page_alloc_lock);
   22.86              put_domain(e);
   22.87              gop->status = result = GNTST_general_error;
   22.88              break;
   22.89          }
   22.90 -        if (unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle))) {
   22.91 -            printk("gnttab_donate: gnttab_prepare_for_transfer fails.\n");
   22.92 -            spin_unlock(&e->page_alloc_lock);
   22.93 -            put_domain(e);
   22.94 -            gop->status = result = GNTST_general_error;
   22.95 -            break;
   22.96 -        }
   22.97 -#else
   22.98 -        ASSERT(e->tot_pages <= e->max_pages);
   22.99 -        if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags)) ||
  22.100 -            unlikely(e->tot_pages == e->max_pages) ||
  22.101 -            unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle))) {
  22.102 -            printk("gnttab_donate: Transferee has no reservation headroom (%d,"
  22.103 -                   "%d) or provided a bad grant ref (%08x) or is dying (%p)\n",
  22.104 -                   e->tot_pages, e->max_pages, gop->handle, e->d_flags);
  22.105 -            spin_unlock(&e->page_alloc_lock);
  22.106 -            put_domain(e);
  22.107 -            /* XXX SMH: better error return here would be useful */
  22.108 -            gop->status = result = GNTST_general_error;
  22.109 -            break;
  22.110 -        }
  22.111 -#endif
  22.112 +
  22.113          /* Okay, add the page to 'e'. */
  22.114 -        if (unlikely(e->tot_pages++ == 0)) {
  22.115 +        if ( unlikely(e->tot_pages++ == 0) )
  22.116              get_knownalive_domain(e);
  22.117 -        }
  22.118          list_add_tail(&page->list, &e->page_list);
  22.119          page_set_owner(page, e);
  22.120          
  22.121 @@ -938,6 +918,7 @@ gnttab_donate(gnttab_donate_t *uop, unsi
  22.122          
  22.123          gop->status = GNTST_okay;
  22.124      }
  22.125 +
  22.126      return result;
  22.127  }
  22.128  
  22.129 @@ -957,38 +938,38 @@ do_grant_table_op(
  22.130      
  22.131      rc = -EFAULT;
  22.132      switch ( cmd )
  22.133 -        {
  22.134 -        case GNTTABOP_map_grant_ref:
  22.135 -            if ( unlikely(!array_access_ok(
  22.136 -                              uop, count, sizeof(gnttab_map_grant_ref_t))) )
  22.137 -                goto out;
  22.138 -            rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
  22.139 -            break;
  22.140 -        case GNTTABOP_unmap_grant_ref:
  22.141 -            if ( unlikely(!array_access_ok(
  22.142 -                              uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
  22.143 -                goto out;
  22.144 -            rc = gnttab_unmap_grant_ref((gnttab_unmap_grant_ref_t *)uop, 
  22.145 -                                        count);
  22.146 -            break;
  22.147 -        case GNTTABOP_setup_table:
  22.148 -            rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
  22.149 -            break;
  22.150 +    {
  22.151 +    case GNTTABOP_map_grant_ref:
  22.152 +        if ( unlikely(!array_access_ok(
  22.153 +            uop, count, sizeof(gnttab_map_grant_ref_t))) )
  22.154 +            goto out;
  22.155 +        rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
  22.156 +        break;
  22.157 +    case GNTTABOP_unmap_grant_ref:
  22.158 +        if ( unlikely(!array_access_ok(
  22.159 +            uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
  22.160 +            goto out;
  22.161 +        rc = gnttab_unmap_grant_ref(
  22.162 +            (gnttab_unmap_grant_ref_t *)uop, count);
  22.163 +        break;
  22.164 +    case GNTTABOP_setup_table:
  22.165 +        rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
  22.166 +        break;
  22.167  #if GRANT_DEBUG
  22.168 -        case GNTTABOP_dump_table:
  22.169 -            rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
  22.170 -            break;
  22.171 +    case GNTTABOP_dump_table:
  22.172 +        rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
  22.173 +        break;
  22.174  #endif
  22.175 -        case GNTTABOP_donate:
  22.176 -            if (unlikely(!array_access_ok(uop, count, 
  22.177 -                                          sizeof(gnttab_donate_t))))
  22.178 -                goto out;
  22.179 -            rc = gnttab_donate(uop, count);
  22.180 -            break;
  22.181 -        default:
  22.182 -            rc = -ENOSYS;
  22.183 -            break;
  22.184 -        }
  22.185 +    case GNTTABOP_donate:
  22.186 +        if (unlikely(!array_access_ok(
  22.187 +            uop, count, sizeof(gnttab_donate_t))))
  22.188 +            goto out;
  22.189 +        rc = gnttab_donate(uop, count);
  22.190 +        break;
  22.191 +    default:
  22.192 +        rc = -ENOSYS;
  22.193 +        break;
  22.194 +    }
  22.195      
  22.196    out:
  22.197      UNLOCK_BIGLOCK(d);
  22.198 @@ -1021,17 +1002,17 @@ gnttab_check_unmap(
  22.199      lgt = ld->grant_table;
  22.200      
  22.201  #if GRANT_DEBUG_VERBOSE
  22.202 -    if ( ld->domain_id != 0 ) {
  22.203 -            DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
  22.204 -                    rd->domain_id, ld->domain_id, frame, readonly);
  22.205 -      }
  22.206 +    if ( ld->domain_id != 0 )
  22.207 +        DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
  22.208 +                rd->domain_id, ld->domain_id, frame, readonly);
  22.209  #endif
  22.210      
  22.211      /* Fast exit if we're not mapping anything using grant tables */
  22.212      if ( lgt->map_count == 0 )
  22.213          return 0;
  22.214      
  22.215 -    if ( get_domain(rd) == 0 ) {
  22.216 +    if ( get_domain(rd) == 0 )
  22.217 +    {
  22.218          DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n",
  22.219                  rd->domain_id);
  22.220          return 0;
  22.221 @@ -1268,8 +1249,11 @@ grant_table_create(
  22.222      for ( i = 0; i < NR_GRANT_FRAMES; i++ )
  22.223      {
  22.224          SHARE_PFN_WITH_DOMAIN(
  22.225 -            virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
  22.226 -        set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY);
  22.227 +            virt_to_page((char *)t->shared + (i * PAGE_SIZE)),
  22.228 +            d);
  22.229 +        set_pfn_from_mfn(
  22.230 +            (virt_to_phys(t->shared) >> PAGE_SHIFT) + i,
  22.231 +            INVALID_M2P_ENTRY);
  22.232      }
  22.233  
  22.234      /* Okay, install the structure. */
  22.235 @@ -1306,57 +1290,53 @@ gnttab_release_dev_mappings(grant_table_
  22.236      {
  22.237          map = &gt->maptrack[handle];
  22.238  
  22.239 -        if ( map->ref_and_flags & GNTMAP_device_map )
  22.240 -        {
  22.241 -            dom = map->domid;
  22.242 -            ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
  22.243 +        if ( !(map->ref_and_flags & GNTMAP_device_map) )
  22.244 +            continue;
  22.245  
  22.246 -            DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
  22.247 -                    handle, ref,
  22.248 -                    map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
  22.249 +        dom = map->domid;
  22.250 +        ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
  22.251  
  22.252 -            if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
  22.253 -                 unlikely(ld == rd) )
  22.254 +        DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
  22.255 +                handle, ref, map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
  22.256 +
  22.257 +        if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
  22.258 +             unlikely(ld == rd) )
  22.259 +        {
  22.260 +            if ( rd != NULL )
  22.261 +                put_domain(rd);
  22.262 +            printk(KERN_WARNING "Grant release: No dom%d\n", dom);
  22.263 +            continue;
  22.264 +        }
  22.265 +
  22.266 +        act = &rd->grant_table->active[ref];
  22.267 +        sha = &rd->grant_table->shared[ref];
  22.268 +
  22.269 +        spin_lock(&rd->grant_table->lock);
  22.270 +
  22.271 +        if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
  22.272 +        {
  22.273 +            frame = act->frame;
  22.274 +
  22.275 +            if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
  22.276 +                 ( (act->pin & GNTPIN_devw_mask) >  0 ) )
  22.277              {
  22.278 -                if ( rd != NULL )
  22.279 -                    put_domain(rd);
  22.280 -
  22.281 -                printk(KERN_WARNING "Grant release: No dom%d\n", dom);
  22.282 -                continue;
  22.283 +                clear_bit(_GTF_writing, &sha->flags);
  22.284 +                put_page_type(&frame_table[frame]);
  22.285              }
  22.286  
  22.287 -            act = &rd->grant_table->active[ref];
  22.288 -            sha = &rd->grant_table->shared[ref];
  22.289 -
  22.290 -            spin_lock(&rd->grant_table->lock);
  22.291 -
  22.292 -            if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
  22.293 +            map->ref_and_flags &= ~GNTMAP_device_map;
  22.294 +            act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
  22.295 +            if ( act->pin == 0 )
  22.296              {
  22.297 -                frame = act->frame;
  22.298 -
  22.299 -                if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
  22.300 -                     ( (act->pin & GNTPIN_devw_mask) >  0 ) )
  22.301 -                {
  22.302 -                    clear_bit(_GTF_writing, &sha->flags);
  22.303 -                    put_page_type(&frame_table[frame]);
  22.304 -                }
  22.305 +                clear_bit(_GTF_reading, &sha->flags);
  22.306 +                map->ref_and_flags = 0;
  22.307 +                put_page(&frame_table[frame]);
  22.308 +            }
  22.309 +        }
  22.310  
  22.311 -                act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
  22.312 +        spin_unlock(&rd->grant_table->lock);
  22.313  
  22.314 -                if ( act->pin == 0 )
  22.315 -                {
  22.316 -                    clear_bit(_GTF_reading, &sha->flags);
  22.317 -                    map->ref_and_flags = 0;
  22.318 -                    put_page(&frame_table[frame]);
  22.319 -                }
  22.320 -                else
  22.321 -                    map->ref_and_flags &= ~GNTMAP_device_map;
  22.322 -            }
  22.323 -
  22.324 -            spin_unlock(&rd->grant_table->lock);
  22.325 -
  22.326 -            put_domain(rd);
  22.327 -        }
  22.328 +        put_domain(rd);
  22.329      }
  22.330  }
  22.331  
    23.1 --- a/xen/common/memory.c	Fri Sep 09 08:56:14 2005 +0000
    23.2 +++ b/xen/common/memory.c	Fri Sep 09 08:56:38 2005 +0000
    23.3 @@ -31,8 +31,8 @@ increase_reservation(
    23.4      struct pfn_info *page;
    23.5      unsigned long    i;
    23.6  
    23.7 -    if ( (extent_list != NULL)
    23.8 -         && !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
    23.9 +    if ( (extent_list != NULL) &&
   23.10 +         !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
   23.11          return 0;
   23.12  
   23.13      if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
   23.14 @@ -52,13 +52,14 @@ increase_reservation(
   23.15          if ( unlikely((page = alloc_domheap_pages(
   23.16              d, extent_order, flags)) == NULL) )
   23.17          {
   23.18 -            DPRINTK("Could not allocate a frame\n");
   23.19 +            DPRINTK("Could not allocate order=%d extent: id=%d flags=%x\n",
   23.20 +                    extent_order, d->domain_id, flags);
   23.21              return i;
   23.22          }
   23.23  
   23.24          /* Inform the domain of the new page's machine address. */ 
   23.25 -        if ( (extent_list != NULL)
   23.26 -             && (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
   23.27 +        if ( (extent_list != NULL) &&
   23.28 +             (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
   23.29              return i;
   23.30      }
   23.31  
   23.32 @@ -152,8 +153,9 @@ long do_memory_op(int cmd, void *arg)
   23.33              reservation.extent_start += start_extent;
   23.34          reservation.nr_extents -= start_extent;
   23.35  
   23.36 -        if ( unlikely(reservation.address_bits != 0)
   23.37 -             && (reservation.address_bits > (get_order(max_page)+PAGE_SHIFT)) )
   23.38 +        if ( (reservation.address_bits != 0) &&
   23.39 +             (reservation.address_bits <
   23.40 +              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
   23.41          {
   23.42              if ( reservation.address_bits < 31 )
   23.43                  return -ENOMEM;
    24.1 --- a/xen/common/page_alloc.c	Fri Sep 09 08:56:14 2005 +0000
    24.2 +++ b/xen/common/page_alloc.c	Fri Sep 09 08:56:38 2005 +0000
    24.3 @@ -216,7 +216,7 @@ unsigned long alloc_boot_pages(unsigned 
    24.4  #define NR_ZONES    3
    24.5  
    24.6  
    24.7 -#define MAX_DMADOM_PFN 0x7FFFF /* 31 addressable bits */
    24.8 +#define MAX_DMADOM_PFN 0x7FFFFUL /* 31 addressable bits */
    24.9  #define pfn_dom_zone_type(_pfn)                                 \
   24.10      (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
   24.11  
   24.12 @@ -485,43 +485,40 @@ void free_xenheap_pages(void *v, unsigne
   24.13  
   24.14  void init_domheap_pages(physaddr_t ps, physaddr_t pe)
   24.15  {
   24.16 +    unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;
   24.17 +
   24.18      ASSERT(!in_irq());
   24.19  
   24.20 -    ps = round_pgup(ps) >> PAGE_SHIFT;
   24.21 -    pe = round_pgdown(pe) >> PAGE_SHIFT;
   24.22 -    if ( pe <= ps )
   24.23 -        return;
   24.24 +    s_tot = round_pgup(ps) >> PAGE_SHIFT;
   24.25 +    e_tot = round_pgdown(pe) >> PAGE_SHIFT;
   24.26  
   24.27 -    if ( (ps < MAX_DMADOM_PFN) && (pe > MAX_DMADOM_PFN) )
   24.28 -    {
   24.29 -        init_heap_pages(
   24.30 -            MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
   24.31 -        init_heap_pages(
   24.32 -            MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN), pe - MAX_DMADOM_PFN);
   24.33 -    }
   24.34 -    else
   24.35 -    {
   24.36 -        init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
   24.37 -    }
   24.38 +    s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
   24.39 +    e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
   24.40 +    if ( s_dma < e_dma )
   24.41 +        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(s_dma), e_dma - s_dma);
   24.42 +
   24.43 +    s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
   24.44 +    e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
   24.45 +    if ( s_nrm < e_nrm )
   24.46 +        init_heap_pages(MEMZONE_DOM, pfn_to_page(s_nrm), e_nrm - s_nrm);
   24.47  }
   24.48  
   24.49  
   24.50  struct pfn_info *alloc_domheap_pages(
   24.51      struct domain *d, unsigned int order, unsigned int flags)
   24.52  {
   24.53 -    struct pfn_info *pg;
   24.54 +    struct pfn_info *pg = NULL;
   24.55      cpumask_t mask;
   24.56      int i;
   24.57  
   24.58      ASSERT(!in_irq());
   24.59  
   24.60 -    pg = NULL;
   24.61 -    if (! (flags & ALLOC_DOM_DMA))
   24.62 +    if ( !(flags & ALLOC_DOM_DMA) )
   24.63          pg = alloc_heap_pages(MEMZONE_DOM, order);
   24.64 -    if (pg == NULL) {
   24.65 -        if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) )
   24.66 +
   24.67 +    if ( pg == NULL )
   24.68 +        if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL )
   24.69              return NULL;
   24.70 -    }
   24.71  
   24.72      mask = pg->u.free.cpumask;
   24.73      tlbflush_filter(mask, pg->tlbflush_timestamp);
    25.1 --- a/xen/common/trace.c	Fri Sep 09 08:56:14 2005 +0000
    25.2 +++ b/xen/common/trace.c	Fri Sep 09 08:56:38 2005 +0000
    25.3 @@ -66,7 +66,7 @@ void init_trace_bufs(void)
    25.4      }
    25.5  
    25.6      nr_pages = num_online_cpus() * opt_tbuf_size;
    25.7 -    order    = get_order(nr_pages * PAGE_SIZE);
    25.8 +    order    = get_order_from_pages(nr_pages);
    25.9      
   25.10      if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
   25.11      {
    26.1 --- a/xen/common/xmalloc.c	Fri Sep 09 08:56:14 2005 +0000
    26.2 +++ b/xen/common/xmalloc.c	Fri Sep 09 08:56:38 2005 +0000
    26.3 @@ -86,7 +86,7 @@ static void *xmalloc_new_page(size_t siz
    26.4  static void *xmalloc_whole_pages(size_t size)
    26.5  {
    26.6      struct xmalloc_hdr *hdr;
    26.7 -    unsigned int pageorder = get_order(size);
    26.8 +    unsigned int pageorder = get_order_from_bytes(size);
    26.9  
   26.10      hdr = alloc_xenheap_pages(pageorder);
   26.11      if ( hdr == NULL )
   26.12 @@ -159,7 +159,7 @@ void xfree(const void *p)
   26.13      /* Big allocs free directly. */
   26.14      if ( hdr->size >= PAGE_SIZE )
   26.15      {
   26.16 -        free_xenheap_pages(hdr, get_order(hdr->size));
   26.17 +        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
   26.18          return;
   26.19      }
   26.20  
    27.1 --- a/xen/drivers/char/console.c	Fri Sep 09 08:56:14 2005 +0000
    27.2 +++ b/xen/drivers/char/console.c	Fri Sep 09 08:56:38 2005 +0000
    27.3 @@ -627,7 +627,7 @@ static int __init debugtrace_init(void)
    27.4      if ( bytes == 0 )
    27.5          return 0;
    27.6  
    27.7 -    order = get_order(bytes);
    27.8 +    order = get_order_from_bytes(bytes);
    27.9      debugtrace_buf = alloc_xenheap_pages(order);
   27.10      ASSERT(debugtrace_buf != NULL);
   27.11  
    28.1 --- a/xen/drivers/char/serial.c	Fri Sep 09 08:56:14 2005 +0000
    28.2 +++ b/xen/drivers/char/serial.c	Fri Sep 09 08:56:38 2005 +0000
    28.3 @@ -366,8 +366,9 @@ void serial_register_uart(int idx, struc
    28.4  void serial_async_transmit(struct serial_port *port)
    28.5  {
    28.6      BUG_ON(!port->driver->tx_empty);
    28.7 -    if ( !port->txbuf )
    28.8 -        port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
    28.9 +    if ( port->txbuf == NULL )
   28.10 +        port->txbuf = alloc_xenheap_pages(
   28.11 +            get_order_from_bytes(SERIAL_TXBUFSZ));
   28.12  }
   28.13  
   28.14  /*
    29.1 --- a/xen/include/asm-x86/page.h	Fri Sep 09 08:56:14 2005 +0000
    29.2 +++ b/xen/include/asm-x86/page.h	Fri Sep 09 08:56:38 2005 +0000
    29.3 @@ -280,7 +280,7 @@ extern void paging_init(void);
    29.4  
    29.5  #ifndef __ASSEMBLY__
    29.6  
    29.7 -static __inline__ int get_order(unsigned long size)
    29.8 +static inline int get_order_from_bytes(physaddr_t size)
    29.9  {
   29.10      int order;
   29.11      size = (size-1) >> PAGE_SHIFT;
   29.12 @@ -289,6 +289,15 @@ static __inline__ int get_order(unsigned
   29.13      return order;
   29.14  }
   29.15  
   29.16 +static inline int get_order_from_pages(unsigned long nr_pages)
   29.17 +{
   29.18 +    int order;
   29.19 +    nr_pages--;
   29.20 +    for ( order = 0; nr_pages; order++ )
   29.21 +        nr_pages >>= 1;
   29.22 +    return order;
   29.23 +}
   29.24 +
   29.25  /* Allocator functions for Xen pagetables. */
   29.26  struct pfn_info *alloc_xen_pagetable(void);
   29.27  void free_xen_pagetable(struct pfn_info *pg);