ia64/xen-unstable

changeset 6716:2704a88c3295

merge?
author cl349@firebug.cl.cam.ac.uk
date Fri Sep 09 08:56:38 2005 +0000 (2005-09-09)
parents 22c30df92b11 4cdf880c9463
children cdfa7dd00c44
files linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6-xen-sparse/drivers/xen/netback/interface.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c linux-2.6-xen-sparse/drivers/xen/usbback/interface.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h tools/libxc/xc_domain.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_private.c tools/libxc/xenctrl.h tools/python/xen/lowlevel/xc/xc.c tools/python/xen/xend/image.py xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/mm.c xen/common/grant_table.c xen/common/memory.c xen/common/page_alloc.c xen/common/trace.c xen/common/xmalloc.c xen/drivers/char/console.c xen/drivers/char/serial.c xen/include/asm-x86/page.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Fri Sep 09 08:56:14 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Fri Sep 09 08:56:38 2005 +0000
     1.3 @@ -2202,7 +2202,7 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
     1.4  CONFIG_PROC_FS=y
     1.5  CONFIG_PROC_KCORE=y
     1.6  CONFIG_SYSFS=y
     1.7 -# CONFIG_DEVFS_FS is not set
     1.8 +CONFIG_DEVFS_FS=y
     1.9  CONFIG_DEVPTS_FS_XATTR=y
    1.10  CONFIG_DEVPTS_FS_SECURITY=y
    1.11  CONFIG_TMPFS=y
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Sep 09 08:56:14 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Sep 09 08:56:38 2005 +0000
     2.3 @@ -22,13 +22,14 @@
     2.4  #define ISA_START_ADDRESS	0x0
     2.5  #define ISA_END_ADDRESS		0x100000
     2.6  
     2.7 +#if 0 /* not PAE safe */
     2.8  /* These hacky macros avoid phys->machine translations. */
     2.9  #define __direct_pte(x) ((pte_t) { (x) } )
    2.10  #define __direct_mk_pte(page_nr,pgprot) \
    2.11    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
    2.12  #define direct_mk_pte_phys(physpage, pgprot) \
    2.13    __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
    2.14 -
    2.15 +#endif
    2.16  
    2.17  static int direct_remap_area_pte_fn(pte_t *pte, 
    2.18  				    struct page *pte_page,
    2.19 @@ -37,16 +38,16 @@ static int direct_remap_area_pte_fn(pte_
    2.20  {
    2.21  	mmu_update_t **v = (mmu_update_t **)data;
    2.22  
    2.23 -	(*v)->ptr = ((maddr_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
    2.24 +	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
    2.25  		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
    2.26  	(*v)++;
    2.27  
    2.28  	return 0;
    2.29  }
    2.30  
    2.31 -int direct_remap_area_pages(struct mm_struct *mm,
    2.32 +int direct_remap_pfn_range(struct mm_struct *mm,
    2.33  			    unsigned long address, 
    2.34 -			    unsigned long machine_addr,
    2.35 +			    unsigned long mfn,
    2.36  			    unsigned long size, 
    2.37  			    pgprot_t prot,
    2.38  			    domid_t  domid)
    2.39 @@ -77,9 +78,9 @@ int direct_remap_area_pages(struct mm_st
    2.40  		 * Fill in the machine address: PTE ptr is done later by
    2.41  		 * __direct_remap_area_pages(). 
    2.42  		 */
    2.43 -		v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot));
    2.44 +		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
    2.45  
    2.46 -		machine_addr += PAGE_SIZE;
    2.47 +		mfn++;
    2.48  		address += PAGE_SIZE; 
    2.49  		v++;
    2.50  	}
    2.51 @@ -97,8 +98,10 @@ int direct_remap_area_pages(struct mm_st
    2.52  	return 0;
    2.53  }
    2.54  
    2.55 -EXPORT_SYMBOL(direct_remap_area_pages);
    2.56 +EXPORT_SYMBOL(direct_remap_pfn_range);
    2.57  
    2.58 +
    2.59 +/* FIXME: This is horribly broken on PAE */ 
    2.60  static int lookup_pte_fn(
    2.61  	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
    2.62  {
    2.63 @@ -218,7 +221,7 @@ void __iomem * __ioremap(unsigned long p
    2.64  #ifdef __x86_64__
    2.65  	flags |= _PAGE_USER;
    2.66  #endif
    2.67 -	if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
    2.68 +	if (direct_remap_pfn_range(&init_mm, (unsigned long) addr, phys_addr>>PAGE_SHIFT,
    2.69  				    size, __pgprot(flags), domid)) {
    2.70  		vunmap((void __force *) addr);
    2.71  		return NULL;
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Fri Sep 09 08:56:14 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Fri Sep 09 08:56:38 2005 +0000
     3.3 @@ -182,7 +182,7 @@ int blkif_ioctl(struct inode *inode, str
     3.4  static int blkif_queue_request(struct request *req)
     3.5  {
     3.6  	struct blkfront_info *info = req->rq_disk->private_data;
     3.7 -	unsigned long buffer_ma;
     3.8 +	unsigned long buffer_mfn;
     3.9  	blkif_request_t *ring_req;
    3.10  	struct bio *bio;
    3.11  	struct bio_vec *bvec;
    3.12 @@ -221,7 +221,7 @@ static int blkif_queue_request(struct re
    3.13  		bio_for_each_segment (bvec, bio, idx) {
    3.14  			BUG_ON(ring_req->nr_segments
    3.15  			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
    3.16 -			buffer_ma = page_to_phys(bvec->bv_page);
    3.17 +			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
    3.18  			fsect = bvec->bv_offset >> 9;
    3.19  			lsect = fsect + (bvec->bv_len >> 9) - 1;
    3.20  			/* install a grant reference. */
    3.21 @@ -231,11 +231,11 @@ static int blkif_queue_request(struct re
    3.22  			gnttab_grant_foreign_access_ref(
    3.23  				ref,
    3.24  				info->backend_id,
    3.25 -				buffer_ma >> PAGE_SHIFT,
    3.26 +				buffer_mfn,
    3.27  				rq_data_dir(req) );
    3.28  
    3.29  			info->shadow[id].frame[ring_req->nr_segments] =
    3.30 -				buffer_ma >> PAGE_SHIFT;
    3.31 +				buffer_mfn;
    3.32  
    3.33  			ring_req->frame_and_sects[ring_req->nr_segments] =
    3.34  				blkif_fas_from_gref(ref, fsect, lsect);
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Sep 09 08:56:14 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Sep 09 08:56:38 2005 +0000
     4.3 @@ -154,12 +154,12 @@ static int map_frontend_pages(netif_t *n
     4.4      pgprot_t      prot = __pgprot(_KERNPG_TABLE);
     4.5      int           err;
     4.6  
     4.7 -    err = direct_remap_area_pages(&init_mm, localaddr,
     4.8 -				  tx_ring_ref<<PAGE_SHIFT, PAGE_SIZE,
     4.9 +    err = direct_remap_pfn_range(&init_mm, localaddr,
    4.10 +				  tx_ring_ref, PAGE_SIZE,
    4.11  				  prot, netif->domid); 
    4.12      
    4.13 -    err |= direct_remap_area_pages(&init_mm, localaddr + PAGE_SIZE,
    4.14 -				  rx_ring_ref<<PAGE_SHIFT, PAGE_SIZE,
    4.15 +    err |= direct_remap_pfn_range(&init_mm, localaddr + PAGE_SIZE,
    4.16 +				  rx_ring_ref, PAGE_SIZE,
    4.17  				  prot, netif->domid);
    4.18  
    4.19      if (err)
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 09 08:56:14 2005 +0000
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 09 08:56:38 2005 +0000
     5.3 @@ -297,7 +297,7 @@ static void net_rx_action(unsigned long 
     5.4          mmuext->mfn = old_mfn;
     5.5          mmuext++;
     5.6  #endif
     5.7 -        mmu->ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
     5.8 +        mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
     5.9          mmu->val = __pa(vdata) >> PAGE_SHIFT;  
    5.10          mmu++;
    5.11  
     6.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 09 08:56:14 2005 +0000
     6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 09 08:56:38 2005 +0000
     6.3 @@ -618,7 +618,7 @@ static int netif_poll(struct net_device 
     6.4  
     6.5          /* Remap the page. */
     6.6  #ifdef CONFIG_XEN_NETDEV_GRANT
     6.7 -        mmu->ptr = mfn << PAGE_SHIFT | MMU_MACHPHYS_UPDATE;
     6.8 +        mmu->ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
     6.9  #else
    6.10          mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
    6.11  #endif
     7.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Fri Sep 09 08:56:14 2005 +0000
     7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Fri Sep 09 08:56:38 2005 +0000
     7.3 @@ -116,9 +116,9 @@ static int privcmd_ioctl(struct inode *i
     7.4                  if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
     7.5                      return -EINVAL;
     7.6  
     7.7 -                if ( (rc = direct_remap_area_pages(vma->vm_mm, 
     7.8 +                if ( (rc = direct_remap_pfn_range(vma->vm_mm, 
     7.9                                                     msg[j].va&PAGE_MASK, 
    7.10 -                                                   msg[j].mfn<<PAGE_SHIFT, 
    7.11 +                                                   msg[j].mfn, 
    7.12                                                     msg[j].npages<<PAGE_SHIFT, 
    7.13                                                     vma->vm_page_prot,
    7.14                                                     mmapcmd.dom)) < 0 )
    10.1 --- a/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c	Fri Sep 09 08:56:14 2005 +0000
    10.2 +++ b/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c	Fri Sep 09 08:56:38 2005 +0000
    10.3 @@ -161,8 +161,8 @@ void usbif_connect(usbif_be_connect_t *c
    10.4      }
    10.5  
    10.6      prot = __pgprot(_KERNPG_TABLE);
    10.7 -    error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
    10.8 -                                    shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
    10.9 +    error = direct_remap_pfn_range(&init_mm, VMALLOC_VMADDR(vma->addr),
   10.10 +                                    shmem_frame, PAGE_SIZE,
   10.11                                      prot, domid);
   10.12      if ( error != 0 )
   10.13      {
    11.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Fri Sep 09 08:56:14 2005 +0000
    11.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Fri Sep 09 08:56:38 2005 +0000
    11.3 @@ -460,9 +460,9 @@ void make_pages_writable(void *va, unsig
    11.4  #define kern_addr_valid(addr)	(1)
    11.5  #endif /* !CONFIG_DISCONTIGMEM */
    11.6  
    11.7 -int direct_remap_area_pages(struct mm_struct *mm,
    11.8 +int direct_remap_pfn_range(struct mm_struct *mm,
    11.9                              unsigned long address, 
   11.10 -                            unsigned long machine_addr,
   11.11 +                            unsigned long mfn,
   11.12                              unsigned long size, 
   11.13                              pgprot_t prot,
   11.14                              domid_t  domid);
   11.15 @@ -474,10 +474,10 @@ int touch_pte_range(struct mm_struct *mm
   11.16                      unsigned long size);
   11.17  
   11.18  #define io_remap_page_range(vma,from,phys,size,prot) \
   11.19 -direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
   11.20 +direct_remap_pfn_range(vma->vm_mm,from,phys>>PAGE_SHIFT,size,prot,DOMID_IO)
   11.21  
   11.22  #define io_remap_pfn_range(vma,from,pfn,size,prot) \
   11.23 -direct_remap_area_pages(vma->vm_mm,from,pfn<<PAGE_SHIFT,size,prot,DOMID_IO)
   11.24 +direct_remap_pfn_range(vma->vm_mm,from,pfn,size,prot,DOMID_IO)
   11.25  
   11.26  #define MK_IOSPACE_PFN(space, pfn)	(pfn)
   11.27  #define GET_IOSPACE(pfn)		0
    12.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	Fri Sep 09 08:56:14 2005 +0000
    12.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	Fri Sep 09 08:56:38 2005 +0000
    12.3 @@ -526,28 +526,26 @@ extern int kern_addr_valid(unsigned long
    12.4  
    12.5  #define DOMID_LOCAL (0xFFFFU)
    12.6  
    12.7 -int direct_remap_area_pages(struct mm_struct *mm,
    12.8 +int direct_remap_pfn_range(struct mm_struct *mm,
    12.9                              unsigned long address,
   12.10 -                            unsigned long machine_addr,
   12.11 +                            unsigned long mfn,
   12.12                              unsigned long size,
   12.13                              pgprot_t prot,
   12.14                              domid_t  domid);
   12.15 -int __direct_remap_area_pages(struct mm_struct *mm,
   12.16 -                              unsigned long address,
   12.17 -                              unsigned long size,
   12.18 -                              mmu_update_t *v);
   12.19 +
   12.20  int create_lookup_pte_addr(struct mm_struct *mm,
   12.21                             unsigned long address,
   12.22                             unsigned long *ptep);
   12.23 +
   12.24  int touch_pte_range(struct mm_struct *mm,
   12.25                      unsigned long address,
   12.26                      unsigned long size);
   12.27  
   12.28  #define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
   12.29 -		direct_remap_area_pages((vma)->vm_mm,vaddr,paddr,size,prot,DOMID_IO)
   12.30 +		direct_remap_pfn_range((vma)->vm_mm,vaddr,paddr>>PAGE_SHIFT,size,prot,DOMID_IO)
   12.31  
   12.32  #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
   12.33 -		direct_remap_area_pages((vma)->vm_mm,vaddr,(pfn)<<PAGE_SHIFT,size,prot,DOMID_IO)
   12.34 +		direct_remap_pfn_range((vma)->vm_mm,vaddr,pfn,size,prot,DOMID_IO)
   12.35  
   12.36  #define MK_IOSPACE_PFN(space, pfn)	(pfn)
   12.37  #define GET_IOSPACE(pfn)		0
    13.1 --- a/tools/libxc/xc_domain.c	Fri Sep 09 08:56:14 2005 +0000
    13.2 +++ b/tools/libxc/xc_domain.c	Fri Sep 09 08:56:38 2005 +0000
    13.3 @@ -262,23 +262,66 @@ int xc_domain_setmaxmem(int xc_handle,
    13.4  
    13.5  int xc_domain_memory_increase_reservation(int xc_handle,
    13.6                                            u32 domid, 
    13.7 -                                          unsigned int mem_kb)
    13.8 +                                          unsigned long nr_extents,
    13.9 +                                          unsigned int extent_order,
   13.10 +                                          unsigned int address_bits,
   13.11 +					  unsigned long *extent_start)
   13.12  {
   13.13      int err;
   13.14 -    unsigned int npages = mem_kb / (PAGE_SIZE/1024);
   13.15      struct xen_memory_reservation reservation = {
   13.16 -        .nr_extents   = npages,
   13.17 -        .extent_order = 0,
   13.18 +        .extent_start = extent_start, /* may be NULL */
   13.19 +        .nr_extents   = nr_extents,
   13.20 +        .extent_order = extent_order,  
   13.21 +        .address_bits = address_bits,
   13.22          .domid        = domid
   13.23      };
   13.24  
   13.25      err = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation);
   13.26 -    if (err == npages)
   13.27 +    if (err == nr_extents)
   13.28          return 0;
   13.29  
   13.30      if (err > 0) {
    13.31 +        fprintf(stderr,"Failed allocation for dom %d : %ld pages order %d addr_bits %d\n",
   13.32 +                                 domid, nr_extents, extent_order, address_bits);
   13.33          errno = ENOMEM;
   13.34          err = -1;
   13.35      }
   13.36      return err;
   13.37  }
   13.38 +
   13.39 +int xc_domain_memory_decrease_reservation(int xc_handle,
   13.40 +                                          u32 domid, 
   13.41 +                                          unsigned long nr_extents,
   13.42 +                                          unsigned int extent_order,
   13.43 +					  unsigned long *extent_start)
   13.44 +{
   13.45 +    int err;
   13.46 +    struct xen_memory_reservation reservation = {
   13.47 +        .extent_start = extent_start, 
   13.48 +        .nr_extents   = nr_extents,
   13.49 +        .extent_order = extent_order,  
   13.50 +        .address_bits = 0,
   13.51 +        .domid        = domid
   13.52 +    };
   13.53 +
   13.54 +    if (extent_start == NULL)
   13.55 +    {
   13.56 +        fprintf(stderr,"decrease_reservation extent_start is NULL!\n");
   13.57 +        errno = EINVAL;
   13.58 +        err = -1;
   13.59 +	goto out;
   13.60 +    }
   13.61 +
    13.62 +    err = xc_memory_op(xc_handle, XENMEM_decrease_reservation, &reservation);
   13.63 +    if (err == nr_extents)
   13.64 +        return 0;
   13.65 +
   13.66 +    if (err > 0) {
    13.67 +        fprintf(stderr,"Failed de-allocation for dom %d : %ld pages order %d\n",
   13.68 +                                 domid, nr_extents, extent_order);
   13.69 +        errno = EBUSY;
   13.70 +        err = -1;
   13.71 +    }
   13.72 +out:
   13.73 +    return err;
   13.74 +}
    14.1 --- a/tools/libxc/xc_linux_build.c	Fri Sep 09 08:56:14 2005 +0000
    14.2 +++ b/tools/libxc/xc_linux_build.c	Fri Sep 09 08:56:38 2005 +0000
    14.3 @@ -57,7 +57,7 @@ static int probeimageformat(char *image,
    14.4  }
    14.5  
    14.6  #define alloc_pt(ltab, vltab) \
    14.7 -        ltab = page_array[ppt_alloc++] << PAGE_SHIFT; \
    14.8 +        ltab = (unsigned long long)(page_array[ppt_alloc++]) << PAGE_SHIFT; \
    14.9          if (vltab != NULL) { \
   14.10              munmap(vltab, PAGE_SIZE); \
   14.11          } \
   14.12 @@ -128,18 +128,37 @@ static int setup_pg_tables_pae(int xc_ha
   14.13      l1_pgentry_64_t *vl1tab=NULL, *vl1e=NULL;
   14.14      l2_pgentry_64_t *vl2tab=NULL, *vl2e=NULL;
   14.15      l3_pgentry_64_t *vl3tab=NULL, *vl3e=NULL;
   14.16 -    unsigned long l1tab = 0;
   14.17 -    unsigned long l2tab = 0;
   14.18 -    unsigned long l3tab = 0;
   14.19 +    unsigned long long l1tab = 0;
   14.20 +    unsigned long long l2tab = 0;
   14.21 +    unsigned long long l3tab = 0;
   14.22      unsigned long ppt_alloc;
   14.23      unsigned long count;
   14.24  
   14.25      /* First allocate page for page dir. */
   14.26      ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
   14.27 +
   14.28 +    if ( page_array[ppt_alloc] > 0xfffff )
   14.29 +    {
   14.30 +	unsigned long nmfn;
   14.31 +	nmfn = xc_make_page_below_4G( xc_handle, dom, page_array[ppt_alloc] );
   14.32 +	if ( nmfn == 0 )
   14.33 +	{
   14.34 +	    fprintf(stderr, "Couldn't get a page below 4GB :-(\n");
   14.35 +	    goto error_out;
   14.36 +	}
   14.37 +	page_array[ppt_alloc] = nmfn;
   14.38 +    }
   14.39 +
   14.40      alloc_pt(l3tab, vl3tab);
   14.41      vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
   14.42      ctxt->ctrlreg[3] = l3tab;
   14.43 -    
   14.44 +
   14.45 +    if(l3tab>0xfffff000ULL)
   14.46 +    {
   14.47 +        fprintf(stderr,"L3TAB = %llx above 4GB!\n",l3tab);
   14.48 +        goto error_out;
   14.49 +    }
   14.50 + 
   14.51      for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
   14.52      {
   14.53          if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
   14.54 @@ -525,12 +544,14 @@ static int setup_guest(int xc_handle,
   14.55      physmap = physmap_e = xc_map_foreign_range(
   14.56          xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
   14.57          page_array[physmap_pfn++]);
   14.58 +
   14.59      for ( count = 0; count < nr_pages; count++ )
   14.60      {
   14.61          if ( xc_add_mmu_update(xc_handle, mmu,
   14.62 -			       (page_array[count] << PAGE_SHIFT) | 
   14.63 +			       ((unsigned long long)page_array[count] << PAGE_SHIFT) | 
   14.64  			       MMU_MACHPHYS_UPDATE, count) )
   14.65          {
   14.66 +            fprintf(stderr,"m2p update failure p=%lx m=%lx\n",count,page_array[count] ); 
   14.67              munmap(physmap, PAGE_SIZE);
   14.68              goto error_out;
   14.69          }
    15.1 --- a/tools/libxc/xc_linux_restore.c	Fri Sep 09 08:56:14 2005 +0000
    15.2 +++ b/tools/libxc/xc_linux_restore.c	Fri Sep 09 08:56:38 2005 +0000
    15.3 @@ -149,9 +149,9 @@ int xc_linux_restore(int xc_handle, int 
    15.4      }
    15.5  
    15.6      err = xc_domain_memory_increase_reservation(xc_handle, dom,
    15.7 -                                                nr_pfns * PAGE_SIZE / 1024);
    15.8 +                                                nr_pfns, 0, 0, NULL);
    15.9      if (err != 0) {
   15.10 -        ERR("Failed to increate reservation by %lx\n", 
   15.11 +        ERR("Failed to increase reservation by %lx\n", 
   15.12              nr_pfns * PAGE_SIZE / 1024); 
   15.13          errno = ENOMEM;
   15.14          goto out;
    16.1 --- a/tools/libxc/xc_private.c	Fri Sep 09 08:56:14 2005 +0000
    16.2 +++ b/tools/libxc/xc_private.c	Fri Sep 09 08:56:38 2005 +0000
    16.3 @@ -116,7 +116,7 @@ int xc_mmuext_op(
    16.4  
    16.5      if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
    16.6      {
    16.7 -	fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to"
    16.8 +	fprintf(stderr, "Dom_mmuext operation failed (rc=%ld errno=%d)-- need to"
    16.9                      " rebuild the user-space tool set?\n",ret,errno);
   16.10      }
   16.11  
   16.12 @@ -172,7 +172,7 @@ xc_mmu_t *xc_init_mmu_updates(int xc_han
   16.13  }
   16.14  
   16.15  int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 
   16.16 -		      unsigned long ptr, unsigned long val)
   16.17 +		      unsigned long long ptr, unsigned long long val)
   16.18  {
   16.19      mmu->updates[mmu->idx].ptr = ptr;
   16.20      mmu->updates[mmu->idx].val = val;
   16.21 @@ -229,7 +229,7 @@ int xc_memory_op(int xc_handle,
   16.22  
   16.23      if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
   16.24      {
   16.25 -	fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to"
   16.26 +	fprintf(stderr, "hypercall failed (rc=%ld errno=%d)-- need to"
   16.27                  " rebuild the user-space tool set?\n",ret,errno);
   16.28      }
   16.29  
   16.30 @@ -427,3 +427,21 @@ int xc_version(int xc_handle, int cmd, v
   16.31  {
   16.32      return do_xen_version(xc_handle, cmd, arg);
   16.33  }
   16.34 +
   16.35 +unsigned long xc_make_page_below_4G(int xc_handle, u32 domid, 
   16.36 +				    unsigned long mfn)
   16.37 +{
   16.38 +    unsigned long new_mfn;
   16.39 +    if ( xc_domain_memory_decrease_reservation( 
    16.40 +	xc_handle, domid, 1, 0, &mfn ) != 0 )
   16.41 +    {
   16.42 +	fprintf(stderr,"xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
   16.43 +	return 0;
   16.44 +    }
    16.45 +    if ( xc_domain_memory_increase_reservation( xc_handle, domid, 1, 0, 32, &new_mfn ) != 0 )
   16.46 +    {
   16.47 +	fprintf(stderr,"xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
   16.48 +	return 0;
   16.49 +    }
   16.50 +    return new_mfn;
   16.51 +}
    17.1 --- a/tools/libxc/xenctrl.h	Fri Sep 09 08:56:14 2005 +0000
    17.2 +++ b/tools/libxc/xenctrl.h	Fri Sep 09 08:56:38 2005 +0000
    17.3 @@ -387,7 +387,19 @@ int xc_domain_setmaxmem(int xc_handle,
    17.4  
    17.5  int xc_domain_memory_increase_reservation(int xc_handle,
    17.6                                            u32 domid, 
    17.7 -                                          unsigned int mem_kb);
    17.8 +                                          unsigned long nr_extents,
    17.9 +                                          unsigned int extent_order,
   17.10 +                                          unsigned int address_bits,
   17.11 +					  unsigned long *extent_start);
   17.12 +
   17.13 +int xc_domain_memory_decrease_reservation(int xc_handle,
   17.14 +                                          u32 domid, 
   17.15 +                                          unsigned long nr_extents,
   17.16 +                                          unsigned int extent_order,
   17.17 +					  unsigned long *extent_start);
   17.18 +
   17.19 +unsigned long xc_make_page_below_4G(int xc_handle, u32 domid, 
   17.20 +				    unsigned long mfn);
   17.21  
   17.22  typedef dom0_perfc_desc_t xc_perfc_desc_t;
   17.23  /* IMPORTANT: The caller is responsible for mlock()'ing the @desc array. */
   17.24 @@ -521,7 +533,7 @@ struct xc_mmu {
   17.25  typedef struct xc_mmu xc_mmu_t;
   17.26  xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
   17.27  int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 
   17.28 -                   unsigned long ptr, unsigned long val);
   17.29 +                   unsigned long long ptr, unsigned long long val);
   17.30  int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
   17.31  
   17.32  #endif
    18.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Fri Sep 09 08:56:14 2005 +0000
    18.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Fri Sep 09 08:56:38 2005 +0000
    18.3 @@ -841,14 +841,21 @@ static PyObject *pyxc_domain_memory_incr
    18.4  
    18.5      u32 dom;
    18.6      unsigned long mem_kb;
    18.7 -
    18.8 -    static char *kwd_list[] = { "dom", "mem_kb", NULL };
    18.9 +    unsigned int extent_order = 0 , address_bits = 0;
   18.10 +    unsigned long nr_extents;
   18.11  
   18.12 -    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list, 
   18.13 -                                      &dom, &mem_kb) )
   18.14 +    static char *kwd_list[] = { "dom", "mem_kb", "extent_order", "address_bits", NULL };
   18.15 +
   18.16 +    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "il|ii", kwd_list, 
   18.17 +                                      &dom, &mem_kb, &extent_order, &address_bits) )
   18.18          return NULL;
   18.19  
   18.20 -    if ( xc_domain_memory_increase_reservation(xc->xc_handle, dom, mem_kb) )
   18.21 +    /* round down to nearest power of 2. Assume callers using extent_order>0
   18.22 +       know what they are doing */
   18.23 +    nr_extents = (mem_kb / (XC_PAGE_SIZE/1024)) >> extent_order;
   18.24 +    if ( xc_domain_memory_increase_reservation(xc->xc_handle, dom, 
   18.25 +					       nr_extents, extent_order, 
   18.26 +					       address_bits, NULL) )
   18.27          return PyErr_SetFromErrno(xc_error);
   18.28      
   18.29      Py_INCREF(zero);
    19.1 --- a/tools/python/xen/xend/image.py	Fri Sep 09 08:56:14 2005 +0000
    19.2 +++ b/tools/python/xen/xend/image.py	Fri Sep 09 08:56:38 2005 +0000
    19.3 @@ -159,7 +159,12 @@ class ImageHandler:
    19.4          xc.domain_setmaxmem(dom, mem_kb)
    19.5  
    19.6          try:
    19.7 -            xc.domain_memory_increase_reservation(dom, mem_kb)
    19.8 +            # Give the domain some memory below 4GB
    19.9 +            lmem_kb = 0
   19.10 +            if lmem_kb > 0:
   19.11 +                xc.domain_memory_increase_reservation(dom, min(lmem_kb,mem_kb), 0, 32)
   19.12 +            if mem_kb > lmem_kb:
   19.13 +                xc.domain_memory_increase_reservation(dom, mem_kb-lmem_kb, 0, 0)
   19.14          except:
   19.15              xc.domain_destroy(dom)
   19.16              raise
    20.1 --- a/xen/arch/x86/domain.c	Fri Sep 09 08:56:14 2005 +0000
    20.2 +++ b/xen/arch/x86/domain.c	Fri Sep 09 08:56:38 2005 +0000
    20.3 @@ -381,11 +381,13 @@ static int vmx_final_setup_guest(
    20.4  out:
    20.5      free_vmcs(vmcs);
    20.6      if(v->arch.arch_vmx.io_bitmap_a != 0) {
    20.7 -        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
    20.8 +        free_xenheap_pages(
    20.9 +            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
   20.10          v->arch.arch_vmx.io_bitmap_a = 0;
   20.11      }
   20.12      if(v->arch.arch_vmx.io_bitmap_b != 0) {
   20.13 -        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
   20.14 +        free_xenheap_pages(
   20.15 +            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
   20.16          v->arch.arch_vmx.io_bitmap_b = 0;
   20.17      }
   20.18      v->arch.arch_vmx.vmcs = 0;
   20.19 @@ -972,11 +974,13 @@ static void vmx_relinquish_resources(str
   20.20      BUG_ON(v->arch.arch_vmx.vmcs == NULL);
   20.21      free_vmcs(v->arch.arch_vmx.vmcs);
   20.22      if(v->arch.arch_vmx.io_bitmap_a != 0) {
   20.23 -        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
   20.24 +        free_xenheap_pages(
   20.25 +            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
   20.26          v->arch.arch_vmx.io_bitmap_a = 0;
   20.27      }
   20.28      if(v->arch.arch_vmx.io_bitmap_b != 0) {
   20.29 -        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
   20.30 +        free_xenheap_pages(
   20.31 +            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
   20.32          v->arch.arch_vmx.io_bitmap_b = 0;
   20.33      }
   20.34      v->arch.arch_vmx.vmcs = 0;
    21.1 --- a/xen/arch/x86/domain_build.c	Fri Sep 09 08:56:14 2005 +0000
    21.2 +++ b/xen/arch/x86/domain_build.c	Fri Sep 09 08:56:38 2005 +0000
    21.3 @@ -75,15 +75,12 @@ static struct pfn_info *alloc_chunk(stru
    21.4      struct pfn_info *page;
    21.5      unsigned int order;
    21.6      /*
    21.7 -     * Allocate up to 2MB at a time:
    21.8 -     *  1. This prevents overflow of get_order() when allocating more than
    21.9 -     *     4GB to domain 0 on a PAE machine.
   21.10 -     *  2. It prevents allocating very large chunks from DMA pools before
   21.11 -     *     the >4GB pool is fully depleted.
   21.12 +     * Allocate up to 2MB at a time: It prevents allocating very large chunks
   21.13 +     * from DMA pools before the >4GB pool is fully depleted.
   21.14       */
   21.15      if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
   21.16          max_pages = 2UL << (20 - PAGE_SHIFT);
   21.17 -    order = get_order(max_pages << PAGE_SHIFT);
   21.18 +    order = get_order_from_pages(max_pages);
   21.19      if ( (max_pages & (max_pages-1)) != 0 )
   21.20          order--;
   21.21      while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
   21.22 @@ -252,7 +249,7 @@ int construct_dom0(struct domain *d,
   21.23  #endif
   21.24      }
   21.25  
   21.26 -    order = get_order(v_end - dsi.v_start);
   21.27 +    order = get_order_from_bytes(v_end - dsi.v_start);
   21.28      if ( (1UL << order) > nr_pages )
   21.29          panic("Domain 0 allocation is too small for kernel image.\n");
   21.30  
    22.1 --- a/xen/arch/x86/vmx_vmcs.c	Fri Sep 09 08:56:14 2005 +0000
    22.2 +++ b/xen/arch/x86/vmx_vmcs.c	Fri Sep 09 08:56:38 2005 +0000
    22.3 @@ -44,7 +44,7 @@ struct vmcs_struct *alloc_vmcs(void)
    22.4  
    22.5      rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
    22.6      vmcs_size = vmx_msr_high & 0x1fff;
    22.7 -    vmcs = alloc_xenheap_pages(get_order(vmcs_size)); 
    22.8 +    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size)); 
    22.9      memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
   22.10  
   22.11      vmcs->vmcs_revision_id = vmx_msr_low;
   22.12 @@ -55,7 +55,7 @@ void free_vmcs(struct vmcs_struct *vmcs)
   22.13  {
   22.14      int order;
   22.15  
   22.16 -    order = get_order(vmcs_size);
   22.17 +    order = get_order_from_bytes(vmcs_size);
   22.18      free_xenheap_pages(vmcs, order);
   22.19  }
   22.20  
   22.21 @@ -76,8 +76,8 @@ static inline int construct_vmcs_control
   22.22      error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
   22.23  
   22.24      /* need to use 0x1000 instead of PAGE_SIZE */
   22.25 -    io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000)); 
   22.26 -    io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000)); 
   22.27 +    io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 
   22.28 +    io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 
   22.29      memset(io_bitmap_a, 0xff, 0x1000);
   22.30      /* don't bother debug port access */
   22.31      clear_bit(PC_DEBUG_PORT, io_bitmap_a);
    23.1 --- a/xen/arch/x86/x86_32/mm.c	Fri Sep 09 08:56:14 2005 +0000
    23.2 +++ b/xen/arch/x86/x86_32/mm.c	Fri Sep 09 08:56:38 2005 +0000
    23.3 @@ -118,7 +118,8 @@ void __init paging_init(void)
    23.4      }
    23.5  
    23.6      /* Set up mapping cache for domain pages. */
    23.7 -    mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
    23.8 +    mapcache_order = get_order_from_bytes(
    23.9 +        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
   23.10      mapcache = alloc_xenheap_pages(mapcache_order);
   23.11      memset(mapcache, 0, PAGE_SIZE << mapcache_order);
   23.12      for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
    24.1 --- a/xen/common/grant_table.c	Fri Sep 09 08:56:14 2005 +0000
    24.2 +++ b/xen/common/grant_table.c	Fri Sep 09 08:56:38 2005 +0000
    24.3 @@ -399,7 +399,7 @@ static int
    24.4      {
    24.5          int              i;
    24.6          grant_mapping_t *new_mt;
    24.7 -        grant_table_t   *lgt      = ld->grant_table;
    24.8 +        grant_table_t   *lgt = ld->grant_table;
    24.9  
   24.10          if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
   24.11          {
   24.12 @@ -437,9 +437,8 @@ static int
   24.13              ref, dom, dev_hst_ro_flags);
   24.14  #endif
   24.15  
   24.16 -    if ( 0 <= ( rc = __gnttab_activate_grant_ref( ld, led, rd, ref,
   24.17 -                                                  dev_hst_ro_flags,
   24.18 -                                                  addr, &frame)))
   24.19 +    if ( (rc = __gnttab_activate_grant_ref(ld, led, rd, ref, dev_hst_ro_flags,
   24.20 +                                           addr, &frame)) >= 0 )
   24.21      {
   24.22          /*
   24.23           * Only make the maptrack live _after_ writing the pte, in case we 
   24.24 @@ -807,7 +806,8 @@ gnttab_donate(gnttab_donate_t *uop, unsi
   24.25      int i;
   24.26      int result = GNTST_okay;
   24.27  
   24.28 -    for (i = 0; i < count; i++) {
   24.29 +    for ( i = 0; i < count; i++ )
   24.30 +    {
   24.31          gnttab_donate_t *gop = &uop[i];
   24.32  #if GRANT_DEBUG
   24.33          printk("gnttab_donate: i=%d mfn=%lx domid=%d gref=%08x\n",
   24.34 @@ -815,19 +815,24 @@ gnttab_donate(gnttab_donate_t *uop, unsi
   24.35  #endif
   24.36          page = &frame_table[gop->mfn];
   24.37          
   24.38 -        if (unlikely(IS_XEN_HEAP_FRAME(page))) { 
   24.39 +        if ( unlikely(IS_XEN_HEAP_FRAME(page)))
   24.40 +        { 
   24.41              printk("gnttab_donate: xen heap frame mfn=%lx\n", 
   24.42                     (unsigned long) gop->mfn);
   24.43              gop->status = GNTST_bad_virt_addr;
   24.44              continue;
   24.45          }
   24.46 -        if (unlikely(!pfn_valid(page_to_pfn(page)))) {
   24.47 +        
   24.48 +        if ( unlikely(!pfn_valid(page_to_pfn(page))) )
   24.49 +        {
   24.50              printk("gnttab_donate: invalid pfn for mfn=%lx\n", 
   24.51                     (unsigned long) gop->mfn);
   24.52              gop->status = GNTST_bad_virt_addr;
   24.53              continue;
   24.54          }
   24.55 -        if (unlikely((e = find_domain_by_id(gop->domid)) == NULL)) {
   24.56 +
   24.57 +        if ( unlikely((e = find_domain_by_id(gop->domid)) == NULL) )
   24.58 +        {
   24.59              printk("gnttab_donate: can't find domain %d\n", gop->domid);
   24.60              gop->status = GNTST_bad_domain;
   24.61              continue;
   24.62 @@ -881,48 +886,23 @@ gnttab_donate(gnttab_donate_t *uop, unsi
   24.63           * headroom.  Also, a domain mustn't have PGC_allocated
   24.64           * pages when it is dying.
   24.65           */
   24.66 -#ifdef GRANT_DEBUG
   24.67 -        if (unlikely(e->tot_pages >= e->max_pages)) {
   24.68 -            printk("gnttab_dontate: no headroom tot_pages=%d max_pages=%d\n",
   24.69 -                   e->tot_pages, e->max_pages);
   24.70 -            spin_unlock(&e->page_alloc_lock);
   24.71 -            put_domain(e);
   24.72 -            gop->status = result = GNTST_general_error;
   24.73 -            break;
   24.74 -        }
   24.75 -        if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags))) {
   24.76 -            printk("gnttab_donate: target domain is dying\n");
   24.77 +        if ( unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags)) ||
   24.78 +             unlikely(e->tot_pages >= e->max_pages) ||
   24.79 +             unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle)) )
   24.80 +        {
   24.81 +            DPRINTK("gnttab_donate: Transferee has no reservation headroom "
   24.82 +                    "(%d,%d) or provided a bad grant ref (%08x) or "
   24.83 +                    "is dying (%lx)\n",
   24.84 +                    e->tot_pages, e->max_pages, gop->handle, e->domain_flags);
   24.85              spin_unlock(&e->page_alloc_lock);
   24.86              put_domain(e);
   24.87              gop->status = result = GNTST_general_error;
   24.88              break;
   24.89          }
   24.90 -        if (unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle))) {
   24.91 -            printk("gnttab_donate: gnttab_prepare_for_transfer fails.\n");
   24.92 -            spin_unlock(&e->page_alloc_lock);
   24.93 -            put_domain(e);
   24.94 -            gop->status = result = GNTST_general_error;
   24.95 -            break;
   24.96 -        }
   24.97 -#else
   24.98 -        ASSERT(e->tot_pages <= e->max_pages);
   24.99 -        if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags)) ||
  24.100 -            unlikely(e->tot_pages == e->max_pages) ||
  24.101 -            unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle))) {
  24.102 -            printk("gnttab_donate: Transferee has no reservation headroom (%d,"
  24.103 -                   "%d) or provided a bad grant ref (%08x) or is dying (%p)\n",
  24.104 -                   e->tot_pages, e->max_pages, gop->handle, e->d_flags);
  24.105 -            spin_unlock(&e->page_alloc_lock);
  24.106 -            put_domain(e);
  24.107 -            /* XXX SMH: better error return here would be useful */
  24.108 -            gop->status = result = GNTST_general_error;
  24.109 -            break;
  24.110 -        }
  24.111 -#endif
  24.112 +
  24.113          /* Okay, add the page to 'e'. */
  24.114 -        if (unlikely(e->tot_pages++ == 0)) {
  24.115 +        if ( unlikely(e->tot_pages++ == 0) )
  24.116              get_knownalive_domain(e);
  24.117 -        }
  24.118          list_add_tail(&page->list, &e->page_list);
  24.119          page_set_owner(page, e);
  24.120          
  24.121 @@ -938,6 +918,7 @@ gnttab_donate(gnttab_donate_t *uop, unsi
  24.122          
  24.123          gop->status = GNTST_okay;
  24.124      }
  24.125 +
  24.126      return result;
  24.127  }
  24.128  
  24.129 @@ -957,38 +938,38 @@ do_grant_table_op(
  24.130      
  24.131      rc = -EFAULT;
  24.132      switch ( cmd )
  24.133 -        {
  24.134 -        case GNTTABOP_map_grant_ref:
  24.135 -            if ( unlikely(!array_access_ok(
  24.136 -                              uop, count, sizeof(gnttab_map_grant_ref_t))) )
  24.137 -                goto out;
  24.138 -            rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
  24.139 -            break;
  24.140 -        case GNTTABOP_unmap_grant_ref:
  24.141 -            if ( unlikely(!array_access_ok(
  24.142 -                              uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
  24.143 -                goto out;
  24.144 -            rc = gnttab_unmap_grant_ref((gnttab_unmap_grant_ref_t *)uop, 
  24.145 -                                        count);
  24.146 -            break;
  24.147 -        case GNTTABOP_setup_table:
  24.148 -            rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
  24.149 -            break;
  24.150 +    {
  24.151 +    case GNTTABOP_map_grant_ref:
  24.152 +        if ( unlikely(!array_access_ok(
  24.153 +            uop, count, sizeof(gnttab_map_grant_ref_t))) )
  24.154 +            goto out;
  24.155 +        rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
  24.156 +        break;
  24.157 +    case GNTTABOP_unmap_grant_ref:
  24.158 +        if ( unlikely(!array_access_ok(
  24.159 +            uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
  24.160 +            goto out;
  24.161 +        rc = gnttab_unmap_grant_ref(
  24.162 +            (gnttab_unmap_grant_ref_t *)uop, count);
  24.163 +        break;
  24.164 +    case GNTTABOP_setup_table:
  24.165 +        rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
  24.166 +        break;
  24.167  #if GRANT_DEBUG
  24.168 -        case GNTTABOP_dump_table:
  24.169 -            rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
  24.170 -            break;
  24.171 +    case GNTTABOP_dump_table:
  24.172 +        rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
  24.173 +        break;
  24.174  #endif
  24.175 -        case GNTTABOP_donate:
  24.176 -            if (unlikely(!array_access_ok(uop, count, 
  24.177 -                                          sizeof(gnttab_donate_t))))
  24.178 -                goto out;
  24.179 -            rc = gnttab_donate(uop, count);
  24.180 -            break;
  24.181 -        default:
  24.182 -            rc = -ENOSYS;
  24.183 -            break;
  24.184 -        }
  24.185 +    case GNTTABOP_donate:
  24.186 +        if (unlikely(!array_access_ok(
  24.187 +            uop, count, sizeof(gnttab_donate_t))))
  24.188 +            goto out;
  24.189 +        rc = gnttab_donate(uop, count);
  24.190 +        break;
  24.191 +    default:
  24.192 +        rc = -ENOSYS;
  24.193 +        break;
  24.194 +    }
  24.195      
  24.196    out:
  24.197      UNLOCK_BIGLOCK(d);
  24.198 @@ -1021,17 +1002,17 @@ gnttab_check_unmap(
  24.199      lgt = ld->grant_table;
  24.200      
  24.201  #if GRANT_DEBUG_VERBOSE
  24.202 -    if ( ld->domain_id != 0 ) {
  24.203 -            DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
  24.204 -                    rd->domain_id, ld->domain_id, frame, readonly);
  24.205 -      }
  24.206 +    if ( ld->domain_id != 0 )
  24.207 +        DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
  24.208 +                rd->domain_id, ld->domain_id, frame, readonly);
  24.209  #endif
  24.210      
  24.211      /* Fast exit if we're not mapping anything using grant tables */
  24.212      if ( lgt->map_count == 0 )
  24.213          return 0;
  24.214      
  24.215 -    if ( get_domain(rd) == 0 ) {
  24.216 +    if ( get_domain(rd) == 0 )
  24.217 +    {
  24.218          DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n",
  24.219                  rd->domain_id);
  24.220          return 0;
  24.221 @@ -1268,8 +1249,11 @@ grant_table_create(
  24.222      for ( i = 0; i < NR_GRANT_FRAMES; i++ )
  24.223      {
  24.224          SHARE_PFN_WITH_DOMAIN(
  24.225 -            virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
  24.226 -        set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY);
  24.227 +            virt_to_page((char *)t->shared + (i * PAGE_SIZE)),
  24.228 +            d);
  24.229 +        set_pfn_from_mfn(
  24.230 +            (virt_to_phys(t->shared) >> PAGE_SHIFT) + i,
  24.231 +            INVALID_M2P_ENTRY);
  24.232      }
  24.233  
  24.234      /* Okay, install the structure. */
  24.235 @@ -1306,57 +1290,53 @@ gnttab_release_dev_mappings(grant_table_
  24.236      {
  24.237          map = &gt->maptrack[handle];
  24.238  
  24.239 -        if ( map->ref_and_flags & GNTMAP_device_map )
  24.240 -        {
  24.241 -            dom = map->domid;
  24.242 -            ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
  24.243 +        if ( !(map->ref_and_flags & GNTMAP_device_map) )
  24.244 +            continue;
  24.245  
  24.246 -            DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
  24.247 -                    handle, ref,
  24.248 -                    map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
  24.249 +        dom = map->domid;
  24.250 +        ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
  24.251 +
  24.252 +        DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
  24.253 +                handle, ref, map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
  24.254  
  24.255 -            if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
  24.256 -                 unlikely(ld == rd) )
  24.257 +        if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
  24.258 +             unlikely(ld == rd) )
  24.259 +        {
  24.260 +            if ( rd != NULL )
  24.261 +                put_domain(rd);
  24.262 +            printk(KERN_WARNING "Grant release: No dom%d\n", dom);
  24.263 +            continue;
  24.264 +        }
  24.265 +
  24.266 +        act = &rd->grant_table->active[ref];
  24.267 +        sha = &rd->grant_table->shared[ref];
  24.268 +
  24.269 +        spin_lock(&rd->grant_table->lock);
  24.270 +
  24.271 +        if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
  24.272 +        {
  24.273 +            frame = act->frame;
  24.274 +
  24.275 +            if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
  24.276 +                 ( (act->pin & GNTPIN_devw_mask) >  0 ) )
  24.277              {
  24.278 -                if ( rd != NULL )
  24.279 -                    put_domain(rd);
  24.280 -
  24.281 -                printk(KERN_WARNING "Grant release: No dom%d\n", dom);
  24.282 -                continue;
  24.283 +                clear_bit(_GTF_writing, &sha->flags);
  24.284 +                put_page_type(&frame_table[frame]);
  24.285              }
  24.286  
  24.287 -            act = &rd->grant_table->active[ref];
  24.288 -            sha = &rd->grant_table->shared[ref];
  24.289 -
  24.290 -            spin_lock(&rd->grant_table->lock);
  24.291 -
  24.292 -            if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
  24.293 +            map->ref_and_flags &= ~GNTMAP_device_map;
  24.294 +            act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
  24.295 +            if ( act->pin == 0 )
  24.296              {
  24.297 -                frame = act->frame;
  24.298 -
  24.299 -                if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
  24.300 -                     ( (act->pin & GNTPIN_devw_mask) >  0 ) )
  24.301 -                {
  24.302 -                    clear_bit(_GTF_writing, &sha->flags);
  24.303 -                    put_page_type(&frame_table[frame]);
  24.304 -                }
  24.305 +                clear_bit(_GTF_reading, &sha->flags);
  24.306 +                map->ref_and_flags = 0;
  24.307 +                put_page(&frame_table[frame]);
  24.308 +            }
  24.309 +        }
  24.310  
  24.311 -                act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
  24.312 +        spin_unlock(&rd->grant_table->lock);
  24.313  
  24.314 -                if ( act->pin == 0 )
  24.315 -                {
  24.316 -                    clear_bit(_GTF_reading, &sha->flags);
  24.317 -                    map->ref_and_flags = 0;
  24.318 -                    put_page(&frame_table[frame]);
  24.319 -                }
  24.320 -                else
  24.321 -                    map->ref_and_flags &= ~GNTMAP_device_map;
  24.322 -            }
  24.323 -
  24.324 -            spin_unlock(&rd->grant_table->lock);
  24.325 -
  24.326 -            put_domain(rd);
  24.327 -        }
  24.328 +        put_domain(rd);
  24.329      }
  24.330  }
  24.331  
    25.1 --- a/xen/common/memory.c	Fri Sep 09 08:56:14 2005 +0000
    25.2 +++ b/xen/common/memory.c	Fri Sep 09 08:56:38 2005 +0000
    25.3 @@ -31,8 +31,8 @@ increase_reservation(
    25.4      struct pfn_info *page;
    25.5      unsigned long    i;
    25.6  
    25.7 -    if ( (extent_list != NULL)
    25.8 -         && !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
    25.9 +    if ( (extent_list != NULL) &&
   25.10 +         !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
   25.11          return 0;
   25.12  
   25.13      if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
   25.14 @@ -52,13 +52,14 @@ increase_reservation(
   25.15          if ( unlikely((page = alloc_domheap_pages(
   25.16              d, extent_order, flags)) == NULL) )
   25.17          {
   25.18 -            DPRINTK("Could not allocate a frame\n");
   25.19 +            DPRINTK("Could not allocate order=%d extent: id=%d flags=%x\n",
   25.20 +                    extent_order, d->domain_id, flags);
   25.21              return i;
   25.22          }
   25.23  
   25.24          /* Inform the domain of the new page's machine address. */ 
   25.25 -        if ( (extent_list != NULL)
   25.26 -             && (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
   25.27 +        if ( (extent_list != NULL) &&
   25.28 +             (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
   25.29              return i;
   25.30      }
   25.31  
   25.32 @@ -152,8 +153,9 @@ long do_memory_op(int cmd, void *arg)
   25.33              reservation.extent_start += start_extent;
   25.34          reservation.nr_extents -= start_extent;
   25.35  
   25.36 -        if ( unlikely(reservation.address_bits != 0)
   25.37 -             && (reservation.address_bits > (get_order(max_page)+PAGE_SHIFT)) )
   25.38 +        if ( (reservation.address_bits != 0) &&
   25.39 +             (reservation.address_bits <
   25.40 +              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
   25.41          {
   25.42              if ( reservation.address_bits < 31 )
   25.43                  return -ENOMEM;
    26.1 --- a/xen/common/page_alloc.c	Fri Sep 09 08:56:14 2005 +0000
    26.2 +++ b/xen/common/page_alloc.c	Fri Sep 09 08:56:38 2005 +0000
    26.3 @@ -216,7 +216,7 @@ unsigned long alloc_boot_pages(unsigned 
    26.4  #define NR_ZONES    3
    26.5  
    26.6  
    26.7 -#define MAX_DMADOM_PFN 0x7FFFF /* 31 addressable bits */
    26.8 +#define MAX_DMADOM_PFN 0x7FFFFUL /* 31 addressable bits */
    26.9  #define pfn_dom_zone_type(_pfn)                                 \
   26.10      (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
   26.11  
   26.12 @@ -485,43 +485,40 @@ void free_xenheap_pages(void *v, unsigne
   26.13  
   26.14  void init_domheap_pages(physaddr_t ps, physaddr_t pe)
   26.15  {
   26.16 +    unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;
   26.17 +
   26.18      ASSERT(!in_irq());
   26.19  
   26.20 -    ps = round_pgup(ps) >> PAGE_SHIFT;
   26.21 -    pe = round_pgdown(pe) >> PAGE_SHIFT;
   26.22 -    if ( pe <= ps )
   26.23 -        return;
   26.24 +    s_tot = round_pgup(ps) >> PAGE_SHIFT;
   26.25 +    e_tot = round_pgdown(pe) >> PAGE_SHIFT;
   26.26  
   26.27 -    if ( (ps < MAX_DMADOM_PFN) && (pe > MAX_DMADOM_PFN) )
   26.28 -    {
   26.29 -        init_heap_pages(
   26.30 -            MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
   26.31 -        init_heap_pages(
   26.32 -            MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN), pe - MAX_DMADOM_PFN);
   26.33 -    }
   26.34 -    else
   26.35 -    {
   26.36 -        init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
   26.37 -    }
   26.38 +    s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
   26.39 +    e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
   26.40 +    if ( s_dma < e_dma )
   26.41 +        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(s_dma), e_dma - s_dma);
   26.42 +
   26.43 +    s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
   26.44 +    e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
   26.45 +    if ( s_nrm < e_nrm )
   26.46 +        init_heap_pages(MEMZONE_DOM, pfn_to_page(s_nrm), e_nrm - s_nrm);
   26.47  }
   26.48  
   26.49  
   26.50  struct pfn_info *alloc_domheap_pages(
   26.51      struct domain *d, unsigned int order, unsigned int flags)
   26.52  {
   26.53 -    struct pfn_info *pg;
   26.54 +    struct pfn_info *pg = NULL;
   26.55      cpumask_t mask;
   26.56      int i;
   26.57  
   26.58      ASSERT(!in_irq());
   26.59  
   26.60 -    pg = NULL;
   26.61 -    if (! (flags & ALLOC_DOM_DMA))
   26.62 +    if ( !(flags & ALLOC_DOM_DMA) )
   26.63          pg = alloc_heap_pages(MEMZONE_DOM, order);
   26.64 -    if (pg == NULL) {
   26.65 -        if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) )
   26.66 +
   26.67 +    if ( pg == NULL )
   26.68 +        if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL )
   26.69              return NULL;
   26.70 -    }
   26.71  
   26.72      mask = pg->u.free.cpumask;
   26.73      tlbflush_filter(mask, pg->tlbflush_timestamp);
    27.1 --- a/xen/common/trace.c	Fri Sep 09 08:56:14 2005 +0000
    27.2 +++ b/xen/common/trace.c	Fri Sep 09 08:56:38 2005 +0000
    27.3 @@ -66,7 +66,7 @@ void init_trace_bufs(void)
    27.4      }
    27.5  
    27.6      nr_pages = num_online_cpus() * opt_tbuf_size;
    27.7 -    order    = get_order(nr_pages * PAGE_SIZE);
    27.8 +    order    = get_order_from_pages(nr_pages);
    27.9      
   27.10      if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
   27.11      {
    28.1 --- a/xen/common/xmalloc.c	Fri Sep 09 08:56:14 2005 +0000
    28.2 +++ b/xen/common/xmalloc.c	Fri Sep 09 08:56:38 2005 +0000
    28.3 @@ -86,7 +86,7 @@ static void *xmalloc_new_page(size_t siz
    28.4  static void *xmalloc_whole_pages(size_t size)
    28.5  {
    28.6      struct xmalloc_hdr *hdr;
    28.7 -    unsigned int pageorder = get_order(size);
    28.8 +    unsigned int pageorder = get_order_from_bytes(size);
    28.9  
   28.10      hdr = alloc_xenheap_pages(pageorder);
   28.11      if ( hdr == NULL )
   28.12 @@ -159,7 +159,7 @@ void xfree(const void *p)
   28.13      /* Big allocs free directly. */
   28.14      if ( hdr->size >= PAGE_SIZE )
   28.15      {
   28.16 -        free_xenheap_pages(hdr, get_order(hdr->size));
   28.17 +        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
   28.18          return;
   28.19      }
   28.20  
    29.1 --- a/xen/drivers/char/console.c	Fri Sep 09 08:56:14 2005 +0000
    29.2 +++ b/xen/drivers/char/console.c	Fri Sep 09 08:56:38 2005 +0000
    29.3 @@ -627,7 +627,7 @@ static int __init debugtrace_init(void)
    29.4      if ( bytes == 0 )
    29.5          return 0;
    29.6  
    29.7 -    order = get_order(bytes);
    29.8 +    order = get_order_from_bytes(bytes);
    29.9      debugtrace_buf = alloc_xenheap_pages(order);
   29.10      ASSERT(debugtrace_buf != NULL);
   29.11  
    30.1 --- a/xen/drivers/char/serial.c	Fri Sep 09 08:56:14 2005 +0000
    30.2 +++ b/xen/drivers/char/serial.c	Fri Sep 09 08:56:38 2005 +0000
    30.3 @@ -366,8 +366,9 @@ void serial_register_uart(int idx, struc
    30.4  void serial_async_transmit(struct serial_port *port)
    30.5  {
    30.6      BUG_ON(!port->driver->tx_empty);
    30.7 -    if ( !port->txbuf )
    30.8 -        port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
    30.9 +    if ( port->txbuf == NULL )
   30.10 +        port->txbuf = alloc_xenheap_pages(
   30.11 +            get_order_from_bytes(SERIAL_TXBUFSZ));
   30.12  }
   30.13  
   30.14  /*
    31.1 --- a/xen/include/asm-x86/page.h	Fri Sep 09 08:56:14 2005 +0000
    31.2 +++ b/xen/include/asm-x86/page.h	Fri Sep 09 08:56:38 2005 +0000
    31.3 @@ -280,7 +280,7 @@ extern void paging_init(void);
    31.4  
    31.5  #ifndef __ASSEMBLY__
    31.6  
    31.7 -static __inline__ int get_order(unsigned long size)
    31.8 +static inline int get_order_from_bytes(physaddr_t size)
    31.9  {
   31.10      int order;
   31.11      size = (size-1) >> PAGE_SHIFT;
   31.12 @@ -289,6 +289,15 @@ static __inline__ int get_order(unsigned
   31.13      return order;
   31.14  }
   31.15  
   31.16 +static inline int get_order_from_pages(unsigned long nr_pages)
   31.17 +{
   31.18 +    int order;
   31.19 +    nr_pages--;
   31.20 +    for ( order = 0; nr_pages; order++ )
   31.21 +        nr_pages >>= 1;
   31.22 +    return order;
   31.23 +}
   31.24 +
   31.25  /* Allocator functions for Xen pagetables. */
   31.26  struct pfn_info *alloc_xen_pagetable(void);
   31.27  void free_xen_pagetable(struct pfn_info *pg);