ia64/xen-unstable

changeset 2588:1e99cd1cb3a3

bitkeeper revision 1.1159.1.199 (415eb976aT_IbGUyZQZwMzprjVdnPA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-nbsd
author cl349@freefall.cl.cam.ac.uk
date Sat Oct 02 14:21:42 2004 +0000 (2004-10-02)
parents 9d1a7223d45e 0f3e0ef73bd5
children 80fce5fd8284
files linux-2.4.27-xen-sparse/drivers/char/mem.c linux-2.6.8.1-xen-sparse/drivers/char/mem.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_netbsd_build.c tools/libxc/xc_private.c tools/libxc/xc_private.h tools/xentrace/xentrace.c
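
Summary of the non-merge changes below: libxc's /dev/mem-based PFN mapper (init_pfn_mapper, map_pfn_writeable, map_pfn_readonly, unmap_pfn) and the /dev/mem ioctl that selected a target domain are removed, and every call site is converted to xc_map_foreign_range() paired with a plain munmap(). A minimal sketch of the new idiom as it recurs throughout the hunks below; the helper name clear_domain_page is hypothetical, not part of the changeset:

    /* Hypothetical helper showing the post-changeset idiom: map one
     * foreign frame, touch it, munmap() it immediately. No persistent
     * pm_handle state is kept between calls. */
    static int clear_domain_page(int xc_handle, u32 dom, unsigned long pfn)
    {
        void *va = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                        PROT_READ|PROT_WRITE, pfn);
        if ( va == NULL )
            return -1;
        memset(va, 0, PAGE_SIZE);
        munmap(va, PAGE_SIZE);
        return 0;
    }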
line diff
     1.1 --- a/linux-2.4.27-xen-sparse/drivers/char/mem.c	Fri Oct 01 16:31:30 2004 +0000
     1.2 +++ b/linux-2.4.27-xen-sparse/drivers/char/mem.c	Sat Oct 02 14:21:42 2004 +0000
     1.3 @@ -237,26 +237,15 @@ static int mmap_mem(struct file * file, 
     1.4  	if (!(start_info.flags & SIF_PRIVILEGED))
     1.5  		return -ENXIO;
     1.6  
     1.7 -	if (file->private_data == NULL)
     1.8 -		file->private_data = (void *)(unsigned long)DOMID_IO;
     1.9 -
    1.10  	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
    1.11  	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
    1.12  	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    1.13  	if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset, 
    1.14  				vma->vm_end-vma->vm_start, vma->vm_page_prot,
    1.15 -				(domid_t)file->private_data))
    1.16 +				DOMID_IO))
    1.17  		return -EAGAIN;
    1.18  	return 0;
    1.19  }
    1.20 -static int ioctl_mem(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
    1.21 -{
    1.22 -	switch (cmd) {
    1.23 -	case _IO('M', 1): file->private_data = (void *)arg; break;
    1.24 -	default: return -ENOSYS;
    1.25 -	}
    1.26 -	return 0;
    1.27 -}
    1.28  #endif /* CONFIG_XEN */
    1.29  
    1.30  /*
    1.31 @@ -675,9 +664,6 @@ static struct file_operations mem_fops =
    1.32  	write:		write_mem,
    1.33  	mmap:		mmap_mem,
    1.34  	open:		open_mem,
    1.35 -#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    1.36 -	ioctl:		ioctl_mem,
    1.37 -#endif
    1.38  };
    1.39  
    1.40  static struct file_operations kmem_fops = {
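
With ioctl_mem() gone from the 2.4 driver, userland can no longer pre-select a domain id on a /dev/mem file handle via _IO('M', 1); mmap() of /dev/mem in a privileged guest now always maps machine (I/O) memory under DOMID_IO. An illustrative userland sketch of the remaining path; the helper name map_io_frame is hypothetical:

    /* Illustrative only: a privileged guest maps one machine frame by
     * mmap()ing /dev/mem at the machine address. No ioctl is needed
     * (or available) after this change. */
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *map_io_frame(unsigned long mfn, unsigned long page_size)
    {
        int   fd = open("/dev/mem", O_RDWR);
        void *va;

        if ( fd < 0 )
            return NULL;
        va = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
                  MAP_SHARED, fd, mfn * page_size);
        close(fd);  /* the mapping outlives the descriptor */
        return (va == MAP_FAILED) ? NULL : va;
    }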
     2.1 --- a/linux-2.6.8.1-xen-sparse/drivers/char/mem.c	Fri Oct 01 16:31:30 2004 +0000
     2.2 +++ b/linux-2.6.8.1-xen-sparse/drivers/char/mem.c	Sat Oct 02 14:21:42 2004 +0000
     2.3 @@ -6,20 +6,6 @@
     2.4   *  Added devfs support. 
     2.5   *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
     2.6   *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
     2.7 - *
     2.8 - *  MODIFIED FOR XEN by Keir Fraser, 10th July 2003.
     2.9 - *  Linux running on Xen has strange semantics for /dev/mem and /dev/kmem!!
    2.10 - *   1. mmap will not work on /dev/kmem
    2.11 - *   2. mmap on /dev/mem interprets the 'file offset' as a machine address
    2.12 - *      rather than a physical address.
    2.13 - *  I don't believe anyone sane mmaps /dev/kmem, but /dev/mem is mmapped
    2.14 - *  to get at memory-mapped I/O spaces (eg. the VESA X server does this).
    2.15 - *  For this to work at all we need to expect machine addresses.
    2.16 - *  Reading/writing of /dev/kmem expects kernel virtual addresses, as usual.
    2.17 - *  Reading/writing of /dev/mem expects 'physical addresses' as usual -- this
    2.18 - *  is because /dev/mem can only read/write existing kernel mappings, which
    2.19 - *  will be normal RAM, and we should present pseudo-physical layout for all
    2.20 - *  except I/O (which is the sticky case that mmap is hacked to deal with).
    2.21   */
    2.22  
    2.23  #include <linux/config.h>
    2.24 @@ -208,9 +194,9 @@ static ssize_t write_mem(struct file * f
    2.25  	return do_write_mem(__va(p), p, buf, count, ppos);
    2.26  }
    2.27  
    2.28 -#if !defined(CONFIG_XEN)
    2.29  static int mmap_mem(struct file * file, struct vm_area_struct * vma)
    2.30  {
    2.31 +#if !defined(CONFIG_XEN)
    2.32  	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    2.33  	int uncached;
    2.34  
    2.35 @@ -233,43 +219,26 @@ static int mmap_mem(struct file * file, 
    2.36  			     vma->vm_page_prot))
    2.37  		return -EAGAIN;
    2.38  	return 0;
    2.39 -}
    2.40  #elif !defined(CONFIG_XEN_PRIVILEGED_GUEST)
    2.41 -static int mmap_mem(struct file * file, struct vm_area_struct * vma)
    2.42 -{
    2.43  	return -ENXIO;
    2.44 -}
    2.45  #else
    2.46 -static int mmap_mem(struct file * file, struct vm_area_struct * vma)
    2.47 -{
    2.48  	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    2.49  
    2.50  	if (!(start_info.flags & SIF_PRIVILEGED))
    2.51  		return -ENXIO;
    2.52  
    2.53 -	if (file->private_data == NULL)
    2.54 -		file->private_data = (void *)(unsigned long)DOMID_IO;
    2.55 +	/* Currently we're not smart about setting PTE cacheability. */
    2.56 +	vma->vm_flags |= VM_RESERVED | VM_IO;
    2.57 +	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    2.58  
    2.59 -	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
    2.60 -	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
    2.61 -	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    2.62  	if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset, 
    2.63  				vma->vm_end-vma->vm_start, vma->vm_page_prot,
    2.64 -				(domid_t)(unsigned long)file->private_data))
    2.65 +				DOMID_IO))
    2.66  		return -EAGAIN;
    2.67  	return 0;
    2.68 +#endif
    2.69  }
    2.70  
    2.71 -static int ioctl_mem(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
    2.72 -{
    2.73 -	switch (cmd) {
    2.74 -	case _IO('M', 1): file->private_data = (void *)arg; break;
    2.75 -	default: return -ENOSYS;
    2.76 -	}
    2.77 -	return 0;
    2.78 -}
    2.79 -#endif /* CONFIG_XEN */
    2.80 -
    2.81  extern long vread(char *buf, char *addr, unsigned long count);
    2.82  extern long vwrite(char *buf, char *addr, unsigned long count);
    2.83  
    2.84 @@ -640,18 +609,13 @@ static struct file_operations mem_fops =
    2.85  	.write		= write_mem,
    2.86  	.mmap		= mmap_mem,
    2.87  	.open		= open_mem,
    2.88 -#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    2.89 -	.ioctl		= ioctl_mem,
    2.90 -#endif
    2.91  };
    2.92  
    2.93  static struct file_operations kmem_fops = {
    2.94  	.llseek		= memory_lseek,
    2.95  	.read		= read_kmem,
    2.96  	.write		= write_kmem,
    2.97 -#if !defined(CONFIG_XEN)
    2.98  	.mmap		= mmap_kmem,
    2.99 -#endif
   2.100  	.open		= open_kmem,
   2.101  };
   2.102  
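
Net effect on the 2.6 driver: the three separate mmap_mem() definitions collapse into one body whose behaviour is chosen by preprocessor, ioctl_mem() disappears, and mmap_kmem is wired up unconditionally again. An abridged skeleton of the resulting function, reconstructed from the hunks above with the unchanged bodies elided:

    static int mmap_mem(struct file * file, struct vm_area_struct * vma)
    {
    #if !defined(CONFIG_XEN)
        /* native Linux: remap the physical range, as before */
        ...
    #elif !defined(CONFIG_XEN_PRIVILEGED_GUEST)
        return -ENXIO;
    #else
        /* privileged guest: direct_remap_area_pages() with DOMID_IO */
        ...
    #endif
    }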
     3.1 --- a/tools/libxc/xc_linux_build.c	Fri Oct 01 16:31:30 2004 +0000
     3.2 +++ b/tools/libxc/xc_linux_build.c	Sat Oct 02 14:21:42 2004 +0000
     3.3 @@ -29,13 +29,17 @@ struct domain_setup_info
     3.4      unsigned long symtab_len;
     3.5  };
     3.6  
     3.7 -static int parseelfimage(char *elfbase, 
     3.8 -                         unsigned long elfsize,
     3.9 -                         struct domain_setup_info *dsi);
    3.10 -static int loadelfimage(char *elfbase, void *pmh, unsigned long *parray,
    3.11 -                        unsigned long vstart);
    3.12 -static int loadelfsymtab(char *elfbase, void *pmh, unsigned long *parray,
    3.13 -                         struct domain_setup_info *dsi);
    3.14 +static int
    3.15 +parseelfimage(
    3.16 +    char *elfbase, unsigned long elfsize, struct domain_setup_info *dsi);
    3.17 +static int
    3.18 +loadelfimage(
    3.19 +    char *elfbase, int xch, u32 dom, unsigned long *parray,
    3.20 +    unsigned long vstart);
    3.21 +static int
    3.22 +loadelfsymtab(
    3.23 +    char *elfbase, int xch, u32 dom, unsigned long *parray,
    3.24 +    struct domain_setup_info *dsi);
    3.25  
    3.26  static long get_tot_pages(int xc_handle, u32 domid)
    3.27  {
    3.28 @@ -69,15 +73,17 @@ static int get_pfn_list(int xc_handle,
    3.29      return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
    3.30  }
    3.31  
    3.32 -static int copy_to_domain_page(void *pm_handle,
    3.33 +static int copy_to_domain_page(int xc_handle,
    3.34 +                               u32 domid,
    3.35                                 unsigned long dst_pfn, 
    3.36                                 void *src_page)
    3.37  {
    3.38 -    void *vaddr = map_pfn_writeable(pm_handle, dst_pfn);
    3.39 +    void *vaddr = xc_map_foreign_range(
    3.40 +        xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
    3.41      if ( vaddr == NULL )
    3.42          return -1;
    3.43      memcpy(vaddr, src_page, PAGE_SIZE);
    3.44 -    unmap_pfn(pm_handle, vaddr);
    3.45 +    munmap(vaddr, PAGE_SIZE);
    3.46      return 0;
    3.47  }
    3.48  
    3.49 @@ -102,7 +108,6 @@ static int setup_guestos(int xc_handle,
    3.50      start_info_t *start_info;
    3.51      shared_info_t *shared_info;
    3.52      mmu_t *mmu = NULL;
    3.53 -    void  *pm_handle=NULL;
    3.54      int rc;
    3.55  
    3.56      unsigned long nr_pt_pages;
    3.57 @@ -133,7 +138,7 @@ static int setup_guestos(int xc_handle,
    3.58                                VMASST_TYPE_writable_pagetables);
    3.59  
    3.60      if (dsi.load_bsd_symtab)
    3.61 -        loadelfsymtab(image, NULL, NULL, &dsi);
    3.62 +        loadelfsymtab(image, xc_handle, dom, NULL, &dsi);
    3.63  
    3.64      if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
    3.65      {
    3.66 @@ -193,9 +198,6 @@ static int setup_guestos(int xc_handle,
    3.67          goto error_out;
    3.68      }
    3.69  
    3.70 -    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
    3.71 -        goto error_out;
    3.72 -
    3.73      if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
    3.74      {
    3.75          PERROR("Could not allocate memory");
    3.76 @@ -208,10 +210,10 @@ static int setup_guestos(int xc_handle,
    3.77          goto error_out;
    3.78      }
    3.79  
    3.80 -    loadelfimage(image, pm_handle, page_array, dsi.v_start);
    3.81 +    loadelfimage(image, xc_handle, dom, page_array, dsi.v_start);
    3.82  
    3.83      if (dsi.load_bsd_symtab)
    3.84 -        loadelfsymtab(image, pm_handle, page_array, &dsi);
    3.85 +        loadelfsymtab(image, xc_handle, dom, page_array, &dsi);
    3.86  
    3.87      /* Load the initial ramdisk image. */
    3.88      if ( initrd_len != 0 )
    3.89 @@ -225,7 +227,7 @@ static int setup_guestos(int xc_handle,
    3.90                  PERROR("Error reading initrd image, could not");
    3.91                  goto error_out;
    3.92              }
    3.93 -            copy_to_domain_page(pm_handle, 
    3.94 +            copy_to_domain_page(xc_handle, dom,
    3.95                                  page_array[i>>PAGE_SHIFT], page);
    3.96          }
    3.97      }
    3.98 @@ -239,7 +241,9 @@ static int setup_guestos(int xc_handle,
    3.99      ctxt->pt_base = l2tab;
   3.100  
   3.101      /* Initialise the page tables. */
   3.102 -    if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
   3.103 +    if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 
   3.104 +                                        PROT_READ|PROT_WRITE, 
   3.105 +                                        l2tab >> PAGE_SHIFT)) == NULL )
   3.106          goto error_out;
   3.107      memset(vl2tab, 0, PAGE_SIZE);
   3.108      vl2e = &vl2tab[l2_table_offset(dsi.v_start)];
   3.109 @@ -249,10 +253,14 @@ static int setup_guestos(int xc_handle,
   3.110          {
   3.111              l1tab = page_array[ppt_alloc++] << PAGE_SHIFT;
   3.112              if ( vl1tab != NULL )
   3.113 -                unmap_pfn(pm_handle, vl1tab);
   3.114 -            if ( (vl1tab = map_pfn_writeable(pm_handle, 
   3.115 -                                             l1tab >> PAGE_SHIFT)) == NULL )
   3.116 +                munmap(vl1tab, PAGE_SIZE);
   3.117 +            if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   3.118 +                                                PROT_READ|PROT_WRITE,
   3.119 +                                                l1tab >> PAGE_SHIFT)) == NULL )
   3.120 +            {
   3.121 +                munmap(vl2tab, PAGE_SIZE);
   3.122                  goto error_out;
   3.123 +            }
   3.124              memset(vl1tab, 0, PAGE_SIZE);
   3.125              vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
   3.126              *vl2e++ = l1tab | L2_PROT;
   3.127 @@ -264,28 +272,33 @@ static int setup_guestos(int xc_handle,
   3.128              *vl1e &= ~_PAGE_RW;
   3.129          vl1e++;
   3.130      }
   3.131 -    unmap_pfn(pm_handle, vl1tab);
   3.132 -    unmap_pfn(pm_handle, vl2tab);
   3.133 +    munmap(vl1tab, PAGE_SIZE);
   3.134 +    munmap(vl2tab, PAGE_SIZE);
   3.135  
   3.136      /* Write the phys->machine and machine->phys table entries. */
   3.137      physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
   3.138 -    physmap = physmap_e = 
   3.139 -        map_pfn_writeable(pm_handle, page_array[physmap_pfn++]);
   3.140 +    physmap = physmap_e = xc_map_foreign_range(
   3.141 +        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
   3.142 +        page_array[physmap_pfn++]);
   3.143      for ( count = 0; count < nr_pages; count++ )
   3.144      {
   3.145          if ( add_mmu_update(xc_handle, mmu,
   3.146                              (page_array[count] << PAGE_SHIFT) | 
   3.147                              MMU_MACHPHYS_UPDATE, count) )
   3.148 +        {
   3.149 +            munmap(physmap, PAGE_SIZE);
   3.150              goto error_out;
   3.151 +        }
   3.152          *physmap_e++ = page_array[count];
   3.153          if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
   3.154          {
   3.155 -            unmap_pfn(pm_handle, physmap);
   3.156 -            physmap = physmap_e = 
   3.157 -                map_pfn_writeable(pm_handle, page_array[physmap_pfn++]);
   3.158 +            munmap(physmap, PAGE_SIZE);
   3.159 +            physmap = physmap_e = xc_map_foreign_range(
   3.160 +                xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
   3.161 +                page_array[physmap_pfn++]);
   3.162          }
   3.163      }
   3.164 -    unmap_pfn(pm_handle, physmap);
   3.165 +    munmap(physmap, PAGE_SIZE);
   3.166      
   3.167      /*
   3.168       * Pin down l2tab addr as page dir page - causes hypervisor to provide
   3.169 @@ -295,8 +308,9 @@ static int setup_guestos(int xc_handle,
   3.170                          l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
   3.171          goto error_out;
   3.172  
   3.173 -    start_info = map_pfn_writeable(
   3.174 -        pm_handle, page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
   3.175 +    start_info = xc_map_foreign_range(
   3.176 +        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
   3.177 +        page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
   3.178      memset(start_info, 0, sizeof(*start_info));
   3.179      start_info->nr_pages     = nr_pages;
   3.180      start_info->shared_info  = shared_info_frame << PAGE_SHIFT;
   3.181 @@ -312,22 +326,22 @@ static int setup_guestos(int xc_handle,
   3.182      }
   3.183      strncpy(start_info->cmd_line, cmdline, MAX_CMDLINE);
   3.184      start_info->cmd_line[MAX_CMDLINE-1] = '\0';
   3.185 -    unmap_pfn(pm_handle, start_info);
   3.186 +    munmap(start_info, PAGE_SIZE);
   3.187  
   3.188      /* shared_info page starts its life empty. */
   3.189 -    shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
   3.190 +    shared_info = xc_map_foreign_range(
   3.191 +        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
   3.192      memset(shared_info, 0, sizeof(shared_info_t));
   3.193      /* Mask all upcalls... */
   3.194      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   3.195          shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   3.196 -    unmap_pfn(pm_handle, shared_info);
   3.197 +    munmap(shared_info, PAGE_SIZE);
   3.198  
   3.199      /* Send the page update requests down to the hypervisor. */
   3.200      if ( finish_mmu_updates(xc_handle, mmu) )
   3.201          goto error_out;
   3.202  
   3.203      free(mmu);
   3.204 -    (void)close_pfn_mapper(pm_handle);
   3.205      free(page_array);
   3.206  
   3.207      *pvsi = vstartinfo_start;
   3.208 @@ -338,8 +352,6 @@ static int setup_guestos(int xc_handle,
   3.209   error_out:
   3.210      if ( mmu != NULL )
   3.211          free(mmu);
   3.212 -    if ( pm_handle != NULL )
   3.213 -        (void)close_pfn_mapper(pm_handle);
   3.214      if ( page_array != NULL )
   3.215          free(page_array);
   3.216      return -1;
   3.217 @@ -681,8 +693,10 @@ static int parseelfimage(char *elfbase,
   3.218      return 0;
   3.219  }
   3.220  
   3.221 -static int loadelfimage(char *elfbase, void *pmh, unsigned long *parray,
   3.222 -                        unsigned long vstart)
   3.223 +static int
   3.224 +loadelfimage(
   3.225 +    char *elfbase, int xch, u32 dom, unsigned long *parray,
   3.226 +    unsigned long vstart)
   3.227  {
   3.228      Elf_Ehdr *ehdr = (Elf_Ehdr *)elfbase;
   3.229      Elf_Phdr *phdr;
   3.230 @@ -700,32 +714,36 @@ static int loadelfimage(char *elfbase, v
   3.231          for ( done = 0; done < phdr->p_filesz; done += chunksz )
   3.232          {
   3.233              pa = (phdr->p_vaddr + done) - vstart;
   3.234 -            va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
   3.235 +            va = xc_map_foreign_range(
   3.236 +                xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
   3.237              chunksz = phdr->p_filesz - done;
   3.238              if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
   3.239                  chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
   3.240              memcpy(va + (pa & (PAGE_SIZE-1)),
   3.241                     elfbase + phdr->p_offset + done, chunksz);
   3.242 -            unmap_pfn(pmh, va);
   3.243 +            munmap(va, PAGE_SIZE);
   3.244          }
   3.245  
   3.246          for ( ; done < phdr->p_memsz; done += chunksz )
   3.247          {
   3.248              pa = (phdr->p_vaddr + done) - vstart;
   3.249 -            va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
   3.250 +            va = xc_map_foreign_range(
   3.251 +                xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
   3.252              chunksz = phdr->p_memsz - done;
   3.253              if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
   3.254                  chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
   3.255              memset(va + (pa & (PAGE_SIZE-1)), 0, chunksz);
   3.256 -            unmap_pfn(pmh, va);            
   3.257 +            munmap(va, PAGE_SIZE);
   3.258          }
   3.259      }
   3.260  
   3.261      return 0;
   3.262  }
   3.263  
   3.264 -static void map_memcpy(unsigned long dst, char *src, unsigned long size,
   3.265 -                       void *pmh, unsigned long *parray, unsigned long vstart)
   3.266 +static void
   3.267 +map_memcpy(
   3.268 +    unsigned long dst, char *src, unsigned long size,
   3.269 +    int xch, u32 dom, unsigned long *parray, unsigned long vstart)
   3.270  {
   3.271      char *va;
   3.272      unsigned long chunksz, done, pa;
   3.273 @@ -733,19 +751,22 @@ static void map_memcpy(unsigned long dst
   3.274      for ( done = 0; done < size; done += chunksz )
   3.275      {
   3.276          pa = dst + done - vstart;
   3.277 -        va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
   3.278 +        va = xc_map_foreign_range(
   3.279 +            xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
   3.280          chunksz = size - done;
   3.281          if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
   3.282              chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
   3.283          memcpy(va + (pa & (PAGE_SIZE-1)), src + done, chunksz);
   3.284 -        unmap_pfn(pmh, va);
   3.285 +        munmap(va, PAGE_SIZE);
   3.286      }
   3.287  }
   3.288  
   3.289  #define ELFROUND (ELFSIZE / 8)
   3.290  
   3.291 -static int loadelfsymtab(char *elfbase, void *pmh, unsigned long *parray,
   3.292 -                         struct domain_setup_info *dsi)
   3.293 +static int
   3.294 +loadelfsymtab(
   3.295 +    char *elfbase, int xch, u32 dom, unsigned long *parray,
   3.296 +    struct domain_setup_info *dsi)
   3.297  {
   3.298      Elf_Ehdr *ehdr = (Elf_Ehdr *)elfbase, *sym_ehdr;
   3.299      Elf_Shdr *shdr;
   3.300 @@ -789,9 +810,9 @@ static int loadelfsymtab(char *elfbase, 
   3.301          if ( (shdr[h].sh_type == SHT_STRTAB) ||
   3.302               (shdr[h].sh_type == SHT_SYMTAB) )
   3.303          {
   3.304 -            if ( pmh != NULL )
   3.305 +            if ( parray != NULL )
   3.306                  map_memcpy(maxva, elfbase + shdr[h].sh_offset, shdr[h].sh_size,
   3.307 -                           pmh, parray, dsi->v_start);
   3.308 +                           xch, dom, parray, dsi->v_start);
   3.309  
   3.310              /* Mangled to be based on ELF header location. */
   3.311              shdr[h].sh_offset = maxva - dsi->symtab_addr;
   3.312 @@ -810,7 +831,8 @@ static int loadelfsymtab(char *elfbase, 
   3.313          goto out;
   3.314      }
   3.315  
   3.316 -    if ( pmh != NULL ) {
   3.317 +    if ( parray != NULL )
   3.318 +    {
   3.319          *(int *)p = maxva - dsi->symtab_addr;
   3.320          sym_ehdr = (Elf_Ehdr *)(p + sizeof(int));
   3.321          memcpy(sym_ehdr, ehdr, sizeof(Elf_Ehdr));
   3.322 @@ -822,7 +844,7 @@ static int loadelfsymtab(char *elfbase, 
   3.323  
   3.324          /* Copy total length, crafted ELF header and section header table */
   3.325          map_memcpy(symva, p, sizeof(int) + sizeof(Elf_Ehdr) +
   3.326 -                   ehdr->e_shnum * sizeof(Elf_Shdr), pmh, parray,
   3.327 +                   ehdr->e_shnum * sizeof(Elf_Shdr), xch, dom, parray,
   3.328                     dsi->v_start);
   3.329      }
   3.330  
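
A detail worth noting in loadelfimage() and map_memcpy() above: each xc_map_foreign_range() call maps exactly one frame, so copies are chunked to never cross a page boundary within a single memcpy(). A self-contained sketch of that chunking arithmetic, using ordinary memory instead of foreign frames so it runs anywhere:

    /* Demonstrates the chunk-size computation used above: each pass
     * copies at most up to the next page boundary of the destination
     * offset, so no single memcpy() spans two pages. */
    #include <string.h>

    #define PAGE_SIZE 4096UL

    static void chunked_copy(char *dst_base, unsigned long pa,
                             const char *src, unsigned long size)
    {
        unsigned long chunksz, done, off;

        for ( done = 0; done < size; done += chunksz )
        {
            off     = (pa + done) & (PAGE_SIZE-1);
            chunksz = size - done;
            if ( chunksz > (PAGE_SIZE - off) )
                chunksz = PAGE_SIZE - off;
            memcpy(dst_base + pa + done, src + done, chunksz);
        }
    }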
     4.1 --- a/tools/libxc/xc_linux_restore.c	Fri Oct 01 16:31:30 2004 +0000
     4.2 +++ b/tools/libxc/xc_linux_restore.c	Sat Oct 02 14:21:42 2004 +0000
     4.3 @@ -130,8 +130,6 @@ int xc_linux_restore(int xc_handle, XcIO
     4.4  
     4.5      mmu_t *mmu = NULL;
     4.6  
     4.7 -    void *pm_handle = NULL;
     4.8 -
     4.9      /* used by debug verify code */
    4.10      unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
    4.11  
    4.12 @@ -245,9 +243,6 @@ int xc_linux_restore(int xc_handle, XcIO
    4.13      }
    4.14      shared_info_frame = op.u.getdomaininfo.shared_info_frame;
    4.15  
    4.16 -    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
    4.17 -        goto out;
    4.18 -
    4.19      /* Build the pfn-to-mfn table. We choose MFN ordering returned by Xen. */
    4.20      if ( get_pfn_list(xc_handle, dom, pfn_to_mfn_table, nr_pfns) != nr_pfns )
    4.21      {
    4.22 @@ -511,6 +506,7 @@ int xc_linux_restore(int xc_handle, XcIO
    4.23      {
    4.24  	unsigned int count, *pfntab;
    4.25  	int rc;
    4.26 +
    4.27  	if ( xcio_read(ioctxt, &count, sizeof(count)) )
    4.28  	{
    4.29  	    xcio_error(ioctxt, "Error when reading from state file");
    4.30 @@ -518,7 +514,7 @@ int xc_linux_restore(int xc_handle, XcIO
    4.31  	}
    4.32  
    4.33  	pfntab = malloc( sizeof(unsigned int) * count );
    4.34 -	if ( !pfntab )
    4.35 +	if ( pfntab == NULL )
    4.36  	{
    4.37  	    xcio_error(ioctxt, "Out of memory");
    4.38  	    goto out;
    4.39 @@ -530,14 +526,14 @@ int xc_linux_restore(int xc_handle, XcIO
    4.40  	    goto out;
    4.41  	}
    4.42  
    4.43 -	for(i=0;i<count;i++)
    4.44 +	for ( i = 0; i < count; i++ )
    4.45  	{
    4.46  	    unsigned long pfn = pfntab[i];
    4.47  	    pfntab[i]=pfn_to_mfn_table[pfn];
    4.48  	    pfn_to_mfn_table[pfn] = 0x80000001;  // not in pmap
    4.49  	}
    4.50  
    4.51 -	if ( count>0 )
    4.52 +	if ( count > 0 )
    4.53  	{
    4.54  	    if ( (rc = do_dom_mem_op( xc_handle,
    4.55  				       MEMOP_decrease_reservation,
    4.56 @@ -550,12 +546,9 @@ int xc_linux_restore(int xc_handle, XcIO
    4.57  	    {
    4.58  		printf("Decreased reservation by %d pages\n", count);
    4.59  	    }
    4.60 -	}
    4.61 -	
    4.62 +	}	
    4.63      }
    4.64  
    4.65 -
    4.66 -
    4.67      if ( xcio_read(ioctxt, &ctxt,       sizeof(ctxt)) ||
    4.68           xcio_read(ioctxt, shared_info, PAGE_SIZE) )
    4.69      {
    4.70 @@ -571,11 +564,12 @@ int xc_linux_restore(int xc_handle, XcIO
    4.71          goto out;
    4.72      }
    4.73      ctxt.cpu_ctxt.esi = mfn = pfn_to_mfn_table[pfn];
    4.74 -    p_srec = map_pfn_writeable(pm_handle, mfn);
    4.75 +    p_srec = xc_map_foreign_range(
    4.76 +        xc_handle, dom, PAGE_SIZE, PROT_WRITE, mfn);
    4.77      p_srec->resume_info.nr_pages    = nr_pfns;
    4.78      p_srec->resume_info.shared_info = shared_info_frame << PAGE_SHIFT;
    4.79      p_srec->resume_info.flags       = 0;
    4.80 -    unmap_pfn(pm_handle, p_srec);
    4.81 +    munmap(p_srec, PAGE_SIZE);
    4.82  
    4.83      /* Uncanonicalise each GDT frame number. */
    4.84      if ( ctxt.gdt_ents > 8192 )
    4.85 @@ -606,17 +600,16 @@ int xc_linux_restore(int xc_handle, XcIO
    4.86      }
    4.87      ctxt.pt_base = pfn_to_mfn_table[pfn] << PAGE_SHIFT;
    4.88  
    4.89 -
    4.90      /* clear any pending events and the selector */
    4.91 -    memset( &(((shared_info_t *)shared_info)->evtchn_pending[0]),
    4.92 -            0, sizeof (((shared_info_t *)shared_info)->evtchn_pending)+
    4.93 -            sizeof(((shared_info_t *)shared_info)->evtchn_pending_sel) );
    4.94 +    memset(&(((shared_info_t *)shared_info)->evtchn_pending[0]),
    4.95 +           0, sizeof (((shared_info_t *)shared_info)->evtchn_pending)+
    4.96 +           sizeof(((shared_info_t *)shared_info)->evtchn_pending_sel));
    4.97  
    4.98      /* Copy saved contents of shared-info page. No checking needed. */
    4.99 -    ppage = map_pfn_writeable(pm_handle, shared_info_frame);
   4.100 +    ppage = xc_map_foreign_range(
   4.101 +        xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
   4.102      memcpy(ppage, shared_info, sizeof(shared_info_t));
   4.103 -    unmap_pfn(pm_handle, ppage);
   4.104 -
   4.105 +    munmap(ppage, PAGE_SIZE);
   4.106  
   4.107      /* Uncanonicalise the pfn-to-mfn table frame-number list. */
   4.108      for ( i = 0; i < (nr_pfns+1023)/1024; i++ )
   4.109 @@ -702,8 +695,6 @@ int xc_linux_restore(int xc_handle, XcIO
   4.110          xc_domain_destroy(xc_handle, dom);
   4.111      if ( mmu != NULL )
   4.112          free(mmu);
   4.113 -    if ( pm_handle != NULL )
   4.114 -        (void)close_pfn_mapper(pm_handle);
   4.115      if ( pfn_to_mfn_table != NULL )
   4.116          free(pfn_to_mfn_table);
   4.117      if ( pfn_type != NULL )
     5.1 --- a/tools/libxc/xc_netbsd_build.c	Fri Oct 01 16:31:30 2004 +0000
     5.2 +++ b/tools/libxc/xc_netbsd_build.c	Sat Oct 02 14:21:42 2004 +0000
     5.3 @@ -13,7 +13,7 @@
     5.4  #define DPRINTF(x)
     5.5  #endif
     5.6  
     5.7 -static int loadelfimage(gzFile, void *, unsigned long *, unsigned long,
     5.8 +static int loadelfimage(gzFile, int, u32, unsigned long *, unsigned long,
     5.9                          unsigned long *, unsigned long *,
    5.10                          unsigned long *, unsigned long *);
    5.11  
    5.12 @@ -77,12 +77,8 @@ static int setup_guestos(int xc_handle,
    5.13      shared_info_t *shared_info;
    5.14      unsigned long ksize;
    5.15      mmu_t *mmu = NULL;
    5.16 -    void  *pm_handle = NULL;
    5.17      int i;
    5.18  
    5.19 -    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
    5.20 -        goto error_out;
    5.21 -
    5.22      if ( (page_array = malloc(tot_pages * sizeof(unsigned long))) == NULL )
    5.23      {
    5.24          PERROR("Could not allocate memory");
    5.25 @@ -95,7 +91,7 @@ static int setup_guestos(int xc_handle,
    5.26          goto error_out;
    5.27      }
    5.28  
    5.29 -    if (loadelfimage(kernel_gfd, pm_handle, page_array, tot_pages,
    5.30 +    if (loadelfimage(kernel_gfd, xc_handle, dom, page_array, tot_pages,
    5.31                       virt_load_addr, &ksize, &symtab_addr, &symtab_len))
    5.32          goto error_out;
    5.33  
    5.34 @@ -125,7 +121,9 @@ static int setup_guestos(int xc_handle,
    5.35          goto error_out;
    5.36      
    5.37      /* Initialise the page tables. */
    5.38 -    if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
    5.39 +    if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
    5.40 +                                        PROT_READ|PROT_WRITE,
    5.41 +                                        l2tab >> PAGE_SHIFT)) == NULL )
    5.42          goto error_out;
    5.43      memset(vl2tab, 0, PAGE_SIZE);
    5.44      vl2e = &vl2tab[l2_table_offset(*virt_load_addr)];
    5.45 @@ -135,10 +133,14 @@ static int setup_guestos(int xc_handle,
    5.46          {
    5.47              l1tab = page_array[alloc_index--] << PAGE_SHIFT;
    5.48              if ( vl1tab != NULL )
    5.49 -                unmap_pfn(pm_handle, vl1tab);
    5.50 -            if ( (vl1tab = map_pfn_writeable(pm_handle,
    5.51 -                                             l1tab >> PAGE_SHIFT)) == NULL )
    5.52 +                munmap(vl1tab, PAGE_SIZE);
    5.53 +            if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
    5.54 +                                                PROT_READ|PROT_WRITE,
    5.55 +                                                l1tab >> PAGE_SHIFT)) == NULL )
    5.56 +            {
    5.57 +                munmap(vl2tab, PAGE_SIZE);
    5.58                  goto error_out;
    5.59 +            }
    5.60              memset(vl1tab, 0, PAGE_SIZE);
    5.61              vl1e = &vl1tab[l1_table_offset(*virt_load_addr + 
    5.62                                             (count<<PAGE_SHIFT))];
    5.63 @@ -153,10 +155,14 @@ static int setup_guestos(int xc_handle,
    5.64          if ( add_mmu_update(xc_handle, mmu,
    5.65                              (page_array[count] << PAGE_SHIFT) | 
    5.66                              MMU_MACHPHYS_UPDATE, count) )
    5.67 +        {
    5.68 +            munmap(vl1tab, PAGE_SIZE);
    5.69 +            munmap(vl2tab, PAGE_SIZE);
    5.70              goto error_out;
    5.71 +        }
    5.72      }
    5.73 -    unmap_pfn(pm_handle, vl1tab);
    5.74 -    unmap_pfn(pm_handle, vl2tab);
    5.75 +    munmap(vl1tab, PAGE_SIZE);
    5.76 +    munmap(vl2tab, PAGE_SIZE);
    5.77  
    5.78      /*
    5.79       * Pin down l2tab addr as page dir page - causes hypervisor to provide
    5.80 @@ -169,7 +175,8 @@ static int setup_guestos(int xc_handle,
    5.81      *virt_startinfo_addr =
    5.82          *virt_load_addr + ((alloc_index-1) << PAGE_SHIFT);
    5.83  
    5.84 -    start_info = map_pfn_writeable(pm_handle, page_array[alloc_index-1]);
    5.85 +    start_info = xc_map_foreign_range(
    5.86 +        xc_handle, dom, PAGE_SIZE, PROT_WRITE, page_array[alloc_index-1]);
    5.87      memset(start_info, 0, sizeof(*start_info));
    5.88      start_info->pt_base     = *virt_load_addr + ((tot_pages-1) << PAGE_SHIFT);
    5.89      start_info->mod_start   = symtab_addr;
    5.90 @@ -180,30 +187,28 @@ static int setup_guestos(int xc_handle,
    5.91      start_info->domain_controller_evtchn = control_evtchn;
    5.92      strncpy(start_info->cmd_line, cmdline, MAX_CMDLINE);
    5.93      start_info->cmd_line[MAX_CMDLINE-1] = '\0';
    5.94 -    unmap_pfn(pm_handle, start_info);
    5.95 +    munmap(start_info, PAGE_SIZE);
    5.96  
    5.97      /* shared_info page starts its life empty. */
    5.98 -    shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
    5.99 +    shared_info = xc_map_foreign_range(
   5.100 +        xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
   5.101      memset(shared_info, 0, PAGE_SIZE);
   5.102      /* Mask all upcalls... */
   5.103      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   5.104          shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   5.105 -    unmap_pfn(pm_handle, shared_info);
   5.106 +    munmap(shared_info, PAGE_SIZE);
   5.107  
   5.108      /* Send the page update requests down to the hypervisor. */
   5.109      if ( finish_mmu_updates(xc_handle, mmu) )
   5.110          goto error_out;
   5.111  
   5.112      free(mmu);
   5.113 -    (void)close_pfn_mapper(pm_handle);
   5.114      free(page_array);
   5.115      return 0;
   5.116  
   5.117   error_out:
   5.118      if ( mmu != NULL )
   5.119          free(mmu);
   5.120 -    if ( pm_handle != NULL )
   5.121 -        (void)close_pfn_mapper(pm_handle);
   5.122      if ( page_array == NULL )
   5.123          free(page_array);
   5.124      return -1;
   5.125 @@ -413,7 +418,7 @@ myseek(gzFile gfd, off_t offset, int whe
   5.126  #define IS_BSS(p) (p.p_filesz < p.p_memsz)
   5.127  
   5.128  static int
   5.129 -loadelfimage(gzFile kernel_gfd, void *pm_handle, unsigned long *page_array,
   5.130 +loadelfimage(gzFile kernel_gfd, int xch, u32 dom, unsigned long *page_array,
   5.131               unsigned long tot_pages, unsigned long *virt_load_addr,
   5.132               unsigned long *ksize, unsigned long *symtab_addr,
   5.133               unsigned long *symtab_len)
   5.134 @@ -512,9 +517,9 @@ loadelfimage(gzFile kernel_gfd, void *pm
   5.135                      goto out;
   5.136                  }
   5.137                  curpos += c;
   5.138 -                vaddr = map_pfn_writeable(pm_handle, 
   5.139 -                                          page_array[(iva - *virt_load_addr)
   5.140 -                                                    >> PAGE_SHIFT]);
   5.141 +                vaddr = xc_map_foreign_range(
   5.142 +                    xch, dom, PAGE_SIZE, PROT_WRITE, 
   5.143 +                    page_array[(iva - *virt_load_addr) >> PAGE_SHIFT]);
   5.144                  if ( vaddr == NULL )
   5.145                  {
   5.146                      ERROR("Couldn't map guest memory");
   5.147 @@ -523,7 +528,7 @@ loadelfimage(gzFile kernel_gfd, void *pm
   5.148                  DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)iva,
   5.149                           vaddr + (iva & (PAGE_SIZE - 1)), c));
   5.150                  memcpy(vaddr + (iva & (PAGE_SIZE - 1)), page, c);
   5.151 -                unmap_pfn(pm_handle, vaddr);
   5.152 +                munmap(vaddr, PAGE_SIZE);
   5.153              }
   5.154  
   5.155              if ( phdr[h].p_vaddr + phdr[h].p_filesz > maxva )
   5.156 @@ -621,9 +626,9 @@ loadelfimage(gzFile kernel_gfd, void *pm
   5.157                  }
   5.158                  curpos += c;
   5.159  
   5.160 -                vaddr = map_pfn_writeable(pm_handle, 
   5.161 -                                          page_array[(maxva - *virt_load_addr)
   5.162 -                                                    >> PAGE_SHIFT]);
   5.163 +                vaddr = xc_map_foreign_range(
   5.164 +                    xch, dom, PAGE_SIZE, PROT_WRITE,
   5.165 +                    page_array[(maxva - *virt_load_addr) >> PAGE_SHIFT]);
   5.166                  if ( vaddr == NULL )
   5.167                  {
   5.168                      ERROR("Couldn't map guest memory");
   5.169 @@ -632,7 +637,7 @@ loadelfimage(gzFile kernel_gfd, void *pm
   5.170                  DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)maxva,
   5.171                           vaddr + (maxva & (PAGE_SIZE - 1)), c));
   5.172                  memcpy(vaddr + (maxva & (PAGE_SIZE - 1)), page, c);
   5.173 -                unmap_pfn(pm_handle, vaddr);
   5.174 +                munmap(vaddr, PAGE_SIZE);
   5.175              }
   5.176  
   5.177              *symtab_len += shdr[h].sh_size;
   5.178 @@ -668,9 +673,9 @@ loadelfimage(gzFile kernel_gfd, void *pm
   5.179          c = PAGE_SIZE - (symva & (PAGE_SIZE - 1));
   5.180          if ( c > s - i )
   5.181              c = s - i;
   5.182 -        vaddr = map_pfn_writeable(pm_handle, 
   5.183 -                                  page_array[(symva - *virt_load_addr)
   5.184 -                                            >> PAGE_SHIFT]);
   5.185 +        vaddr = xc_map_foreign_range(
   5.186 +            xch, dom, PAGE_SIZE, PROT_WRITE,
   5.187 +            page_array[(symva - *virt_load_addr) >> PAGE_SHIFT]);
   5.188          if ( vaddr == NULL )
   5.189          {
   5.190              ERROR("Couldn't map guest memory");
   5.191 @@ -678,9 +683,8 @@ loadelfimage(gzFile kernel_gfd, void *pm
   5.192          }
   5.193          DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)symva,
   5.194                   vaddr + (symva & (PAGE_SIZE - 1)), c));
   5.195 -        memcpy(vaddr + (symva & (PAGE_SIZE - 1)), p + i,
   5.196 -               c);
   5.197 -        unmap_pfn(pm_handle, vaddr);
   5.198 +        memcpy(vaddr + (symva & (PAGE_SIZE - 1)), p + i, c);
   5.199 +        munmap(vaddr, PAGE_SIZE);
   5.200      }
   5.201  
   5.202      *symtab_len = maxva - *symtab_addr;
     6.1 --- a/tools/libxc/xc_private.c	Fri Oct 01 16:31:30 2004 +0000
     6.2 +++ b/tools/libxc/xc_private.c	Sat Oct 02 14:21:42 2004 +0000
     6.3 @@ -6,133 +6,6 @@
     6.4  
     6.5  #include "xc_private.h"
     6.6  
     6.7 -#define MAX_EXTENTS 8
     6.8 -typedef struct {
     6.9 -    int fd;
    6.10 -    struct {
    6.11 -        void         *base; 
    6.12 -        unsigned long length;
    6.13 -    } extent[MAX_EXTENTS];
    6.14 -} mapper_desc_t;
    6.15 -
    6.16 -void *init_pfn_mapper(domid_t domid)
    6.17 -{
    6.18 -    int            fd = open("/dev/mem", O_RDWR);
    6.19 -    mapper_desc_t *desc;
    6.20 -
    6.21 -    if ( fd < 0 )
    6.22 -        return NULL;
    6.23 -
    6.24 -    if ( (desc = malloc(sizeof(*desc))) == NULL )
    6.25 -    {
    6.26 -        close(fd);
    6.27 -        return NULL;
    6.28 -    }
    6.29 -
    6.30 -    (void)ioctl(fd, _IO('M', 1), (unsigned long)domid);
    6.31 -
    6.32 -    memset(desc, 0, sizeof(*desc));
    6.33 -    desc->fd = fd;
    6.34 -
    6.35 -    return desc;
    6.36 -}
    6.37 -
    6.38 -int close_pfn_mapper(void *pm_handle)
    6.39 -{
    6.40 -    mapper_desc_t *desc = pm_handle;
    6.41 -    int            i;
    6.42 -
    6.43 -    for ( i = 0; i < MAX_EXTENTS; i++ )
    6.44 -    {
    6.45 -        if ( desc->extent[i].base != NULL )
    6.46 -            (void)munmap(desc->extent[i].base, desc->extent[i].length);
    6.47 -    }
    6.48 -
    6.49 -    close(desc->fd);
    6.50 -    free(desc);
    6.51 -
    6.52 -    return 0;
    6.53 -}
    6.54 -
    6.55 -static int get_free_offset(mapper_desc_t *desc)
    6.56 -{
    6.57 -    int i;
    6.58 -
    6.59 -    for ( i = 0; i < MAX_EXTENTS; i++ )
    6.60 -    {
    6.61 -        if ( desc->extent[i].base == NULL )
    6.62 -            break;
    6.63 -    }
    6.64 -
    6.65 -    if ( i == MAX_EXTENTS )
    6.66 -    {
    6.67 -        fprintf(stderr, "Extent overflow in map_pfn_*()!\n");
    6.68 -        fflush(stderr);
    6.69 -        *(int*)0=0; /* XXX */
    6.70 -    }
    6.71 -
    6.72 -    return i;
    6.73 -}
    6.74 -
    6.75 -void *map_pfn_writeable(void *pm_handle, unsigned long pfn)
    6.76 -{
    6.77 -    mapper_desc_t *desc = pm_handle;
    6.78 -    void          *vaddr;
    6.79 -    int            off;
    6.80 -
    6.81 -    vaddr = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
    6.82 -                 MAP_SHARED, desc->fd, pfn << PAGE_SHIFT);
    6.83 -    if ( vaddr == MAP_FAILED )
    6.84 -        return NULL;
    6.85 -
    6.86 -    off = get_free_offset(desc);
    6.87 -    desc->extent[off].base   = vaddr;
    6.88 -    desc->extent[off].length = PAGE_SIZE;
    6.89 -
    6.90 -    return vaddr;
    6.91 -}
    6.92 -
    6.93 -void *map_pfn_readonly(void *pm_handle, unsigned long pfn)
    6.94 -{
    6.95 -    mapper_desc_t *desc = pm_handle;
    6.96 -    void          *vaddr;
    6.97 -    int            off;
    6.98 -
    6.99 -    vaddr = mmap(NULL, PAGE_SIZE, PROT_READ,
   6.100 -                 MAP_SHARED, desc->fd, pfn << PAGE_SHIFT);
   6.101 -    if ( vaddr == MAP_FAILED )
   6.102 -        return NULL;
   6.103 -
   6.104 -    off = get_free_offset(desc);
   6.105 -    desc->extent[off].base   = vaddr;
   6.106 -    desc->extent[off].length = PAGE_SIZE;
   6.107 -
   6.108 -    return vaddr;
   6.109 -}
   6.110 -
   6.111 -void unmap_pfn(void *pm_handle, void *vaddr)
   6.112 -{
   6.113 -    mapper_desc_t *desc = pm_handle;
   6.114 -    int            i;
   6.115 -    unsigned long  len = 0;
   6.116 -
   6.117 -    for ( i = 0; i < MAX_EXTENTS; i++ )
   6.118 -    {
   6.119 -        if ( desc->extent[i].base == vaddr )
   6.120 -        {
   6.121 -            desc->extent[i].base = NULL;
   6.122 -            len = desc->extent[i].length;
   6.123 -        }
   6.124 -    }
   6.125 -
   6.126 -    if ( len == 0 )
   6.127 -        *(int*)0 = 0; /* XXX */
   6.128 -
   6.129 -    (void)munmap(vaddr, len);
   6.130 -}
   6.131 -
   6.132 -/*******************/
   6.133 -
   6.134  void *xc_map_foreign_batch(int xc_handle, u32 dom, int prot,
   6.135                             unsigned long *arr, int num )
   6.136  {
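
For context on the deletion above: the old mapper tracked at most MAX_EXTENTS (8) live mappings per handle and deliberately dereferenced NULL (the *(int*)0 = 0; /* XXX */ lines) on extent overflow or an unmatched unmap_pfn(). The per-call xc_map_foreign_range()/munmap() pairing adopted by the new call sites carries no such fixed limit or crash path.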
     7.1 --- a/tools/libxc/xc_private.h	Fri Oct 01 16:31:30 2004 +0000
     7.2 +++ b/tools/libxc/xc_private.h	Sat Oct 02 14:21:42 2004 +0000
     7.3 @@ -144,11 +144,6 @@ static inline int do_dom_mem_op(int     
     7.4  /*
     7.5   * PFN mapping.
     7.6   */
     7.7 -void *init_pfn_mapper(domid_t domid);
     7.8 -int close_pfn_mapper(void *pm_handle);
     7.9 -void *map_pfn_writeable(void *pm_handle, unsigned long pfn);
    7.10 -void *map_pfn_readonly(void *pm_handle, unsigned long pfn);
    7.11 -void unmap_pfn(void *pm_handle, void *vaddr);
    7.12  int get_pfn_type_batch(int xc_handle, u32 dom, int num, unsigned long *arr);
    7.13  unsigned long csum_page (void * page);
    7.14  
     8.1 --- a/tools/xentrace/xentrace.c	Fri Oct 01 16:31:30 2004 +0000
     8.2 +++ b/tools/xentrace/xentrace.c	Sat Oct 02 14:21:42 2004 +0000
     8.3 @@ -120,8 +120,7 @@ void get_tbufs(unsigned long *mach_addr,
     8.4   * @num:       number of trace buffers to map
     8.5   * @size:      size of each trace buffer
     8.6   *
     8.7 - * Maps the Xen trace buffers them into process address space by memory mapping
     8.8 - * /dev/mem.  Returns the location the buffers have been mapped to.
     8.9 + * Maps the Xen trace buffers into process address space.
    8.10   */
    8.11  struct t_buf *map_tbufs(unsigned long tbufs_mach, unsigned int num,
    8.12                          unsigned long size)