ia64/xen-unstable

changeset 6031:8004acaa6684

Merge
author kaf24@firebug.cl.cam.ac.uk
date Thu Aug 04 16:53:30 2005 +0000 (2005-08-04)
parents 43f424818d6e 9f0eff879d89
children ebf05456ee11
files linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h linux-2.6-xen-sparse/drivers/xen/blktap/blktap_datapath.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c tools/blktap/blktaplib.c tools/blktap/parallax/Makefile xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_64/entry.S xen/include/public/io/blkif.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Thu Aug 04 16:53:11 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Thu Aug 04 16:53:30 2005 +0000
     1.3 @@ -137,7 +137,7 @@ static vdisk_t *xlvbd_probe(int *ret)
     1.4      blkif_control_probe_send(&req, &rsp,
     1.5                               (unsigned long)(virt_to_machine(buf)));
     1.6  #else
     1.7 -    req.frame_and_sects[0] = blkif_fas(virt_to_machine(buf), 0, ((PAGE_SIZE/512)-1);
     1.8 +    req.frame_and_sects[0] = blkif_fas(virt_to_machine(buf), 0, (PAGE_SIZE/512)-1);
     1.9  
    1.10      blkif_control_send(&req, &rsp);
    1.11  #endif
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h	Thu Aug 04 16:53:11 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h	Thu Aug 04 16:53:30 2005 +0000
     2.3 @@ -103,8 +103,6 @@ typedef struct {
     2.4      blkif_t       *blkif;
     2.5      unsigned long  id;
     2.6      int            nr_pages;
     2.7 -    unsigned long  mach_fas[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     2.8 -    unsigned long  virt_fas[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     2.9      int            next_free;
    2.10  } active_req_t;
    2.11  
    2.12 @@ -172,32 +170,7 @@ static inline int BLKTAP_MODE_VALID(unsi
    2.13  
    2.14  
    2.15  /* -------[ Mappings to User VMA ]------------------------------------ */
    2.16 -#define MAX_PENDING_REQS 64
    2.17  #define BATCH_PER_DOMAIN 16
    2.18 -extern struct vm_area_struct *blktap_vma;
    2.19 -
    2.20 -/* The following are from blkback.c and should probably be put in a
    2.21 - * header and included from there.
     2.22 - * The mmap area described here is where attached data pages will be mapped.
    2.23 - */
    2.24 - 
    2.25 -extern unsigned long mmap_vstart;
    2.26 -#define MMAP_PAGES_PER_REQUEST \
    2.27 -    (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
    2.28 -#define MMAP_PAGES             \
    2.29 -    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
    2.30 -#define MMAP_VADDR(_req,_seg)                        \
    2.31 -    (mmap_vstart +                                   \
    2.32 -     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
    2.33 -     ((_seg) * PAGE_SIZE))
    2.34 -
    2.35 -/* immediately before the mmap area, we have a bunch of pages reserved
    2.36 - * for shared memory rings.
    2.37 - */
    2.38 -
    2.39 -#define RING_PAGES 3 /* Ctrl, Front, and Back */ 
    2.40 -extern unsigned long rings_vstart;
    2.41 -
    2.42  
    2.43  /* -------[ Here be globals ]----------------------------------------- */
    2.44  extern unsigned long blktap_mode;
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_datapath.c	Thu Aug 04 16:53:11 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_datapath.c	Thu Aug 04 16:53:30 2005 +0000
     3.3 @@ -280,8 +280,6 @@ static int do_block_io_op(blkif_t *blkif
     3.4      int more_to_do = 0;
     3.5      int notify_be = 0, notify_user = 0;
     3.6      
     3.7 -    if (NR_ACTIVE_REQS == MAX_ACTIVE_REQS) return 1;
     3.8 -    
     3.9      /* lock both rings */
    3.10      spin_lock_irqsave(&blkif_io_lock, flags);
    3.11  
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c	Thu Aug 04 16:53:11 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c	Thu Aug 04 16:53:30 2005 +0000
     4.3 @@ -19,6 +19,7 @@
     4.4  #include <linux/gfp.h>
     4.5  #include <linux/poll.h>
     4.6  #include <asm/pgalloc.h>
     4.7 +#include <asm/tlbflush.h>
     4.8  #include <asm-xen/xen-public/io/blkif.h> /* for control ring. */
     4.9  
    4.10  #include "blktap.h"
    4.11 @@ -33,11 +34,6 @@ unsigned long blktap_ring_ok; /* make th
    4.12  /* for poll: */
    4.13  static wait_queue_head_t blktap_wait;
    4.14  
    4.15 -/* Where things are inside the device mapping. */
    4.16 -struct vm_area_struct *blktap_vma = NULL;
    4.17 -unsigned long mmap_vstart;
    4.18 -unsigned long rings_vstart;
    4.19 -
    4.20  /* Rings up to user space. */
    4.21  static blkif_front_ring_t blktap_ufe_ring;
    4.22  static blkif_back_ring_t  blktap_ube_ring;
    4.23 @@ -47,6 +43,39 @@ static ctrl_front_ring_t  blktap_uctrl_r
    4.24  static int blktap_read_fe_ring(void);
    4.25  static int blktap_read_be_ring(void);
    4.26  
    4.27 +/* -------[ mmap region ]--------------------------------------------- */
    4.28 +/*
    4.29 + * We use a big chunk of address space to map in-flight requests into,
    4.30 + * and export this region up to user-space.  See the comments in blkback
    4.31 + * about this -- the two must be kept in sync if the tap is used as a 
    4.32 + * passthrough.
    4.33 + */
    4.34 +
    4.35 +#define MAX_PENDING_REQS 64
    4.36 +
    4.37 +/* immediately before the mmap area, we have a bunch of pages reserved
    4.38 + * for shared memory rings.
    4.39 + */
    4.40 +#define RING_PAGES 3 /* Ctrl, Front, and Back */ 
    4.41 +
    4.42 +/* Where things are inside the device mapping. */
    4.43 +struct vm_area_struct *blktap_vma = NULL;
    4.44 +unsigned long mmap_vstart;  /* Kernel pages for mapping in data. */
    4.45 +unsigned long rings_vstart; /* start of mmaped vma               */
    4.46 +unsigned long user_vstart;  /* start of user mappings            */
    4.47 +
    4.48 +#define MMAP_PAGES_PER_REQUEST \
    4.49 +    (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
    4.50 +#define MMAP_PAGES             \
    4.51 +    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
    4.52 +#define MMAP_VADDR(_start, _req,_seg)                \
    4.53 +    ( _start +                                       \
    4.54 +     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
    4.55 +     ((_seg) * PAGE_SIZE))
    4.56 +
    4.57 +
    4.58 +
    4.59 +
    4.60  /* -------[ blktap vm ops ]------------------------------------------- */
    4.61  
    4.62  static struct page *blktap_nopage(struct vm_area_struct *vma,
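The MMAP_VADDR macro above now takes the region base as an explicit first argument, so the same per-request, per-segment arithmetic serves both the kernel region (mmap_vstart) and the user region (user_vstart). A minimal sketch of that calculation, assuming PAGE_SIZE is 4096 and BLKIF_MAX_SEGMENTS_PER_REQUEST is 11 (both values are assumptions for illustration, not taken from this changeset):

    /* Sketch only: mirrors the MMAP_VADDR arithmetic above. */
    #define SKETCH_PAGE_SIZE      4096UL  /* assumed */
    #define SKETCH_SEGS_PER_REQ   11UL    /* assumed BLKIF_MAX_SEGMENTS_PER_REQUEST */
    #define SKETCH_PAGES_PER_REQ  (SKETCH_SEGS_PER_REQ + 1)

    static unsigned long sketch_mmap_vaddr(unsigned long start,
                                           unsigned long req,
                                           unsigned long seg)
    {
        return start
             + (req * SKETCH_PAGES_PER_REQ * SKETCH_PAGE_SIZE)
             + (seg * SKETCH_PAGE_SIZE);
    }

    /* e.g. request 3, segment 2 sits (3 * 12 + 2) = 38 pages past the base:
     * sketch_mmap_vaddr(base, 3, 2) == base + 38 * 4096.
     */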
    4.63 @@ -76,8 +105,6 @@ static int blktap_open(struct inode *ino
    4.64      
    4.65      if ( test_and_set_bit(0, &blktap_dev_inuse) )
    4.66          return -EBUSY;
    4.67 -
    4.68 -    printk(KERN_ALERT "blktap open.\n");
    4.69      
    4.70      /* Allocate the ctrl ring. */
    4.71      csring = (ctrl_sring_t *)get_zeroed_page(GFP_KERNEL);
    4.72 @@ -128,7 +155,7 @@ static int blktap_release(struct inode *
    4.73      blktap_dev_inuse = 0;
    4.74      blktap_ring_ok = 0;
    4.75  
    4.76 -    printk(KERN_ALERT "blktap closed.\n");
    4.77 +    DPRINTK(KERN_ALERT "blktap closed.\n");
    4.78  
    4.79      /* Free the ring page. */
    4.80      ClearPageReserved(virt_to_page(blktap_uctrl_ring.sring));
    4.81 @@ -140,7 +167,7 @@ static int blktap_release(struct inode *
    4.82      ClearPageReserved(virt_to_page(blktap_ube_ring.sring));
    4.83      free_page((unsigned long) blktap_ube_ring.sring);
    4.84  
    4.85 -    /* Clear any active mappings. */
    4.86 +    /* Clear any active mappings and free foreign map table */
    4.87      if (blktap_vma != NULL) {
    4.88          zap_page_range(blktap_vma, blktap_vma->vm_start, 
    4.89                         blktap_vma->vm_end - blktap_vma->vm_start, NULL);
    4.90 @@ -151,21 +178,36 @@ static int blktap_release(struct inode *
    4.91  }
    4.92  
    4.93  /* Note on mmap:
    4.94 - * remap_pfn_range sets VM_IO on vma->vm_flags.  In trying to make libaio
    4.95 - * work to do direct page access from userspace, this ended up being a
    4.96 - * problem.  The bigger issue seems to be that there is no way to map
    4.97 - * a foreign page in to user space and have the virtual address of that 
    4.98 - * page map sanely down to a mfn.
    4.99 - * Removing the VM_IO flag results in a loop in get_user_pages, as 
   4.100 - * pfn_valid() always fails on a foreign page.
    4.101 + * We need to map pages to user space in a way that will allow the block
    4.102 + * subsystem to set up direct IO to them.  This couldn't be done before, because
    4.103 + * there isn't really a sane way to map a user virtual address down to a 
    4.104 + * physical address when the page belongs to another domain.
   4.105 + *
   4.106 + * My first approach was to map the page in to kernel memory, add an entry
   4.107 + * for it in the physical frame list (using alloc_lomem_region as in blkback)
   4.108 + * and then attempt to map that page up to user space.  This is disallowed
   4.109 + * by xen though, which realizes that we don't really own the machine frame
   4.110 + * underlying the physical page.
   4.111 + *
   4.112 + * The new approach is to provide explicit support for this in xen linux.
   4.113 + * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
   4.114 + * mapped from other vms.  vma->vm_private_data is set up as a mapping 
   4.115 + * from pages to actual page structs.  There is a new clause in get_user_pages
   4.116 + * that does the right thing for this sort of mapping.
   4.117 + * 
   4.118 + * blktap_mmap sets up this mapping.  Most of the real work is done in
   4.119 + * blktap_write_fe_ring below.
   4.120   */
   4.121  static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
   4.122  {
   4.123      int size;
   4.124 +    struct page **map;
   4.125 +    int i;
   4.126  
   4.127 -    printk(KERN_ALERT "blktap mmap (%lx, %lx)\n",
   4.128 +    DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
   4.129             vma->vm_start, vma->vm_end);
   4.130  
   4.131 +    vma->vm_flags |= VM_RESERVED;
   4.132      vma->vm_ops = &blktap_vm_ops;
   4.133  
   4.134      size = vma->vm_end - vma->vm_start;
   4.135 @@ -177,10 +219,10 @@ static int blktap_mmap(struct file *filp
   4.136      }
   4.137  
   4.138      size >>= PAGE_SHIFT;
   4.139 -    printk(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
   4.140 +    DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
   4.141      
   4.142      rings_vstart = vma->vm_start;
   4.143 -    mmap_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
   4.144 +    user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
   4.145      
   4.146      /* Map the ring pages to the start of the region and reserve it. */
   4.147  
   4.148 @@ -190,29 +232,44 @@ static int blktap_mmap(struct file *filp
   4.149      DPRINTK("Mapping ctrl_ring page %lx.\n", __pa(blktap_uctrl_ring.sring));
   4.150      if (remap_pfn_range(vma, vma->vm_start, 
   4.151                           __pa(blktap_uctrl_ring.sring) >> PAGE_SHIFT, 
   4.152 -                         PAGE_SIZE, vma->vm_page_prot)) {
   4.153 -        WPRINTK("ctrl_ring: remap_pfn_range failure!\n");
   4.154 -    }
   4.155 +                         PAGE_SIZE, vma->vm_page_prot)) 
   4.156 +        goto fail;
   4.157  
   4.158  
   4.159      DPRINTK("Mapping be_ring page %lx.\n", __pa(blktap_ube_ring.sring));
   4.160      if (remap_pfn_range(vma, vma->vm_start + PAGE_SIZE, 
   4.161                           __pa(blktap_ube_ring.sring) >> PAGE_SHIFT, 
   4.162 -                         PAGE_SIZE, vma->vm_page_prot)) {
   4.163 -        WPRINTK("be_ring: remap_pfn_range failure!\n");
   4.164 -    }
   4.165 +                         PAGE_SIZE, vma->vm_page_prot)) 
   4.166 +        goto fail;
   4.167  
   4.168      DPRINTK("Mapping fe_ring page %lx.\n", __pa(blktap_ufe_ring.sring));
   4.169      if (remap_pfn_range(vma, vma->vm_start + ( 2 * PAGE_SIZE ), 
   4.170                           __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
   4.171 -                         PAGE_SIZE, vma->vm_page_prot)) {
   4.172 -        WPRINTK("fe_ring: remap_pfn_range failure!\n");
   4.173 -    }
   4.174 -            
   4.175 +                         PAGE_SIZE, vma->vm_page_prot)) 
   4.176 +        goto fail;
   4.177 +
   4.178 +    /* Mark this VM as containing foreign pages, and set up mappings. */
   4.179 +    map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
   4.180 +                  * sizeof(struct page_struct*),
   4.181 +                  GFP_KERNEL);
   4.182 +    if (map == NULL) goto fail;
   4.183 +
   4.184 +    for (i=0; i<((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
   4.185 +        map[i] = NULL;
   4.186 +    
   4.187 +    vma->vm_private_data = map;
   4.188 +    vma->vm_flags |= VM_FOREIGN;
   4.189 +
   4.190      blktap_vma = vma;
   4.191      blktap_ring_ok = 1;
   4.192  
   4.193      return 0;
   4.194 + fail:
   4.195 +    /* Clear any active mappings. */
   4.196 +    zap_page_range(vma, vma->vm_start, 
   4.197 +                   vma->vm_end - vma->vm_start, NULL);
   4.198 +
   4.199 +    return -ENOMEM;
   4.200  }
   4.201  
   4.202  static int blktap_ioctl(struct inode *inode, struct file *filp,
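blktap_mmap above allocates one struct page * slot per page of the VMA, stores the array in vma->vm_private_data, and tags the VMA with VM_FOREIGN; the new clause in get_user_pages is then expected to resolve user addresses through that table. A minimal sketch of such a lookup (the helper name is invented here for illustration):

    /* Sketch only: resolving a user address inside a VM_FOREIGN vma via the
     * per-vma table that blktap_mmap installs in vm_private_data.
     */
    static struct page *sketch_foreign_lookup(struct vm_area_struct *vma,
                                              unsigned long uvaddr)
    {
        struct page **map = vma->vm_private_data;
        unsigned long offset = (uvaddr - vma->vm_start) >> PAGE_SHIFT;

        return map[offset]; /* NULL until blktap_write_fe_ring fills it in */
    }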
   4.203 @@ -263,6 +320,8 @@ static unsigned int blktap_poll(struct f
   4.204               RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)   ||
   4.205               RING_HAS_UNPUSHED_RESPONSES(&blktap_ube_ring) ) {
   4.206  
   4.207 +            flush_tlb_all();
   4.208 +
   4.209              RING_PUSH_REQUESTS(&blktap_uctrl_ring);
   4.210              RING_PUSH_REQUESTS(&blktap_ufe_ring);
   4.211              RING_PUSH_RESPONSES(&blktap_ube_ring);
   4.212 @@ -290,10 +349,35 @@ static struct file_operations blktap_fop
   4.213  /*-----[ Data to/from user space ]----------------------------------------*/
   4.214  
   4.215  
   4.216 +static void fast_flush_area(int idx, int nr_pages)
   4.217 +{
   4.218 +    multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   4.219 +    int               i;
   4.220 +
   4.221 +    for ( i = 0; i < nr_pages; i++ )
   4.222 +    {
   4.223 +        MULTI_update_va_mapping(mcl+i, MMAP_VADDR(mmap_vstart, idx, i),
   4.224 +                                __pte(0), 0);
   4.225 +    }
   4.226 +
   4.227 +    mcl[nr_pages-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   4.228 +    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
   4.229 +        BUG();
   4.230 +}
   4.231 +
   4.232 +
   4.233 +extern int __direct_remap_area_pages(struct mm_struct *mm,
   4.234 +                                     unsigned long address,
   4.235 +                                     unsigned long size,
   4.236 +                                     mmu_update_t *v);
   4.237 +
   4.238  int blktap_write_fe_ring(blkif_request_t *req)
   4.239  {
   4.240      blkif_request_t *target;
   4.241 -    int error, i;
   4.242 +    int i;
   4.243 +    unsigned long remap_prot;
   4.244 +    multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST+1];
   4.245 +    mmu_update_t mmu[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   4.246  
   4.247      /*
   4.248       * This is called to pass a request from the real frontend domain's
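fast_flush_area above shows the batching idiom used throughout this change: queue one page-table update per page, request the TLB flush only on the final entry, and issue a single hypercall for the whole batch. A minimal sketch of the same pattern, written as an illustration rather than code from this tree:

    /* Sketch only: clear nr_pages PTEs starting at vstart with one hypercall,
     * deferring the TLB flush to the last multicall entry.
     */
    static void sketch_unmap_batch(unsigned long vstart, int nr_pages)
    {
        multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int i;

        for (i = 0; i < nr_pages; i++)
            MULTI_update_va_mapping(mcl + i, vstart + (i << PAGE_SHIFT),
                                    __pte(0), 0);

        /* Only the final entry carries the flush flags, so the hypervisor
         * flushes once for the whole batch rather than per page. */
        mcl[nr_pages - 1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH | UVMF_ALL;

        if (HYPERVISOR_multicall(mcl, nr_pages) != 0)
            BUG();
    }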
   4.249 @@ -310,26 +394,81 @@ int blktap_write_fe_ring(blkif_request_t
   4.250          return 0;
   4.251      }
   4.252  
   4.253 -    target = RING_GET_REQUEST(&blktap_ufe_ring,
   4.254 -            blktap_ufe_ring.req_prod_pvt);
   4.255 +    remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
   4.256 +    flush_cache_all(); /* a noop on intel... */
   4.257 +
   4.258 +    target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt);
   4.259      memcpy(target, req, sizeof(*req));
   4.260  
   4.261 -    /* Attempt to map the foreign pages directly in to the application */
   4.262 +    /* Map the foreign pages directly in to the application */
   4.263      for (i=0; i<target->nr_segments; i++) {
   4.264 +        unsigned long buf;
   4.265 +        unsigned long uvaddr;
   4.266 +        unsigned long kvaddr;
   4.267 +        unsigned long offset;
   4.268 +
   4.269 +        buf   = target->frame_and_sects[i] & PAGE_MASK;
   4.270 +        uvaddr = MMAP_VADDR(user_vstart, ID_TO_IDX(req->id), i);
   4.271 +        kvaddr = MMAP_VADDR(mmap_vstart, ID_TO_IDX(req->id), i);
   4.272 +
   4.273 +        MULTI_update_va_mapping_otherdomain(
   4.274 +            mcl+i, 
   4.275 +            kvaddr, 
   4.276 +            pfn_pte_ma(buf >> PAGE_SHIFT, __pgprot(remap_prot)),
   4.277 +            0,
   4.278 +            ID_TO_DOM(req->id));
   4.279 +
   4.280 +        phys_to_machine_mapping[__pa(kvaddr)>>PAGE_SHIFT] =
   4.281 +            FOREIGN_FRAME(buf >> PAGE_SHIFT);
   4.282  
   4.283 -        error = direct_remap_area_pages(blktap_vma->vm_mm, 
   4.284 -                                        MMAP_VADDR(ID_TO_IDX(req->id), i), 
   4.285 -                                        target->frame_and_sects[i] & PAGE_MASK,
   4.286 -                                        PAGE_SIZE,
   4.287 -                                        blktap_vma->vm_page_prot,
   4.288 -                                        ID_TO_DOM(req->id));
   4.289 -        if ( error != 0 ) {
   4.290 -            printk(KERN_INFO "remapping attached page failed! (%d)\n", error);
   4.291 -            /* the request is now dropped on the floor. */
   4.292 -            return 0;
   4.293 +        __direct_remap_area_pages(blktap_vma->vm_mm,
   4.294 +                                  uvaddr,
   4.295 +                                  PAGE_SIZE,
   4.296 +                                  &mmu[i]);
   4.297 +        mmu[i].val = (target->frame_and_sects[i] & PAGE_MASK)
   4.298 +            | pgprot_val(blktap_vma->vm_page_prot);
   4.299 +
   4.300 +        offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   4.301 +        ((struct page **)blktap_vma->vm_private_data)[offset] =
   4.302 +            pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
   4.303 +    }
   4.304 +    
   4.305 +    /* Add the mmu_update call. */
   4.306 +    mcl[i].op = __HYPERVISOR_mmu_update;
   4.307 +    mcl[i].args[0] = (unsigned long)mmu;
   4.308 +    mcl[i].args[1] = target->nr_segments;
   4.309 +    mcl[i].args[2] = 0;
   4.310 +    mcl[i].args[3] = ID_TO_DOM(req->id);
   4.311 +
   4.312 +    BUG_ON(HYPERVISOR_multicall(mcl, target->nr_segments+1) != 0);
   4.313 +
   4.314 +    /* Make sure it all worked. */
   4.315 +    for ( i = 0; i < target->nr_segments; i++ )
   4.316 +    {
   4.317 +        if ( unlikely(mcl[i].result != 0) )
   4.318 +        {
   4.319 +            DPRINTK("invalid buffer -- could not remap it\n");
   4.320 +            fast_flush_area(ID_TO_IDX(req->id), target->nr_segments);
   4.321 +            return -1;
   4.322          }
   4.323      }
   4.324 -    
   4.325 +    if ( unlikely(mcl[i].result != 0) )
   4.326 +    {
   4.327 +        DPRINTK("direct remapping of pages to /dev/blktap failed.\n");
   4.328 +        return -1;
   4.329 +    }
   4.330 +
   4.331 +
   4.332 +    /* Mark mapped pages as reserved: */
   4.333 +    for ( i = 0; i < target->nr_segments; i++ )
   4.334 +    {
   4.335 +        unsigned long kvaddr;
   4.336 +
   4.337 +        kvaddr = MMAP_VADDR(mmap_vstart, ID_TO_IDX(req->id), i);
   4.338 +        SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
   4.339 +    }
   4.340 +
   4.341 +
   4.342      blktap_ufe_ring.req_prod_pvt++;
   4.343      
   4.344      return 0;
   4.345 @@ -366,7 +505,7 @@ static int blktap_read_fe_ring(void)
   4.346  {
   4.347      /* This is called to read responses from the UFE ring. */
   4.348  
   4.349 -    RING_IDX i, rp;
   4.350 +    RING_IDX i, j, rp;
   4.351      blkif_response_t *resp_s;
   4.352      blkif_t *blkif;
   4.353      active_req_t *ar;
   4.354 @@ -387,8 +526,23 @@ static int blktap_read_fe_ring(void)
   4.355              DPRINTK("resp->fe_ring\n");
   4.356              ar = lookup_active_req(ID_TO_IDX(resp_s->id));
   4.357              blkif = ar->blkif;
   4.358 -            zap_page_range(blktap_vma, MMAP_VADDR(ID_TO_IDX(resp_s->id), 0), 
   4.359 +            for (j = 0; j < ar->nr_pages; j++) {
   4.360 +                unsigned long vaddr;
   4.361 +                struct page **map = blktap_vma->vm_private_data;
   4.362 +                int offset; 
   4.363 +
   4.364 +                vaddr  = MMAP_VADDR(user_vstart, ID_TO_IDX(resp_s->id), j);
   4.365 +                offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   4.366 +
   4.367 +                ClearPageReserved(virt_to_page(vaddr));
   4.368 +                map[offset] = NULL;
   4.369 +            }
   4.370 +
   4.371 +
   4.372 +            zap_page_range(blktap_vma, 
   4.373 +                    MMAP_VADDR(user_vstart, ID_TO_IDX(resp_s->id), 0), 
   4.374                      ar->nr_pages << PAGE_SHIFT, NULL);
   4.375 +            fast_flush_area(ID_TO_IDX(resp_s->id), ar->nr_pages);
   4.376              write_resp_to_fe_ring(blkif, resp_s);
   4.377              blktap_ufe_ring.rsp_cons = i + 1;
   4.378              kick_fe_domain(blkif);
   4.379 @@ -464,6 +618,9 @@ int blktap_init(void)
   4.380  {
   4.381      int err;
   4.382  
   4.383 +    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
   4.384 +        BUG();
   4.385 +
   4.386      err = misc_register(&blktap_miscdev);
   4.387      if ( err != 0 )
   4.388      {
     5.1 --- a/tools/blktap/blktaplib.c	Thu Aug 04 16:53:11 2005 +0000
     5.2 +++ b/tools/blktap/blktaplib.c	Thu Aug 04 16:53:30 2005 +0000
     5.3 @@ -34,7 +34,7 @@
     5.4  #else
     5.5  #define DPRINTF(_f, _a...) ((void)0)
     5.6  #endif
     5.7 -#define DEBUG_RING_IDXS 0
     5.8 +#define DEBUG_RING_IDXS 1
     5.9  
    5.10  #define POLLRDNORM     0x040 
    5.11  
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/tools/blktap/parallax/Makefile	Thu Aug 04 16:53:30 2005 +0000
     6.3 @@ -0,0 +1,64 @@
     6.4 +XEN_ROOT = ../../..
     6.5 +include $(XEN_ROOT)/tools/Rules.mk
     6.6 +
     6.7 +PARALLAX_INSTALL_DIR	= /usr/sbin
     6.8 +
     6.9 +INSTALL         = install
    6.10 +INSTALL_PROG    = $(INSTALL) -m0755
    6.11 +INSTALL_DIR     = $(INSTALL) -d -m0755
    6.12 +
    6.13 +INCLUDES += -I.. -I/usr/include -I $(XEN_LIBXC)
    6.14 +
    6.15 +LDFLAGS = -L.. -lpthread -lz -lblktap
    6.16 +
    6.17 +#PLX_SRCS := 
    6.18 +PLX_SRCS := vdi.c 
    6.19 +PLX_SRCS += radix.c 
    6.20 +PLX_SRCS += snaplog.c
    6.21 +PLX_SRCS += blockstore.c 
    6.22 +PLX_SRCS += block-async.c
    6.23 +PLX_SRCS += requests-async.c
    6.24 +VDI_SRCS := $(PLX_SRCS)
    6.25 +PLX_SRCS += parallax.c
    6.26 +
    6.27 +#VDI_TOOLS :=
    6.28 +VDI_TOOLS := vdi_create
    6.29 +VDI_TOOLS += vdi_list
    6.30 +VDI_TOOLS += vdi_snap
    6.31 +VDI_TOOLS += vdi_snap_list
    6.32 +VDI_TOOLS += vdi_snap_delete
    6.33 +VDI_TOOLS += vdi_fill
    6.34 +VDI_TOOLS += vdi_tree
    6.35 +VDI_TOOLS += vdi_validate
    6.36 +
    6.37 +CFLAGS   += -Wall
    6.38 +CFLAGS   += -Werror
    6.39 +CFLAGS   += -Wno-unused
    6.40 +#CFLAGS   += -O3
    6.41 +CFLAGS   += -g3
    6.42 +CFLAGS   += -fno-strict-aliasing
    6.43 +CFLAGS   += $(INCLUDES)
    6.44 +CFLAGS   += -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE
    6.45 +# Get gcc to generate the dependencies for us.
    6.46 +CFLAGS   += -Wp,-MD,.$(@F).d
    6.47 +DEPS     = .*.d
    6.48 +
    6.49 +OBJS     = $(patsubst %.c,%.o,$(SRCS))
    6.50 +IBINS    = parallax $(VDI_TOOLS)
    6.51 +
    6.52 +all: $(VDI_TOOLS) parallax blockstored
    6.53 +
    6.54 +install: all
    6.55 +	$(INSTALL_PROG) $(IBINS) $(DESTDIR)$(PARALLAX_INSTALL_DIR)
    6.56 +
    6.57 +clean:
    6.58 +	rm -rf *.o *~ $(DEPS) xen TAGS $(VDI_TOOLS) parallax vdi_unittest
    6.59 +
    6.60 +parallax: $(PLX_SRCS)
    6.61 +	$(CC) $(CFLAGS) -o parallax -L.. $(LDFLAGS) $(PLX_SRCS)
    6.62 +
    6.63 +${VDI_TOOLS}: %: %.c $(VDI_SRCS)
    6.64 +	$(CC) $(CFLAGS) -g3 -o $@ $@.c $(LDFLAGS) $(VDI_SRCS)
    6.65 +
    6.66 +.PHONY: TAGS clean install rpm
    6.67 +-include $(DEPS)
    6.68 \ No newline at end of file
     9.1 --- a/xen/include/public/io/blkif.h	Thu Aug 04 16:53:11 2005 +0000
     9.2 +++ b/xen/include/public/io/blkif.h	Thu Aug 04 16:53:30 2005 +0000
     9.3 @@ -47,7 +47,7 @@ typedef struct blkif_request {
     9.4      unsigned long  frame_and_sects[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     9.5  } blkif_request_t;
     9.6  
     9.7 -#define blkif_fas(_addr, _fs, _ls) ((addr)|((_fs)<<5)|(_ls))
     9.8 +#define blkif_fas(_addr, _fs, _ls) ((_addr)|((_fs)<<5)|(_ls))
     9.9  #define blkif_first_sect(_fas) (((_fas)>>5)&31)
    9.10  #define blkif_last_sect(_fas)  ((_fas)&31)
    9.11
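The corrected blkif_fas macro packs a machine frame address together with the first and last sector numbers of the transfer: bits 9..5 carry the first sector and bits 4..0 the last, which is why both decode macros mask with 31. A small worked example (the address and sector values are made up for illustration):

    /* Sketch only: encode and decode one frame_and_sects entry. */
    unsigned long frame = 0x12345000UL;            /* page-aligned machine address (example) */
    unsigned long fas   = blkif_fas(frame, 2, 6);  /* sectors 2..6 of that page */

    /* blkif_first_sect(fas) == 2, blkif_last_sect(fas) == 6, and
     * fas & PAGE_MASK recovers the frame address, which is how
     * blktap_write_fe_ring and the vbd.c probe above use the field.
     */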