ia64/xen-unstable

changeset 6029:9f0eff879d89

Blktap updates: request batching, O_DIRECT/AIO support.

This patch makes the blktap code that maps pages to user space faster,
and adds support for looking up foreign-mapped pages through Linux so
that user space can do direct I/O against them. An AIO test driver
built on this achieves performance comparable to the in-kernel block
backend.
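
The AIO test driver itself is not included in this changeset. Purely as a
hedged illustration of the user-space pattern it implies -- open the tap
device, mmap the ring and data region, and issue O_DIRECT reads into the
mapped pages with Linux AIO (link with -laio) -- consider the sketch below.
The /dev/blktap path and MAX_PENDING_REQS = 64 come from the diff; 11
segments per request (so 12 pages each) is an assumption, and error
handling is omitted.

    /* Hedged sketch only -- not the actual AIO test driver. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <libaio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define PG          4096
    #define RING_PAGES  3            /* ctrl, front and back ring pages */
    #define DATA_PAGES  (64 * 12)    /* assumes 11 segments per request */

    int example(const char *disk)
    {
        io_context_t ctx = 0;
        struct iocb cb, *cbs[1] = { &cb };
        struct io_event ev;

        int tap = open("/dev/blktap", O_RDWR);
        char *region = mmap(NULL, (RING_PAGES + DATA_PAGES) * PG,
                            PROT_READ | PROT_WRITE, MAP_SHARED, tap, 0);
        char *data = region + RING_PAGES * PG;   /* foreign data pages */

        int fd = open(disk, O_RDWR | O_DIRECT);  /* direct I/O on the disk */
        io_setup(8, &ctx);

        /* Read one disk page straight into a mapped (foreign) page; the
         * VM_FOREIGN support below is what lets get_user_pages resolve it. */
        io_prep_pread(&cb, fd, data, PG, 0);
        io_submit(ctx, 1, cbs);
        io_getevents(ctx, 1, 1, &ev, NULL);

        io_destroy(ctx);
        close(fd);
        close(tap);
        return 0;
    }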

Signed-off-by: andrew.warfield@cl.cam.ac.uk
author akw27@arcadians.cl.cam.ac.uk
date Thu Aug 04 16:35:35 2005 +0000 (2005-08-04)
parents 1d240086de52
children 8004acaa6684
files linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h linux-2.6-xen-sparse/drivers/xen/blktap/blktap_datapath.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c tools/blktap/blktaplib.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h	Thu Aug 04 15:02:09 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h	Thu Aug 04 16:35:35 2005 +0000
     1.3 @@ -103,8 +103,6 @@ typedef struct {
     1.4      blkif_t       *blkif;
     1.5      unsigned long  id;
     1.6      int            nr_pages;
     1.7 -    unsigned long  mach_fas[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     1.8 -    unsigned long  virt_fas[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     1.9      int            next_free;
    1.10  } active_req_t;
    1.11  
    1.12 @@ -172,32 +170,7 @@ static inline int BLKTAP_MODE_VALID(unsi
    1.13  
    1.14  
    1.15  /* -------[ Mappings to User VMA ]------------------------------------ */
    1.16 -#define MAX_PENDING_REQS 64
    1.17  #define BATCH_PER_DOMAIN 16
    1.18 -extern struct vm_area_struct *blktap_vma;
    1.19 -
    1.20 -/* The following are from blkback.c and should probably be put in a
    1.21 - * header and included from there.
     1.22 - * The mmap area described here is where attached data pages will be mapped.
    1.23 - */
    1.24 - 
    1.25 -extern unsigned long mmap_vstart;
    1.26 -#define MMAP_PAGES_PER_REQUEST \
    1.27 -    (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
    1.28 -#define MMAP_PAGES             \
    1.29 -    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
    1.30 -#define MMAP_VADDR(_req,_seg)                        \
    1.31 -    (mmap_vstart +                                   \
    1.32 -     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
    1.33 -     ((_seg) * PAGE_SIZE))
    1.34 -
    1.35 -/* immediately before the mmap area, we have a bunch of pages reserved
    1.36 - * for shared memory rings.
    1.37 - */
    1.38 -
    1.39 -#define RING_PAGES 3 /* Ctrl, Front, and Back */ 
    1.40 -extern unsigned long rings_vstart;
    1.41 -
    1.42  
    1.43  /* -------[ Here be globals ]----------------------------------------- */
    1.44  extern unsigned long blktap_mode;
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_datapath.c	Thu Aug 04 15:02:09 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_datapath.c	Thu Aug 04 16:35:35 2005 +0000
     2.3 @@ -280,8 +280,6 @@ static int do_block_io_op(blkif_t *blkif
     2.4      int more_to_do = 0;
     2.5      int notify_be = 0, notify_user = 0;
     2.6      
     2.7 -    if (NR_ACTIVE_REQS == MAX_ACTIVE_REQS) return 1;
     2.8 -    
     2.9      /* lock both rings */
    2.10      spin_lock_irqsave(&blkif_io_lock, flags);
    2.11  
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c	Thu Aug 04 15:02:09 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c	Thu Aug 04 16:35:35 2005 +0000
     3.3 @@ -19,6 +19,7 @@
     3.4  #include <linux/gfp.h>
     3.5  #include <linux/poll.h>
     3.6  #include <asm/pgalloc.h>
     3.7 +#include <asm/tlbflush.h>
     3.8  #include <asm-xen/xen-public/io/blkif.h> /* for control ring. */
     3.9  
    3.10  #include "blktap.h"
    3.11 @@ -33,11 +34,6 @@ unsigned long blktap_ring_ok; /* make th
    3.12  /* for poll: */
    3.13  static wait_queue_head_t blktap_wait;
    3.14  
    3.15 -/* Where things are inside the device mapping. */
    3.16 -struct vm_area_struct *blktap_vma = NULL;
    3.17 -unsigned long mmap_vstart;
    3.18 -unsigned long rings_vstart;
    3.19 -
    3.20  /* Rings up to user space. */
    3.21  static blkif_front_ring_t blktap_ufe_ring;
    3.22  static blkif_back_ring_t  blktap_ube_ring;
    3.23 @@ -47,6 +43,39 @@ static ctrl_front_ring_t  blktap_uctrl_r
    3.24  static int blktap_read_fe_ring(void);
    3.25  static int blktap_read_be_ring(void);
    3.26  
    3.27 +/* -------[ mmap region ]--------------------------------------------- */
    3.28 +/*
    3.29 + * We use a big chunk of address space to map in-flight requests into,
    3.30 + * and export this region up to user-space.  See the comments in blkback
    3.31 + * about this -- the two must be kept in sync if the tap is used as a 
    3.32 + * passthrough.
    3.33 + */
    3.34 +
    3.35 +#define MAX_PENDING_REQS 64
    3.36 +
    3.37 +/* immediately before the mmap area, we have a bunch of pages reserved
    3.38 + * for shared memory rings.
    3.39 + */
    3.40 +#define RING_PAGES 3 /* Ctrl, Front, and Back */ 
    3.41 +
    3.42 +/* Where things are inside the device mapping. */
    3.43 +struct vm_area_struct *blktap_vma = NULL;
    3.44 +unsigned long mmap_vstart;  /* Kernel pages for mapping in data. */
    3.45 +unsigned long rings_vstart; /* start of mmapped vma              */
    3.46 +unsigned long user_vstart;  /* start of user mappings            */
    3.47 +
    3.48 +#define MMAP_PAGES_PER_REQUEST \
    3.49 +    (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
    3.50 +#define MMAP_PAGES             \
    3.51 +    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
    3.52 +#define MMAP_VADDR(_start, _req,_seg)                \
    3.53 +    ( _start +                                       \
    3.54 +     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
    3.55 +     ((_seg) * PAGE_SIZE))
    3.56 +
    3.57 +
    3.58 +
    3.59 +
    3.60  /* -------[ blktap vm ops ]------------------------------------------- */
    3.61  
    3.62  static struct page *blktap_nopage(struct vm_area_struct *vma,
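
To make the address arithmetic of the macros above concrete, a worked
example (illustrative only; assumes BLKIF_MAX_SEGMENTS_PER_REQUEST == 11
and 4 KB pages, so MMAP_PAGES_PER_REQUEST == 12):

    /* Layout of the mmapped region (ring order per blktap_mmap below):
     *
     *   rings_vstart + 0*PAGE_SIZE   ctrl ring page
     *   rings_vstart + 1*PAGE_SIZE   back ring page
     *   rings_vstart + 2*PAGE_SIZE   front ring page
     *   user_vstart = rings_vstart + (RING_PAGES << PAGE_SHIFT)
     *
     * Each request then owns 12 consecutive pages, so e.g. segment 2 of
     * request 5 maps at:
     *
     *   MMAP_VADDR(user_vstart, 5, 2)
     *     = user_vstart + (5 * 12 * 4096) + (2 * 4096)
     *     = user_vstart + 0x3e000
     */
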
    3.63 @@ -76,8 +105,6 @@ static int blktap_open(struct inode *ino
    3.64      
    3.65      if ( test_and_set_bit(0, &blktap_dev_inuse) )
    3.66          return -EBUSY;
    3.67 -
    3.68 -    printk(KERN_ALERT "blktap open.\n");
    3.69      
    3.70      /* Allocate the ctrl ring. */
    3.71      csring = (ctrl_sring_t *)get_zeroed_page(GFP_KERNEL);
    3.72 @@ -128,7 +155,7 @@ static int blktap_release(struct inode *
    3.73      blktap_dev_inuse = 0;
    3.74      blktap_ring_ok = 0;
    3.75  
    3.76 -    printk(KERN_ALERT "blktap closed.\n");
    3.77 +    DPRINTK(KERN_ALERT "blktap closed.\n");
    3.78  
    3.79      /* Free the ring page. */
    3.80      ClearPageReserved(virt_to_page(blktap_uctrl_ring.sring));
    3.81 @@ -140,7 +167,7 @@ static int blktap_release(struct inode *
    3.82      ClearPageReserved(virt_to_page(blktap_ube_ring.sring));
    3.83      free_page((unsigned long) blktap_ube_ring.sring);
    3.84  
    3.85 -    /* Clear any active mappings. */
    3.86 +    /* Clear any active mappings and free the foreign map table. */
    3.87      if (blktap_vma != NULL) {
    3.88          zap_page_range(blktap_vma, blktap_vma->vm_start, 
    3.89                         blktap_vma->vm_end - blktap_vma->vm_start, NULL);
    3.90 @@ -151,21 +178,36 @@ static int blktap_release(struct inode *
    3.91  }
    3.92  
    3.93  /* Note on mmap:
    3.94 - * remap_pfn_range sets VM_IO on vma->vm_flags.  In trying to make libaio
    3.95 - * work to do direct page access from userspace, this ended up being a
    3.96 - * problem.  The bigger issue seems to be that there is no way to map
    3.97 - * a foreign page in to user space and have the virtual address of that 
    3.98 - * page map sanely down to a mfn.
    3.99 - * Removing the VM_IO flag results in a loop in get_user_pages, as 
   3.100 - * pfn_valid() always fails on a foreign page.
    3.101 + * We need to map pages to user space in a way that will allow the block
    3.102 + * subsystem to set up direct IO to them.  This couldn't be done before,
    3.103 + * because there isn't really a sane way to resolve a user virtual address
    3.104 + * down to a physical address when the page belongs to another domain.
    3.105 + *
    3.106 + * My first approach was to map the page into kernel memory, add an entry
    3.107 + * for it in the physical frame list (using alloc_lomem_region as in blkback)
    3.108 + * and then attempt to map that page up to user space.  This is disallowed
    3.109 + * by xen though, which realizes that we don't really own the machine frame
    3.110 + * underlying the physical page.
    3.111 + *
    3.112 + * The new approach is to provide explicit support for this in xen linux.
    3.113 + * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
    3.114 + * mapped from other VMs.  vma->vm_private_data is set up as a mapping
    3.115 + * from offsets in the VMA to the actual page structs.  There is a new
    3.116 + * clause in get_user_pages that does the right thing for this sort of mapping.
   3.117 + * 
   3.118 + * blktap_mmap sets up this mapping.  Most of the real work is done in
   3.119 + * blktap_write_fe_ring below.
   3.120   */
   3.121  static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
   3.122  {
   3.123      int size;
   3.124 +    struct page **map;
   3.125 +    int i;
   3.126  
   3.127 -    printk(KERN_ALERT "blktap mmap (%lx, %lx)\n",
   3.128 +    DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
   3.129             vma->vm_start, vma->vm_end);
   3.130  
   3.131 +    vma->vm_flags |= VM_RESERVED;
   3.132      vma->vm_ops = &blktap_vm_ops;
   3.133  
   3.134      size = vma->vm_end - vma->vm_start;
   3.135 @@ -177,10 +219,10 @@ static int blktap_mmap(struct file *filp
   3.136      }
   3.137  
   3.138      size >>= PAGE_SHIFT;
   3.139 -    printk(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
   3.140 +    DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
   3.141      
   3.142      rings_vstart = vma->vm_start;
   3.143 -    mmap_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
   3.144 +    user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
   3.145      
   3.146      /* Map the ring pages to the start of the region and reserve it. */
   3.147  
   3.148 @@ -190,29 +232,44 @@ static int blktap_mmap(struct file *filp
   3.149      DPRINTK("Mapping ctrl_ring page %lx.\n", __pa(blktap_uctrl_ring.sring));
   3.150      if (remap_pfn_range(vma, vma->vm_start, 
   3.151                           __pa(blktap_uctrl_ring.sring) >> PAGE_SHIFT, 
   3.152 -                         PAGE_SIZE, vma->vm_page_prot)) {
   3.153 -        WPRINTK("ctrl_ring: remap_pfn_range failure!\n");
   3.154 -    }
   3.155 +                         PAGE_SIZE, vma->vm_page_prot)) 
   3.156 +        goto fail;
   3.157  
   3.158  
   3.159      DPRINTK("Mapping be_ring page %lx.\n", __pa(blktap_ube_ring.sring));
   3.160      if (remap_pfn_range(vma, vma->vm_start + PAGE_SIZE, 
   3.161                           __pa(blktap_ube_ring.sring) >> PAGE_SHIFT, 
   3.162 -                         PAGE_SIZE, vma->vm_page_prot)) {
   3.163 -        WPRINTK("be_ring: remap_pfn_range failure!\n");
   3.164 -    }
   3.165 +                         PAGE_SIZE, vma->vm_page_prot)) 
   3.166 +        goto fail;
   3.167  
   3.168      DPRINTK("Mapping fe_ring page %lx.\n", __pa(blktap_ufe_ring.sring));
   3.169      if (remap_pfn_range(vma, vma->vm_start + ( 2 * PAGE_SIZE ), 
   3.170                           __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
   3.171 -                         PAGE_SIZE, vma->vm_page_prot)) {
   3.172 -        WPRINTK("fe_ring: remap_pfn_range failure!\n");
   3.173 -    }
   3.174 -            
   3.175 +                         PAGE_SIZE, vma->vm_page_prot)) 
   3.176 +        goto fail;
   3.177 +
   3.178 +    /* Mark this VM as containing foreign pages, and set up mappings. */
    3.179 +    map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
    3.180 +                  * sizeof(struct page *),
    3.181 +                  GFP_KERNEL);
   3.182 +    if (map == NULL) goto fail;
   3.183 +
   3.184 +    for (i=0; i<((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
   3.185 +        map[i] = NULL;
   3.186 +    
   3.187 +    vma->vm_private_data = map;
   3.188 +    vma->vm_flags |= VM_FOREIGN;
   3.189 +
   3.190      blktap_vma = vma;
   3.191      blktap_ring_ok = 1;
   3.192  
   3.193      return 0;
   3.194 + fail:
   3.195 +    /* Clear any active mappings. */
   3.196 +    zap_page_range(vma, vma->vm_start, 
   3.197 +                   vma->vm_end - vma->vm_start, NULL);
   3.198 +
   3.199 +    return -ENOMEM;
   3.200  }
   3.201  
   3.202  static int blktap_ioctl(struct inode *inode, struct file *filp,
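
The "new clause in get_user_pages" that the comment above refers to lives
in the core Xen-Linux memory-management changes and is not shown in this
diff. Purely as a hypothetical sketch of the mechanism -- variable names
and exact placement are assumptions -- it amounts to something like this
inside get_user_pages()'s per-page loop:

    /* Hypothetical sketch, not the actual patch.  A VM_FOREIGN vma
     * carries a struct page * table in vm_private_data, so a foreign
     * frame is resolved from that table rather than the page tables
     * (where pfn_valid() would fail for it).
     */
    if (vma->vm_flags & VM_FOREIGN) {
        struct page **fmap = (struct page **)vma->vm_private_data;
        struct page *pg = fmap[(start - vma->vm_start) >> PAGE_SHIFT];

        if (pg == NULL)
            return i ? i : -EFAULT;   /* no request mapped at this slot */

        if (pages) {
            pages[i] = pg;
            get_page(pg);             /* pin it, as for a normal page */
        }
        if (vmas)
            vmas[i] = vma;
        i++;
        start += PAGE_SIZE;
        len--;
        continue;
    }
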
   3.203 @@ -263,6 +320,8 @@ static unsigned int blktap_poll(struct f
   3.204               RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)   ||
   3.205               RING_HAS_UNPUSHED_RESPONSES(&blktap_ube_ring) ) {
   3.206  
   3.207 +            flush_tlb_all();
   3.208 +
   3.209              RING_PUSH_REQUESTS(&blktap_uctrl_ring);
   3.210              RING_PUSH_REQUESTS(&blktap_ufe_ring);
   3.211              RING_PUSH_RESPONSES(&blktap_ube_ring);
   3.212 @@ -290,10 +349,35 @@ static struct file_operations blktap_fop
   3.213  /*-----[ Data to/from user space ]----------------------------------------*/
   3.214  
   3.215  
   3.216 +static void fast_flush_area(int idx, int nr_pages)
   3.217 +{
   3.218 +    multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   3.219 +    int               i;
   3.220 +
   3.221 +    for ( i = 0; i < nr_pages; i++ )
   3.222 +    {
   3.223 +        MULTI_update_va_mapping(mcl+i, MMAP_VADDR(mmap_vstart, idx, i),
   3.224 +                                __pte(0), 0);
   3.225 +    }
   3.226 +
   3.227 +    mcl[nr_pages-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   3.228 +    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
   3.229 +        BUG();
   3.230 +}
   3.231 +
   3.232 +
   3.233 +extern int __direct_remap_area_pages(struct mm_struct *mm,
   3.234 +                                     unsigned long address,
   3.235 +                                     unsigned long size,
   3.236 +                                     mmu_update_t *v);
   3.237 +
   3.238  int blktap_write_fe_ring(blkif_request_t *req)
   3.239  {
   3.240      blkif_request_t *target;
   3.241 -    int error, i;
   3.242 +    int i;
   3.243 +    unsigned long remap_prot;
   3.244 +    multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST+1];
   3.245 +    mmu_update_t mmu[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   3.246  
   3.247      /*
   3.248       * This is called to pass a request from the real frontend domain's
   3.249 @@ -310,26 +394,81 @@ int blktap_write_fe_ring(blkif_request_t
   3.250          return 0;
   3.251      }
   3.252  
   3.253 -    target = RING_GET_REQUEST(&blktap_ufe_ring,
   3.254 -            blktap_ufe_ring.req_prod_pvt);
   3.255 +    remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
   3.256 +    flush_cache_all(); /* a noop on intel... */
   3.257 +
   3.258 +    target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt);
   3.259      memcpy(target, req, sizeof(*req));
   3.260  
   3.261 -    /* Attempt to map the foreign pages directly in to the application */
   3.262 +    /* Map the foreign pages directly in to the application */
   3.263      for (i=0; i<target->nr_segments; i++) {
   3.264 +        unsigned long buf;
   3.265 +        unsigned long uvaddr;
   3.266 +        unsigned long kvaddr;
   3.267 +        unsigned long offset;
   3.268 +
   3.269 +        buf   = target->frame_and_sects[i] & PAGE_MASK;
   3.270 +        uvaddr = MMAP_VADDR(user_vstart, ID_TO_IDX(req->id), i);
   3.271 +        kvaddr = MMAP_VADDR(mmap_vstart, ID_TO_IDX(req->id), i);
   3.272 +
   3.273 +        MULTI_update_va_mapping_otherdomain(
   3.274 +            mcl+i, 
   3.275 +            kvaddr, 
   3.276 +            pfn_pte_ma(buf >> PAGE_SHIFT, __pgprot(remap_prot)),
   3.277 +            0,
   3.278 +            ID_TO_DOM(req->id));
   3.279 +
   3.280 +        phys_to_machine_mapping[__pa(kvaddr)>>PAGE_SHIFT] =
   3.281 +            FOREIGN_FRAME(buf >> PAGE_SHIFT);
   3.282  
   3.283 -        error = direct_remap_area_pages(blktap_vma->vm_mm, 
   3.284 -                                        MMAP_VADDR(ID_TO_IDX(req->id), i), 
   3.285 -                                        target->frame_and_sects[i] & PAGE_MASK,
   3.286 -                                        PAGE_SIZE,
   3.287 -                                        blktap_vma->vm_page_prot,
   3.288 -                                        ID_TO_DOM(req->id));
   3.289 -        if ( error != 0 ) {
   3.290 -            printk(KERN_INFO "remapping attached page failed! (%d)\n", error);
   3.291 -            /* the request is now dropped on the floor. */
   3.292 -            return 0;
   3.293 +        __direct_remap_area_pages(blktap_vma->vm_mm,
   3.294 +                                  uvaddr,
   3.295 +                                  PAGE_SIZE,
   3.296 +                                  &mmu[i]);
   3.297 +        mmu[i].val = (target->frame_and_sects[i] & PAGE_MASK)
   3.298 +            | pgprot_val(blktap_vma->vm_page_prot);
   3.299 +
   3.300 +        offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   3.301 +        ((struct page **)blktap_vma->vm_private_data)[offset] =
   3.302 +            pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
   3.303 +    }
   3.304 +    
   3.305 +    /* Add the mmu_update call. */
   3.306 +    mcl[i].op = __HYPERVISOR_mmu_update;
   3.307 +    mcl[i].args[0] = (unsigned long)mmu;
   3.308 +    mcl[i].args[1] = target->nr_segments;
   3.309 +    mcl[i].args[2] = 0;
   3.310 +    mcl[i].args[3] = ID_TO_DOM(req->id);
   3.311 +
   3.312 +    BUG_ON(HYPERVISOR_multicall(mcl, target->nr_segments+1) != 0);
   3.313 +
   3.314 +    /* Make sure it all worked. */
   3.315 +    for ( i = 0; i < target->nr_segments; i++ )
   3.316 +    {
   3.317 +        if ( unlikely(mcl[i].result != 0) )
   3.318 +        {
   3.319 +            DPRINTK("invalid buffer -- could not remap it\n");
   3.320 +            fast_flush_area(ID_TO_IDX(req->id), target->nr_segments);
   3.321 +            return -1;
   3.322          }
   3.323      }
   3.324 -    
   3.325 +    if ( unlikely(mcl[i].result != 0) )
   3.326 +    {
   3.327 +        DPRINTK("direct remapping of pages to /dev/blktap failed.\n");
   3.328 +        return -1;
   3.329 +    }
   3.330 +
   3.331 +
   3.332 +    /* Mark mapped pages as reserved: */
   3.333 +    for ( i = 0; i < target->nr_segments; i++ )
   3.334 +    {
   3.335 +        unsigned long kvaddr;
   3.336 +
   3.337 +        kvaddr = MMAP_VADDR(mmap_vstart, ID_TO_IDX(req->id), i);
   3.338 +        SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
   3.339 +    }
   3.340 +
   3.341 +
   3.342      blktap_ufe_ring.req_prod_pvt++;
   3.343      
   3.344      return 0;
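
This hunk is where the "request batching" in the changeset title happens:
rather than one hypercall per segment, the mapping work is queued and
issued as a single multicall. Schematically (a restatement of the code
above, not a new interface):

    /* Per request, for n = target->nr_segments:
     *   for each segment i:
     *     mcl[i] <- update_va_mapping_otherdomain(kernel vaddr -> foreign frame)
     *     mmu[i] <- user-space PTE, prepared via __direct_remap_area_pages
     *   mcl[n]   <- one mmu_update applying all n user PTEs
     *   HYPERVISOR_multicall(mcl, n + 1)   -- a single trap into Xen
     */
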
   3.345 @@ -366,7 +505,7 @@ static int blktap_read_fe_ring(void)
   3.346  {
   3.347      /* This is called to read responses from the UFE ring. */
   3.348  
   3.349 -    RING_IDX i, rp;
   3.350 +    RING_IDX i, j, rp;
   3.351      blkif_response_t *resp_s;
   3.352      blkif_t *blkif;
   3.353      active_req_t *ar;
   3.354 @@ -387,8 +526,23 @@ static int blktap_read_fe_ring(void)
   3.355              DPRINTK("resp->fe_ring\n");
   3.356              ar = lookup_active_req(ID_TO_IDX(resp_s->id));
   3.357              blkif = ar->blkif;
   3.358 -            zap_page_range(blktap_vma, MMAP_VADDR(ID_TO_IDX(resp_s->id), 0), 
   3.359 +            for (j = 0; j < ar->nr_pages; j++) {
   3.360 +                unsigned long vaddr;
   3.361 +                struct page **map = blktap_vma->vm_private_data;
   3.362 +                int offset; 
   3.363 +
   3.364 +                vaddr  = MMAP_VADDR(user_vstart, ID_TO_IDX(resp_s->id), j);
   3.365 +                offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   3.366 +
   3.367 +                ClearPageReserved(virt_to_page(vaddr));
   3.368 +                map[offset] = NULL;
   3.369 +            }
   3.370 +
   3.371 +
   3.372 +            zap_page_range(blktap_vma, 
   3.373 +                    MMAP_VADDR(user_vstart, ID_TO_IDX(resp_s->id), 0), 
   3.374                      ar->nr_pages << PAGE_SHIFT, NULL);
   3.375 +            fast_flush_area(ID_TO_IDX(resp_s->id), ar->nr_pages);
   3.376              write_resp_to_fe_ring(blkif, resp_s);
   3.377              blktap_ufe_ring.rsp_cons = i + 1;
   3.378              kick_fe_domain(blkif);
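
The completion path above mirrors the setup in blktap_write_fe_ring.
Schematically (again only restating the code in this hunk):

    /* Per completed request:
     *   for each page j:
     *     ClearPageReserved(...); map[offset] = NULL;  -- drop fmap entry
     *   zap_page_range(user vaddrs)                    -- clear user PTEs
     *   fast_flush_area(idx, nr_pages)                 -- batched kernel PTE
     *                                                     clear + TLB flush
     */
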
   3.379 @@ -464,6 +618,9 @@ int blktap_init(void)
   3.380  {
   3.381      int err;
   3.382  
   3.383 +    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
   3.384 +        BUG();
   3.385 +
   3.386      err = misc_register(&blktap_miscdev);
   3.387      if ( err != 0 )
   3.388      {
     4.1 --- a/tools/blktap/blktaplib.c	Thu Aug 04 15:02:09 2005 +0000
     4.2 +++ b/tools/blktap/blktaplib.c	Thu Aug 04 16:35:35 2005 +0000
     4.3 @@ -34,7 +34,7 @@
     4.4  #else
     4.5  #define DPRINTF(_f, _a...) ((void)0)
     4.6  #endif
     4.7 -#define DEBUG_RING_IDXS 0
     4.8 +#define DEBUG_RING_IDXS 1
     4.9  
    4.10  #define POLLRDNORM     0x040 
    4.11