ia64/xen-unstable

changeset 6206:eaf498f1ffde

Add grant table support to block tap.

This patch adds grant table support to the block tap. The AIO support
introduced in patch 9f0eff879d8913a824280cf67658a530c80e8424 still
works: the tap code maps each granted page twice, once into the kernel
and once into user space. The kernel mapping is patched into the p2m
table, and pages added to the user vm_area are resolved to the
appropriate underlying struct pages via the VM_FOREIGN hooks in
get_user_pages().
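
For reference, the double mapping at the heart of the change looks
roughly like the sketch below, condensed from the blktap_write_fe_ring()
hunk in this patch. The helper name map_grant_twice() and the exact set
of includes are illustrative only and are not introduced by the patch.

/* Sketch: map one granted frame twice -- into the kernel at kvaddr and
 * into the tap process at uvaddr (whose PTE Xen fills in directly). */
#include <asm-xen/hypervisor.h>
#include <asm-xen/xen-public/grant_table.h>

static int map_grant_twice(grant_ref_t ref, domid_t dom, int readonly,
                           unsigned long kvaddr, unsigned long uvaddr,
                           struct mm_struct *mm)
{
    struct gnttab_map_grant_ref map[2];
    unsigned long ptep;

    /* Kernel mapping: host_addr is a kernel virtual address. */
    map[0].host_addr = kvaddr;
    map[0].dom       = dom;
    map[0].ref       = ref;
    map[0].flags     = GNTMAP_host_map | (readonly ? GNTMAP_readonly : 0);

    /* User mapping: host_addr is the machine address of the user PTE;
     * GNTMAP_contains_pte tells Xen to write the PTE itself. */
    if (create_lookup_pte_addr(mm, uvaddr, &ptep) != 0)
        return -EFAULT;
    map[1].host_addr = ptep;
    map[1].dom       = dom;
    map[1].ref       = ref;
    map[1].flags     = GNTMAP_host_map | GNTMAP_application_map |
                       GNTMAP_contains_pte |
                       (readonly ? GNTMAP_readonly : 0);

    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, 2) != 0)
        BUG();
    if (map[0].handle < 0 || map[1].handle < 0)
        return -EINVAL;

    /* Patch the kernel page into the p2m table so __pa()/pfn_to_page()
     * on kvaddr yield a real struct page for the VM_FOREIGN lookup path. */
    phys_to_machine_mapping[__pa(kvaddr) >> PAGE_SHIFT] =
        FOREIGN_FRAME(map[0].dev_bus_addr);

    return 0;
}

Both grant handles must be saved (see pending_grant_handles below) so
that fast_flush_area() can later issue the matching
GNTTABOP_unmap_grant_ref calls for the kernel and user mappings.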

Comparing block IO from dom0, from a domU using the existing block
backend, and from a domU with the tap managing the same partition as
the BE from user space with AIO, I get the following performance:

Version 1.03         ------Sequential Output------ --Sequential Input- --Random-
                     -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--
Machine         Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP  /sec %CP
xen0              2G 31198  95 56818   8 20967   2 28415  77 59595   4 264.9   0
xenU-blkbe2cpuGT  2G 31157  96 54026  10 25585   4 30664  90 64919   7 292.7   0
xenU-blktp2cpuGT  2G 32313  97 54217   8 20950   3 28117  87 65924   4 191.8   0

Signed-off-by: andrew.warfield@cl.cam.ac.uk
author akw27@arcadians.cl.cam.ac.uk
date Tue Aug 16 10:12:18 2005 +0000 (2005-08-16)
parents 0237746ecf92
children 3d187585c141
files linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h linux-2.6-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Tue Aug 16 07:07:11 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Tue Aug 16 10:12:18 2005 +0000
     1.3 @@ -23,6 +23,9 @@ int __init xlblktap_init(void)
     1.4      blkif_be_driver_status_t be_st;
     1.5  
     1.6      printk(KERN_INFO "Initialising Xen block tap device\n");
     1.7 +#ifdef CONFIG_XEN_BLKDEV_GRANT
     1.8 +    printk(KERN_INFO "Block tap is using grant tables.\n");
     1.9 +#endif
    1.10  
    1.11      DPRINTK("   tap - Backend connection init:\n");
    1.12  
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h	Tue Aug 16 07:07:11 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.h	Tue Aug 16 10:12:18 2005 +0000
     2.3 @@ -85,6 +85,11 @@ typedef struct blkif_st {
     2.4      spinlock_t          blk_ring_lock;
     2.5      atomic_t            refcnt;
     2.6      struct work_struct work;
     2.7 +#ifdef CONFIG_XEN_BLKDEV_GRANT
     2.8 +    u16 shmem_handle;
     2.9 +    memory_t shmem_vaddr;
    2.10 +    grant_ref_t shmem_ref;
    2.11 +#endif
    2.12  } blkif_t;
    2.13  
    2.14  blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c	Tue Aug 16 07:07:11 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c	Tue Aug 16 10:12:18 2005 +0000
     3.3 @@ -9,6 +9,7 @@
     3.4   */
     3.5   
     3.6  #include "blktap.h"
     3.7 +#include <asm-xen/evtchn.h>
     3.8  
     3.9  static char *blkif_state_name[] = {
    3.10      [BLKIF_STATE_CLOSED]       = "closed",
    3.11 @@ -48,12 +49,21 @@ static void __blkif_disconnect_complete(
    3.12      blkif_t              *blkif = (blkif_t *)arg;
    3.13      ctrl_msg_t            cmsg;
    3.14      blkif_be_disconnect_t disc;
    3.15 +#ifdef CONFIG_XEN_BLKDEV_GRANT
    3.16 +    struct gnttab_unmap_grant_ref op;
    3.17 +#endif
    3.18  
    3.19      /*
    3.20       * These can't be done in blkif_disconnect() because at that point there
    3.21       * may be outstanding requests at the disc whose asynchronous responses
    3.22       * must still be notified to the remote driver.
    3.23       */
    3.24 +#ifdef CONFIG_XEN_BLKDEV_GRANT
    3.25 +    op.host_addr = blkif->shmem_vaddr;
    3.26 +    op.handle         = blkif->shmem_handle;
    3.27 +    op.dev_bus_addr   = 0;
    3.28 +    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
    3.29 +#endif
    3.30      vfree(blkif->blk_ring.sring);
    3.31  
    3.32      /* Construct the deferred response message. */
    3.33 @@ -177,8 +187,12 @@ void blkif_ptfe_connect(blkif_be_connect
    3.34      unsigned int   evtchn = connect->evtchn;
    3.35      unsigned long  shmem_frame = connect->shmem_frame;
    3.36      struct vm_struct *vma;
    3.37 +#ifdef CONFIG_XEN_BLKDEV_GRANT
    3.38 +    int ref = connect->shmem_ref;
    3.39 +#else
    3.40      pgprot_t       prot;
    3.41      int            error;
    3.42 +#endif
    3.43      blkif_t       *blkif;
    3.44      blkif_sring_t *sring;
    3.45  
    3.46 @@ -199,24 +213,46 @@ void blkif_ptfe_connect(blkif_be_connect
    3.47          return;
    3.48      }
    3.49  
    3.50 -    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
    3.51 +#ifndef CONFIG_XEN_BLKDEV_GRANT
    3.52 +    prot = __pgprot(_KERNPG_TABLE);
    3.53      error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
    3.54                                      shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
    3.55                                      prot, domid);
    3.56      if ( error != 0 )
    3.57      {
    3.58 -        WPRINTK("BE_CONNECT: error! (%d)\n", error);
    3.59          if ( error == -ENOMEM ) 
    3.60              connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
    3.61 -        else if ( error == -EFAULT ) {
    3.62 +        else if ( error == -EFAULT )
    3.63              connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
    3.64 -            WPRINTK("BE_CONNECT: MAPPING error!\n");
    3.65 -        }
    3.66          else
    3.67              connect->status = BLKIF_BE_STATUS_ERROR;
    3.68          vfree(vma->addr);
    3.69          return;
    3.70      }
    3.71 +#else
    3.72 +    { /* Map: Use the Grant table reference */
    3.73 +        struct gnttab_map_grant_ref op;
    3.74 +        op.host_addr = VMALLOC_VMADDR(vma->addr);
    3.75 +        op.flags            = GNTMAP_host_map;
    3.76 +        op.ref              = ref;
    3.77 +        op.dom              = domid;
    3.78 +       
    3.79 +        BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
    3.80 +       
    3.81 +        handle = op.handle;
    3.82 +       
    3.83 +        if (op.handle < 0) {
    3.84 +            DPRINTK(" Grant table operation failure !\n");
    3.85 +            connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
    3.86 +            vfree(vma->addr);
    3.87 +            return;
    3.88 +        }
    3.89 +
    3.90 +        blkif->shmem_ref = ref;
    3.91 +        blkif->shmem_handle = handle;
    3.92 +        blkif->shmem_vaddr = VMALLOC_VMADDR(vma->addr);
    3.93 +    }
    3.94 +#endif
    3.95  
    3.96      if ( blkif->status != DISCONNECTED )
    3.97      {
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c	Tue Aug 16 07:07:11 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c	Tue Aug 16 10:12:18 2005 +0000
     4.3 @@ -21,6 +21,9 @@
     4.4  #include <asm/pgalloc.h>
     4.5  #include <asm/tlbflush.h>
     4.6  #include <asm-xen/xen-public/io/blkif.h> /* for control ring. */
     4.7 +#ifdef CONFIG_XEN_BLKDEV_GRANT
     4.8 +#include <asm-xen/xen-public/grant_table.h>
     4.9 +#endif
    4.10  
    4.11  #include "blktap.h"
    4.12  
    4.13 @@ -43,6 +46,7 @@ static ctrl_front_ring_t  blktap_uctrl_r
    4.14  static int blktap_read_fe_ring(void);
    4.15  static int blktap_read_be_ring(void);
    4.16  
    4.17 +
    4.18  /* -------[ mmap region ]--------------------------------------------- */
    4.19  /*
    4.20   * We use a big chunk of address space to map in-flight requests into,
    4.21 @@ -73,7 +77,28 @@ unsigned long user_vstart;  /* start of 
    4.22       ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
    4.23       ((_seg) * PAGE_SIZE))
    4.24  
    4.25 +/* -------[ grant handles ]------------------------------------------- */
    4.26  
    4.27 +#ifdef CONFIG_XEN_BLKDEV_GRANT
    4.28 +/* When using grant tables to map a frame for device access then the
    4.29 + * handle returned must be used to unmap the frame. This is needed to
    4.30 + * drop the ref count on the frame.
    4.31 + */
    4.32 +struct grant_handle_pair
    4.33 +{
    4.34 +    u16  kernel;
    4.35 +    u16  user;
    4.36 +};
    4.37 +static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
    4.38 +#define pending_handle(_idx, _i) \
    4.39 +    (pending_grant_handles[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
    4.40 +#define BLKTAP_INVALID_HANDLE(_g) \
    4.41 +    (((_g->kernel) == 0xFFFF) && ((_g->user) == 0xFFFF))
    4.42 +#define BLKTAP_INVALIDATE_HANDLE(_g) do {       \
    4.43 +    (_g)->kernel = 0xFFFF; (_g)->user = 0xFFFF; \
    4.44 +    } while(0)
    4.45 +    
    4.46 +#endif
    4.47  
    4.48  
    4.49  /* -------[ blktap vm ops ]------------------------------------------- */
    4.50 @@ -348,9 +373,43 @@ static struct file_operations blktap_fop
    4.51      
    4.52  /*-----[ Data to/from user space ]----------------------------------------*/
    4.53  
    4.54 -
    4.55  static void fast_flush_area(int idx, int nr_pages)
    4.56  {
    4.57 +#ifdef CONFIG_XEN_BLKDEV_GRANT
    4.58 +    struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
    4.59 +    unsigned int i, op = 0;
    4.60 +    struct grant_handle_pair *handle;
    4.61 +    unsigned long ptep;
    4.62 +
    4.63 +    for (i=0; i<nr_pages; i++)
    4.64 +    {
    4.65 +        handle = &pending_handle(idx, i);
    4.66 +        if (!BLKTAP_INVALID_HANDLE(handle))
    4.67 +        {
    4.68 +
    4.69 +            unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
    4.70 +            unmap[op].dev_bus_addr = 0;
    4.71 +            unmap[op].handle = handle->kernel;
    4.72 +            op++;
    4.73 +
    4.74 +            if (create_lookup_pte_addr(blktap_vma->vm_mm,
    4.75 +                                       MMAP_VADDR(user_vstart, idx, i), 
    4.76 +                                       &ptep) !=0) {
    4.77 +                DPRINTK("Couldn't get a pte addr!\n");
    4.78 +                return;
    4.79 +            }
    4.80 +            unmap[op].host_addr    = ptep;
    4.81 +            unmap[op].dev_bus_addr = 0;
    4.82 +            unmap[op].handle       = handle->user;
    4.83 +            op++;
    4.84 +            
    4.85 +            BLKTAP_INVALIDATE_HANDLE(handle);
    4.86 +        }
    4.87 +    }
    4.88 +    if ( unlikely(HYPERVISOR_grant_table_op(
    4.89 +        GNTTABOP_unmap_grant_ref, unmap, op)))
    4.90 +        BUG();
    4.91 +#else
    4.92      multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    4.93      int               i;
    4.94  
    4.95 @@ -363,21 +422,22 @@ static void fast_flush_area(int idx, int
    4.96      mcl[nr_pages-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
    4.97      if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
    4.98          BUG();
    4.99 +#endif
   4.100  }
   4.101  
   4.102  
   4.103 -extern int __direct_remap_area_pages(struct mm_struct *mm,
   4.104 -                                     unsigned long address,
   4.105 -                                     unsigned long size,
   4.106 -                                     mmu_update_t *v);
   4.107 -
   4.108  int blktap_write_fe_ring(blkif_request_t *req)
   4.109  {
   4.110      blkif_request_t *target;
   4.111 -    int i;
   4.112 +    int i, ret = 0;
   4.113 +#ifdef CONFIG_XEN_BLKDEV_GRANT
   4.114 +    struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
   4.115 +    int op;
   4.116 +#else
   4.117      unsigned long remap_prot;
   4.118      multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST+1];
   4.119      mmu_update_t mmu[BLKIF_MAX_SEGMENTS_PER_REQUEST];
   4.120 +#endif
   4.121  
   4.122      /*
   4.123       * This is called to pass a request from the real frontend domain's
   4.124 @@ -394,18 +454,109 @@ int blktap_write_fe_ring(blkif_request_t
   4.125          return 0;
   4.126      }
   4.127  
   4.128 -    remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
   4.129      flush_cache_all(); /* a noop on intel... */
   4.130  
   4.131      target = RING_GET_REQUEST(&blktap_ufe_ring, blktap_ufe_ring.req_prod_pvt);
   4.132      memcpy(target, req, sizeof(*req));
   4.133  
   4.134      /* Map the foreign pages directly in to the application */
   4.135 +#ifdef CONFIG_XEN_BLKDEV_GRANT
   4.136 +    op = 0;
   4.137 +    for (i=0; i<target->nr_segments; i++) {
   4.138 +
   4.139 +        unsigned long uvaddr;
   4.140 +        unsigned long kvaddr;
   4.141 +        unsigned long ptep;
   4.142 +
   4.143 +        uvaddr = MMAP_VADDR(user_vstart, ID_TO_IDX(req->id), i);
   4.144 +        kvaddr = MMAP_VADDR(mmap_vstart, ID_TO_IDX(req->id), i);
   4.145 +
   4.146 +        /* Map the remote page to kernel. */
   4.147 +        map[op].host_addr = kvaddr;
   4.148 +        map[op].dom   = ID_TO_DOM(req->id);
   4.149 +        map[op].ref   = blkif_gref_from_fas(target->frame_and_sects[i]);
   4.150 +        map[op].flags = GNTMAP_host_map;
   4.151 +        /* This needs a bit more thought in terms of interposition: 
   4.152 +         * If we want to be able to modify pages during write using 
   4.153 +         * grant table mappings, the guest will either need to allow 
   4.154 +         * it, or we'll need to incur a copy. */
   4.155 +        if (req->operation == BLKIF_OP_WRITE)
   4.156 +            map[op].flags |= GNTMAP_readonly;
   4.157 +        op++;
   4.158 +
   4.159 +        /* Now map it to user. */
   4.160 +        ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
   4.161 +        if (ret)
   4.162 +        {
   4.163 +            DPRINTK("Couldn't get a pte addr!\n");
   4.164 +            goto fail;
   4.165 +        }
   4.166 +
   4.167 +        map[op].host_addr = ptep;
   4.168 +        map[op].dom       = ID_TO_DOM(req->id);
   4.169 +        map[op].ref       = blkif_gref_from_fas(target->frame_and_sects[i]);
   4.170 +        map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
   4.171 +                            | GNTMAP_contains_pte;
   4.172 +        /* Above interposition comment applies here as well. */
   4.173 +        if (req->operation == BLKIF_OP_WRITE)
   4.174 +            map[op].flags |= GNTMAP_readonly;
   4.175 +        op++;
   4.176 +    }
   4.177 +
   4.178 +    if ( unlikely(HYPERVISOR_grant_table_op(
   4.179 +            GNTTABOP_map_grant_ref, map, op)))
   4.180 +        BUG();
   4.181 +
   4.182 +    op = 0;
   4.183 +    for (i=0; i<(target->nr_segments*2); i+=2) {
   4.184 +        unsigned long uvaddr;
   4.185 +        unsigned long kvaddr;
   4.186 +        unsigned long offset;
   4.187 +        int cancel = 0;
   4.188 +
   4.189 +        uvaddr = MMAP_VADDR(user_vstart, ID_TO_IDX(req->id), i/2);
   4.190 +        kvaddr = MMAP_VADDR(mmap_vstart, ID_TO_IDX(req->id), i/2);
   4.191 +
   4.192 +        if ( unlikely(map[i].handle < 0) ) {
   4.193 +            DPRINTK("Error on kernel grant mapping (%d)\n", map[i].handle);
   4.194 +            ret = map[i].handle;
   4.195 +            cancel = 1;
   4.196 +        }
   4.197 +
   4.198 +        if ( unlikely(map[i+1].handle < 0) ) {
   4.199 +            DPRINTK("Error on user grant mapping (%d)\n", map[i+1].handle);
   4.200 +            ret = map[i+1].handle;
   4.201 +            cancel = 1;
   4.202 +        }
   4.203 +
   4.204 +        if (cancel) 
   4.205 +            goto fail;
   4.206 +
   4.207 +        /* Set the necessary mappings in p2m and in the VM_FOREIGN 
   4.208 +         * vm_area_struct to allow user vaddr -> struct page lookups
   4.209 +         * to work.  This is needed for direct IO to foreign pages. */
   4.210 +        phys_to_machine_mapping[__pa(kvaddr)>>PAGE_SHIFT] =
   4.211 +            FOREIGN_FRAME(map[i].dev_bus_addr);
   4.212 +
   4.213 +        offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
   4.214 +        ((struct page **)blktap_vma->vm_private_data)[offset] =
   4.215 +            pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
   4.216 +
   4.217 +        /* Save handles for unmapping later. */
   4.218 +        pending_handle(ID_TO_IDX(req->id), i/2).kernel = map[i].handle;
   4.219 +        pending_handle(ID_TO_IDX(req->id), i/2).user   = map[i+1].handle;
   4.220 +    }
   4.221 +    
   4.222 +#else
   4.223 +
   4.224 +    remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
   4.225 +
   4.226      for (i=0; i<target->nr_segments; i++) {
   4.227          unsigned long buf;
   4.228          unsigned long uvaddr;
   4.229          unsigned long kvaddr;
   4.230          unsigned long offset;
   4.231 +        unsigned long ptep;
   4.232  
   4.233          buf   = target->frame_and_sects[i] & PAGE_MASK;
   4.234          uvaddr = MMAP_VADDR(user_vstart, ID_TO_IDX(req->id), i);
   4.235 @@ -421,10 +572,14 @@ int blktap_write_fe_ring(blkif_request_t
   4.236          phys_to_machine_mapping[__pa(kvaddr)>>PAGE_SHIFT] =
   4.237              FOREIGN_FRAME(buf >> PAGE_SHIFT);
   4.238  
   4.239 -        __direct_remap_area_pages(blktap_vma->vm_mm,
   4.240 -                                  uvaddr,
   4.241 -                                  PAGE_SIZE,
   4.242 -                                  &mmu[i]);
   4.243 +        ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
   4.244 +        if (ret)
   4.245 +        { 
   4.246 +            DPRINTK("error getting pte\n");
   4.247 +            goto fail;
   4.248 +        }
   4.249 +
   4.250 +        mmu[i].ptr = ptep;
   4.251          mmu[i].val = (target->frame_and_sects[i] & PAGE_MASK)
   4.252              | pgprot_val(blktap_vma->vm_page_prot);
   4.253  
   4.254 @@ -448,16 +603,17 @@ int blktap_write_fe_ring(blkif_request_t
   4.255          if ( unlikely(mcl[i].result != 0) )
   4.256          {
   4.257              DPRINTK("invalid buffer -- could not remap it\n");
   4.258 -            fast_flush_area(ID_TO_IDX(req->id), target->nr_segments);
   4.259 -            return -1;
   4.260 +            ret = mcl[i].result;
   4.261 +            goto fail;
   4.262          }
   4.263      }
   4.264      if ( unlikely(mcl[i].result != 0) )
   4.265      {
   4.266          DPRINTK("direct remapping of pages to /dev/blktap failed.\n");
   4.267 -        return -1;
   4.268 +        ret = mcl[i].result;
   4.269 +        goto fail;
   4.270      }
   4.271 -
   4.272 +#endif /* CONFIG_XEN_BLKDEV_GRANT */
   4.273  
   4.274      /* Mark mapped pages as reserved: */
   4.275      for ( i = 0; i < target->nr_segments; i++ )
   4.276 @@ -472,6 +628,10 @@ int blktap_write_fe_ring(blkif_request_t
   4.277      blktap_ufe_ring.req_prod_pvt++;
   4.278      
   4.279      return 0;
   4.280 +
   4.281 + fail:
   4.282 +    fast_flush_area(ID_TO_IDX(req->id), target->nr_segments);
   4.283 +    return ret;
   4.284  }
   4.285  
   4.286  int blktap_write_be_ring(blkif_response_t *rsp)
   4.287 @@ -538,11 +698,10 @@ static int blktap_read_fe_ring(void)
   4.288                  map[offset] = NULL;
   4.289              }
   4.290  
   4.291 -
   4.292 +            fast_flush_area(ID_TO_IDX(resp_s->id), ar->nr_pages);
   4.293              zap_page_range(blktap_vma, 
   4.294                      MMAP_VADDR(user_vstart, ID_TO_IDX(resp_s->id), 0), 
   4.295                      ar->nr_pages << PAGE_SHIFT, NULL);
   4.296 -            fast_flush_area(ID_TO_IDX(resp_s->id), ar->nr_pages);
   4.297              write_resp_to_fe_ring(blkif, resp_s);
   4.298              blktap_ufe_ring.rsp_cons = i + 1;
   4.299              kick_fe_domain(blkif);
   4.300 @@ -616,11 +775,17 @@ static struct miscdevice blktap_miscdev 
   4.301  
   4.302  int blktap_init(void)
   4.303  {
   4.304 -    int err;
   4.305 +    int err, i, j;
   4.306  
   4.307      if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
   4.308          BUG();
   4.309  
   4.310 +#ifdef CONFIG_XEN_BLKDEV_GRANT
   4.311 +    for (i=0; i<MAX_PENDING_REQS ; i++)
   4.312 +        for (j=0; j<BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
   4.313 +            BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
   4.314 +#endif
   4.315 +
   4.316      err = misc_register(&blktap_miscdev);
   4.317      if ( err != 0 )
   4.318      {