ia64/xen-unstable
changeset 4416:70ce80d7e30c
bitkeeper revision 1.1236.1.180 (424d0cc0fMbtHkfJJ78Iu20ay7GbmA)
Clean up blkback data path -- each scatter-gather request maps to
a contiguous extent of a single disk (we no longer directly create
VBDs out of multiple physical extents but leave that kind of thing to
LVM). Also, the 2.6 datapath creates the smallest number of bios
it can, to avoid unnecessary re-merging in the lower block layers.
Signed-off-by: Keir Fraser <keir@xensource.com>
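
The core of the new 2.6 datapath is the coalescing loop in dispatch_rw_block_io(): each segment is appended to the current bio with bio_add_page(), and a fresh bio is allocated only when there is no current bio or the queue refuses the page. A simplified sketch of that loop follows (names and fields mirror the patch below; error handling, the foreign-page remapping, and queue plugging are omitted):

```c
/* Sketch only: assumes seg[], nseg, preq, pending_idx, pending_req and
 * MMAP_VADDR() have been set up as in the patch, and that bio_alloc()
 * failures are handled elsewhere. */
struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
int i, nbio = 0;

for ( i = 0; i < nseg; i++ )
{
    /* Keep appending to the current bio; open a new one only when the
     * block layer refuses to take the page (or no bio exists yet). */
    while ( (bio == NULL) ||
            (bio_add_page(bio, virt_to_page(MMAP_VADDR(pending_idx, i)),
                          seg[i].nsec << 9,
                          seg[i].buf & ~PAGE_MASK) < (seg[i].nsec << 9)) )
    {
        bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg - i);
        bio->bi_bdev    = preq.bdev;           /* one physical device per request */
        bio->bi_sector  = preq.sector_number;  /* contiguous extent: just advances */
        bio->bi_end_io  = end_block_io_op;
        bio->bi_private = pending_req;
    }
    preq.sector_number += seg[i].nsec;
}

for ( i = 0; i < nbio; i++ )
    submit_bio(operation, biolist[i]);
```

Because vbd_translate() now guarantees the whole request is a contiguous extent of a single physical device, one running sector counter is enough, and in the common case the loop produces a single bio.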
| author | kaf24@firebug.cl.cam.ac.uk |
|---|---|
| date | Fri Apr 01 08:56:32 2005 +0000 (2005-04-01) |
| parents | 6db79d6cee87 |
| children | 5fbc250ab25b |
| files | linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c, linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h, linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c |
line diff
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c  Fri Apr 01 08:00:10 2005 +0000
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c  Fri Apr 01 08:56:32 2005 +0000
@@ -26,13 +26,11 @@
 #define BATCH_PER_DOMAIN 16
 
 static unsigned long mmap_vstart;
-#define MMAP_PAGES_PER_REQUEST \
-    (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
-#define MMAP_PAGES \
-    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
-#define MMAP_VADDR(_req,_seg) \
-    (mmap_vstart + \
-     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+#define MMAP_PAGES \
+    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg) \
+    (mmap_vstart + \
+     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
      ((_seg) * PAGE_SIZE))
 
 /*
@@ -102,7 +100,7 @@ static void make_response(blkif_t *blkif
 
 static void fast_flush_area(int idx, int nr_pages)
 {
-    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+    multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     int i;
 
     for ( i = 0; i < nr_pages; i++ )
@@ -384,94 +382,82 @@ static void dispatch_rw_block_io(blkif_t
 {
     extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
     int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
-    short nr_sects;
-    unsigned long buffer, fas;
-    int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+    unsigned long fas, remap_prot;
+    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
     pending_req_t *pending_req;
-    unsigned long remap_prot;
-    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
-
-    /* We map virtual scatter/gather segments to physical segments. */
-    int new_segs, nr_psegs = 0;
-    phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
+    multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    struct phys_req preq;
+    struct {
+        unsigned long buf; unsigned int nsec;
+    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    unsigned int nseg;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    struct buffer_head *bh;
+#else
+    struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    int nbio = 0;
+    request_queue_t *q;
+#endif
 
     /* Check that number of segments is sane. */
-    if ( unlikely(req->nr_segments == 0) ||
-         unlikely(req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
+    nseg = req->nr_segments;
+    if ( unlikely(nseg == 0) ||
+         unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
     {
-        DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
+        DPRINTK("Bad number of segments in request (%d)\n", nseg);
         goto bad_descriptor;
     }
 
-    /*
-     * Check each address/size pair is sane, and convert into a
-     * physical device and block offset. Note that if the offset and size
-     * crosses a virtual extent boundary, we may end up with more
-     * physical scatter/gather segments than virtual segments.
-     */
-    for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
-    {
-        fas = req->frame_and_sects[i];
-        buffer = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
-        nr_sects = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
-
-        if ( nr_sects <= 0 )
-            goto bad_descriptor;
+    preq.dev = req->device;
+    preq.sector_number = req->sector_number;
+    preq.nr_sects = 0;
 
-        phys_seg[nr_psegs].dev = req->device;
-        phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
-        phys_seg[nr_psegs].buffer = buffer;
-        phys_seg[nr_psegs].nr_sects = nr_sects;
-
-        /* Translate the request into the relevant 'physical device' */
-        new_segs = vbd_translate(&phys_seg[nr_psegs], blkif, operation);
-        if ( new_segs < 0 )
-        {
-            DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
-                    operation == READ ? "read" : "write",
-                    req->sector_number + tot_sects,
-                    req->sector_number + tot_sects + nr_sects,
-                    req->device);
+    for ( i = 0; i < nseg; i++ )
+    {
+        fas = req->frame_and_sects[i];
+        seg[i].buf = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
+        seg[i].nsec = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+        if ( seg[i].nsec <= 0 )
            goto bad_descriptor;
-        }
-
-        nr_psegs += new_segs;
-        ASSERT(nr_psegs <= (BLKIF_MAX_SEGMENTS_PER_REQUEST+1));
+        preq.nr_sects += seg[i].nsec;
     }
 
-    /* Nonsensical zero-sized request? */
-    if ( unlikely(nr_psegs == 0) )
+    if ( vbd_translate(&preq, blkif, operation) != 0 )
+    {
+        DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
+                operation == READ ? "read" : "write", preq.sector_number,
+                preq.sector_number + preq.nr_sects, preq.dev);
         goto bad_descriptor;
+    }
 
     if ( operation == READ )
         remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
     else
         remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED;
 
-    for ( i = 0; i < nr_psegs; i++ )
+    for ( i = 0; i < nseg; i++ )
     {
         mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
         mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
-        mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
+        mcl[i].args[1] = (seg[i].buf & PAGE_MASK) | remap_prot;
         mcl[i].args[2] = 0;
+        mcl[i].args[3] = blkif->domid;
 #ifdef CONFIG_XEN_BLKDEV_TAP_BE
-        mcl[i].args[3] = (blkif->is_blktap) ? ID_TO_DOM(req->id) : blkif->domid;
-#else
-        mcl[i].args[3] = blkif->domid;
+        if ( blkif->is_blktap )
+            mcl[i].args[3] = ID_TO_DOM(req->id);
#endif
         phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
-            FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
+            FOREIGN_FRAME(seg[i].buf >> PAGE_SHIFT);
     }
 
-    if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
-        BUG();
+    BUG_ON(HYPERVISOR_multicall(mcl, nseg) != 0);
 
-    for ( i = 0; i < nr_psegs; i++ )
+    for ( i = 0; i < nseg; i++ )
     {
         if ( unlikely(mcl[i].args[5] != 0) )
         {
             DPRINTK("invalid buffer -- could not remap it\n");
-            fast_flush_area(pending_idx, nr_psegs);
+            fast_flush_area(pending_idx, nseg);
             goto bad_descriptor;
         }
     }
@@ -481,19 +467,17 @@ static void dispatch_rw_block_io(blkif_t
     pending_req->id = req->id;
     pending_req->operation = operation;
     pending_req->status = BLKIF_RSP_OKAY;
-    pending_req->nr_pages = nr_psegs;
-    atomic_set(&pending_req->pendcnt, nr_psegs);
+    pending_req->nr_pages = nseg;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+
+    atomic_set(&pending_req->pendcnt, nseg);
     pending_cons++;
-
     blkif_get(blkif);
 
-    /* Now we pass each segment down to the real blkdev layer. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-    for ( i = 0; i < nr_psegs; i++ )
+    for ( i = 0; i < nseg; i++ )
     {
-        struct buffer_head *bh;
-
-        bh = kmem_cache_alloc(buffer_head_cachep, GFP_ATOMIC);
+        bh = kmem_cache_alloc(buffer_head_cachep, GFP_KERNEL);
         if ( unlikely(bh == NULL) )
         {
             __end_block_io_op(pending_req, 0);
@@ -503,12 +487,12 @@ static void dispatch_rw_block_io(blkif_t
         memset(bh, 0, sizeof (struct buffer_head));
 
         init_waitqueue_head(&bh->b_wait);
-        bh->b_size = phys_seg[i].nr_sects << 9;
-        bh->b_dev = phys_seg[i].dev;
-        bh->b_rdev = phys_seg[i].dev;
-        bh->b_rsector = (unsigned long)phys_seg[i].sector_number;
+        bh->b_size = seg[i].nsec << 9;
+        bh->b_dev = preq.dev;
+        bh->b_rdev = preq.dev;
+        bh->b_rsector = (unsigned long)preq.sector_number;
         bh->b_data = (char *)MMAP_VADDR(pending_idx, i) +
-            (phys_seg[i].buffer & ~PAGE_MASK);
+            (seg[i].buf & ~PAGE_MASK);
         bh->b_page = virt_to_page(MMAP_VADDR(pending_idx, i));
         bh->b_end_io = end_block_io_op;
         bh->b_private = pending_req;
@@ -522,40 +506,53 @@ static void dispatch_rw_block_io(blkif_t
 
         /* Dispatch a single request. We'll flush it to disc later. */
         generic_make_request(operation, bh);
+
+        preq.sector_number += seg[i].nsec;
     }
+
 #else
-    for ( i = 0; i < nr_psegs; i++ )
+
+    for ( i = 0; i < nseg; i++ )
     {
-        struct bio *bio;
-        request_queue_t *q;
-
-        bio = bio_alloc(GFP_ATOMIC, 1);
-        if ( unlikely(bio == NULL) )
+        while ( (bio == NULL) ||
+                (bio_add_page(bio,
+                              virt_to_page(MMAP_VADDR(pending_idx, i)),
+                              seg[i].nsec << 9,
+                              seg[i].buf & ~PAGE_MASK) <
+                 (seg[i].nsec << 9)) )
         {
-            __end_block_io_op(pending_req, 0);
-            continue;
+            bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
+            if ( unlikely(bio == NULL) )
+            {
+                for ( i = 0; i < (nbio-1); i++ )
+                    bio_put(biolist[i]);
+                fast_flush_area(pending_idx, nseg);
+                goto bad_descriptor;
+            }
+
+            bio->bi_bdev = preq.bdev;
+            bio->bi_private = pending_req;
+            bio->bi_end_io = end_block_io_op;
+            bio->bi_sector = preq.sector_number;
         }
 
-        bio->bi_bdev = phys_seg[i].bdev;
-        bio->bi_private = pending_req;
-        bio->bi_end_io = end_block_io_op;
-        bio->bi_sector = phys_seg[i].sector_number;
+        preq.sector_number += seg[i].nsec;
+    }
 
-        bio_add_page(
-            bio,
-            virt_to_page(MMAP_VADDR(pending_idx, i)),
-            phys_seg[i].nr_sects << 9,
-            phys_seg[i].buffer & ~PAGE_MASK);
+    if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
+    {
+        flush_plugged_queue();
+        blk_get_queue(q);
+        plugged_queue = q;
+    }
 
-        if ( (q = bdev_get_queue(bio->bi_bdev)) != plugged_queue )
-        {
-            flush_plugged_queue();
-            blk_get_queue(q);
-            plugged_queue = q;
-        }
+    atomic_set(&pending_req->pendcnt, nbio);
+    pending_cons++;
+    blkif_get(blkif);
 
-        submit_bio(operation, bio);
-    }
+    for ( i = 0; i < nbio; i++ )
+        submit_bio(operation, biolist[i]);
+
 #endif
 
     return;
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h  Fri Apr 01 08:00:10 2005 +0000
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h  Fri Apr 01 08:56:32 2005 +0000
@@ -80,30 +80,19 @@ blkif_t *blkif_find_by_handle(domid_t do
         blkif_disconnect_complete(_b); \
     } while (0)
 
-typedef struct _vbd {
-    blkif_vdev_t vdevice;   /* what the domain refers to this vbd as */
-    unsigned char readonly; /* Non-zero -> read-only */
-    unsigned char type;     /* VDISK_TYPE_xxx */
-    blkif_pdev_t pdevice;   /* phys device that this vbd maps to */
-    struct block_device *bdev;
-    rb_node_t rb;           /* for linking into R-B tree lookup struct */
-} vbd_t;
-
 void vbd_create(blkif_be_vbd_create_t *create);
 void vbd_destroy(blkif_be_vbd_destroy_t *delete);
 int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds);
 void destroy_all_vbds(blkif_t *blkif);
 
-/* Describes a [partial] disk extent (part of a block io request) */
-typedef struct {
+struct phys_req {
     unsigned short dev;
     unsigned short nr_sects;
     struct block_device *bdev;
-    unsigned long buffer;
     blkif_sector_t sector_number;
-} phys_seg_t;
+};
 
-int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation);
+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
 
 void blkif_interface_init(void);
 void blkif_ctrlif_init(void);
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c  Fri Apr 01 08:00:10 2005 +0000
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c  Fri Apr 01 08:56:32 2005 +0000
@@ -12,6 +12,15 @@
 
 #include "common.h"
 
+struct vbd {
+    blkif_vdev_t vdevice;   /* what the domain refers to this vbd as */
+    unsigned char readonly; /* Non-zero -> read-only */
+    unsigned char type;     /* VDISK_TYPE_xxx */
+    blkif_pdev_t pdevice;   /* phys device that this vbd maps to */
+    struct block_device *bdev;
+    rb_node_t rb;           /* for linking into R-B tree lookup struct */
+};
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 static inline dev_t vbd_map_devnum(blkif_pdev_t cookie)
 { return MKDEV(cookie>>8, cookie&0xff); }
@@ -25,7 +34,7 @@ static inline dev_t vbd_map_devnum(blkif
 
 void vbd_create(blkif_be_vbd_create_t *create)
 {
-    vbd_t *vbd;
+    struct vbd *vbd;
     rb_node_t **rb_p, *rb_parent = NULL;
     blkif_t *blkif;
     blkif_vdev_t vdevice = create->vdevice;
@@ -43,7 +52,7 @@ void vbd_create(blkif_be_vbd_create_t *c
     while ( *rb_p != NULL )
     {
         rb_parent = *rb_p;
-        vbd = rb_entry(rb_parent, vbd_t, rb);
+        vbd = rb_entry(rb_parent, struct vbd, rb);
         if ( vdevice < vbd->vdevice )
         {
             rb_p = &rb_parent->rb_left;
@@ -60,7 +69,7 @@ void vbd_create(blkif_be_vbd_create_t *c
         }
     }
 
-    if ( unlikely((vbd = kmalloc(sizeof(vbd_t), GFP_KERNEL)) == NULL) )
+    if ( unlikely((vbd = kmalloc(sizeof(struct vbd), GFP_KERNEL)) == NULL) )
     {
         DPRINTK("vbd_create: out of memory\n");
         create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
@@ -115,7 +124,7 @@ void vbd_create(blkif_be_vbd_create_t *c
 void vbd_destroy(blkif_be_vbd_destroy_t *destroy)
 {
     blkif_t *blkif;
-    vbd_t *vbd;
+    struct vbd *vbd;
     rb_node_t *rb;
     blkif_vdev_t vdevice = destroy->vdevice;
 
@@ -131,7 +140,7 @@ void vbd_destroy(blkif_be_vbd_destroy_t
     rb = blkif->vbd_rb.rb_node;
     while ( rb != NULL )
     {
-        vbd = rb_entry(rb, vbd_t, rb);
+        vbd = rb_entry(rb, struct vbd, rb);
         if ( vdevice < vbd->vdevice )
             rb = rb->rb_left;
         else if ( vdevice > vbd->vdevice )
@@ -154,14 +163,14 @@ void vbd_destroy(blkif_be_vbd_destroy_t
 
 void destroy_all_vbds(blkif_t *blkif)
 {
-    vbd_t     *vbd;
-    rb_node_t *rb;
+    struct vbd *vbd;
+    rb_node_t  *rb;
 
     spin_lock(&blkif->vbd_lock);
 
     while ( (rb = blkif->vbd_rb.rb_node) != NULL )
     {
-        vbd = rb_entry(rb, vbd_t, rb);
+        vbd = rb_entry(rb, struct vbd, rb);
         rb_erase(rb, &blkif->vbd_rb);
         spin_unlock(&blkif->vbd_lock);
         bdev_put(vbd->bdev);
@@ -173,7 +182,8 @@ void destroy_all_vbds(blkif_t *blkif)
 }
 
 
-static void vbd_probe_single(blkif_t *blkif, vdisk_t *vbd_info, vbd_t *vbd)
+static void vbd_probe_single(
+    blkif_t *blkif, vdisk_t *vbd_info, struct vbd *vbd)
 {
     vbd_info->device = vbd->vdevice;
     vbd_info->info = vbd->type | (vbd->readonly ? VDISK_FLAG_RO : 0);
@@ -199,7 +209,8 @@ int vbd_probe(blkif_t *blkif, vdisk_t *v
     for ( ; ; )
     {
         /* STEP 2. Dealt with left subtree. Now process current node. */
-        vbd_probe_single(blkif, &vbd_info[nr_vbds], rb_entry(rb, vbd_t, rb));
+        vbd_probe_single(blkif, &vbd_info[nr_vbds],
+                         rb_entry(rb, struct vbd, rb));
         if ( ++nr_vbds == max_vbds )
             goto out;
 
@@ -232,11 +243,11 @@ int vbd_probe(blkif_t *blkif, vdisk_t *v
 }
 
 
-int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation)
+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
 {
-    vbd_t     *vbd;
-    rb_node_t *rb;
-    int        rc = -EACCES;
+    struct vbd *vbd;
+    rb_node_t  *rb;
+    int         rc = -EACCES;
 
     /* Take the vbd_lock because another thread could be updating the tree. */
     spin_lock(&blkif->vbd_lock);
@@ -244,10 +255,10 @@ int vbd_translate(phys_seg_t *pseg, blki
     rb = blkif->vbd_rb.rb_node;
     while ( rb != NULL )
     {
-        vbd = rb_entry(rb, vbd_t, rb);
-        if ( pseg->dev < vbd->vdevice )
+        vbd = rb_entry(rb, struct vbd, rb);
+        if ( req->dev < vbd->vdevice )
             rb = rb->rb_left;
-        else if ( pseg->dev > vbd->vdevice )
+        else if ( req->dev > vbd->vdevice )
             rb = rb->rb_right;
         else
             goto found;
@@ -263,12 +274,12 @@ int vbd_translate(phys_seg_t *pseg, blki
     if ( (operation == WRITE) && vbd->readonly )
         goto out;
 
-    if ( unlikely((pseg->sector_number + pseg->nr_sects) > vbd_sz(vbd)) )
+    if ( unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)) )
         goto out;
 
-    pseg->dev = vbd->pdevice;
-    pseg->bdev = vbd->bdev;
-    rc = 1;
+    req->dev = vbd->pdevice;
+    req->bdev = vbd->bdev;
+    rc = 0;
 
  out:
     spin_unlock(&blkif->vbd_lock);
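
Note the change to the vbd_translate() contract in the last hunk: it now takes a struct phys_req describing the whole request and returns 0 on success (rather than a positive segment count), returning -EACCES when the VBD is missing, read-only for a write, or the range falls outside vbd_sz(); on success it rewrites dev/bdev to the physical device. A caller's-eye sketch, with hypothetical names for values the caller already has:

```c
/* Sketch of the new calling convention; 'total_sectors' stands in for the
 * sum of the request's segment lengths computed by the dispatcher. */
struct phys_req preq;

preq.dev           = req->device;        /* virtual device id named by the guest */
preq.sector_number = req->sector_number; /* start sector within the VBD */
preq.nr_sects      = total_sectors;

if ( vbd_translate(&preq, blkif, operation) != 0 )  /* 0 == success, else -EACCES */
    goto bad_descriptor;

/* preq.dev and preq.bdev now name the physical device; preq.sector_number is
 * used unchanged, since a VBD now maps onto a single physical device. */
```

Both dispatch paths rely on this: preq.dev feeds the 2.4 buffer-head fields, and preq.bdev feeds bio->bi_bdev in the 2.6 path.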