xenbits.xensource.com Git - legacy/linux-2.6.18-xen.git/commitdiff
linux/blkfront: use blk_rq_map_sg to generate ring entries
Author: Keir Fraser <keir.fraser@citrix.com>
Thu, 5 Mar 2009 14:42:00 +0000 (14:42 +0000)
Committer: Keir Fraser <keir.fraser@citrix.com>
Thu, 5 Mar 2009 14:42:00 +0000 (14:42 +0000)
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@novell.com>
drivers/xen/blkfront/blkfront.c
drivers/xen/blkfront/block.h

index c4b1ecd65242608a1552d9a9769c015368d3a69a..632b35fee3d55f68370627ab6a0bca5a4cbfeb85 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/cdrom.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
+#include <linux/scatterlist.h>
 #include <scsi/scsi.h>
 #include <xen/evtchn.h>
 #include <xen/xenbus.h>
@@ -232,6 +233,8 @@ static int setup_blkring(struct xenbus_device *dev,
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
+       sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
        if (err < 0) {
                free_page((unsigned long)sring);
@@ -587,13 +590,11 @@ static int blkif_queue_request(struct request *req)
        struct blkfront_info *info = req->rq_disk->private_data;
        unsigned long buffer_mfn;
        blkif_request_t *ring_req;
-       struct bio *bio;
-       struct bio_vec *bvec;
-       int idx;
        unsigned long id;
        unsigned int fsect, lsect;
-       int ref;
+       int i, ref;
        grant_ref_t gref_head;
+       struct scatterlist *sg;
 
        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
                return 1;
@@ -622,14 +623,13 @@ static int blkif_queue_request(struct request *req)
        if (blk_barrier_rq(req))
                ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
-       ring_req->nr_segments = 0;
-       rq_for_each_bio (bio, req) {
-               bio_for_each_segment (bvec, bio, idx) {
-                       BUG_ON(ring_req->nr_segments
-                              == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-                       buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
-                       fsect = bvec->bv_offset >> 9;
-                       lsect = fsect + (bvec->bv_len >> 9) - 1;
+       ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+       BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+       for (i = 0; i < ring_req->nr_segments; ++i) {
+                       sg = info->sg + i;
+                       buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT;
+                       fsect = sg->offset >> 9;
+                       lsect = fsect + (sg->length >> 9) - 1;
                        /* install a grant reference. */
                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);
@@ -640,17 +640,12 @@ static int blkif_queue_request(struct request *req)
                                buffer_mfn,
                                rq_data_dir(req) ? GTF_readonly : 0 );
 
-                       info->shadow[id].frame[ring_req->nr_segments] =
-                               mfn_to_pfn(buffer_mfn);
-
-                       ring_req->seg[ring_req->nr_segments] =
+                       info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+                       ring_req->seg[i] =
                                (struct blkif_request_segment) {
                                        .gref       = ref,
                                        .first_sect = fsect,
                                        .last_sect  = lsect };
-
-                       ring_req->nr_segments++;
-               }
        }
 
        info->ring.req_prod_pvt++;
index b8f0cf53f7317cb961cf6a871831fb957dead496..3168653ddf3509ac59ab1719a7ba91aefc42d31a 100644 (file)
@@ -103,6 +103,7 @@ struct blkfront_info
        int connected;
        int ring_ref;
        blkif_front_ring_t ring;
+       struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int irq;
        struct xlbd_major_info *mi;
        request_queue_t *rq;