xenbits.xensource.com Git - people/pauldu/qemu.git/commitdiff
Use a single entry iovec since we're copying data anyway
Author: Paul Durrant <paul.durrant@citrix.com>
Date: Fri, 20 Apr 2018 13:18:10 +0000 (14:18 +0100)
Committer: Paul Durrant <paul.durrant@citrix.com>
Commit date: Thu, 26 Apr 2018 10:11:19 +0000 (11:11 +0100)
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
hw/block/xen_disk.c

index 8f4e229e3a51137f0f320c739c3ad0089108e92b..129f33f224b43c6e137315b4ebbcd80c71641250 100644 (file)
@@ -45,15 +45,12 @@ struct ioreq {
 
     /* parsed request */
     off_t               start;
-    QEMUIOVector        v;
+    void                *buf;
+    size_t              size;
     int                 presync;
 
-    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void                *pages;
-
     /* aio status */
+    QEMUIOVector        v;
     int                 aio_inflight;
     int                 aio_errors;
 
@@ -110,21 +107,17 @@ static void ioreq_reset(struct ioreq *ioreq)
     memset(&ioreq->req, 0, sizeof(ioreq->req));
     ioreq->status = 0;
     ioreq->start = 0;
+    ioreq->buf = NULL;
+    ioreq->size = 0;
     ioreq->presync = 0;
 
-    memset(ioreq->domids, 0, sizeof(ioreq->domids));
-    memset(ioreq->refs, 0, sizeof(ioreq->refs));
-    memset(ioreq->page, 0, sizeof(ioreq->page));
-    ioreq->pages = NULL;
-
+    qemu_iovec_reset(&ioreq->v);
     ioreq->aio_inflight = 0;
     ioreq->aio_errors = 0;
 
     ioreq->blkdev = NULL;
     memset(&ioreq->list, 0, sizeof(ioreq->list));
     memset(&ioreq->acct, 0, sizeof(ioreq->acct));
-
-    qemu_iovec_reset(&ioreq->v);
 }
 
 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
@@ -139,7 +132,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
         ioreq = g_malloc0(sizeof(*ioreq));
         ioreq->blkdev = blkdev;
         blkdev->requests_total++;
-        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        qemu_iovec_init(&ioreq->v, 1);
     } else {
         /* get one from freelist */
         ioreq = QLIST_FIRST(&blkdev->freelist);
@@ -184,7 +177,6 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
 static int ioreq_parse(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
-    uintptr_t mem;
     size_t len;
     int i;
 
@@ -231,14 +223,10 @@ static int ioreq_parse(struct ioreq *ioreq)
             goto err;
         }
 
-        ioreq->domids[i] = blkdev->xendev.dom;
-        ioreq->refs[i]   = ioreq->req.seg[i].gref;
-
-        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
-        qemu_iovec_add(&ioreq->v, (void*)mem, len);
+        ioreq->size += len;
     }
-    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
+    if (ioreq->start + ioreq->size > blkdev->file_size) {
         xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
         goto err;
     }
@@ -251,78 +239,59 @@ err:
 
 static void ioreq_free_copy_buffers(struct ioreq *ioreq)
 {
-    int i;
-
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = NULL;
-    }
-
-    qemu_vfree(ioreq->pages);
+    qemu_vfree(ioreq->buf);
 }
 
 static int ioreq_init_copy_buffers(struct ioreq *ioreq)
 {
-    int i;
-
-    if (ioreq->v.niov == 0) {
-        return 0;
-    }
-
-    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
-
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
-        ioreq->v.iov[i].iov_base = ioreq->page[i];
-    }
+    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
 
     return 0;
 }
 
 static int ioreq_grant_copy(struct ioreq *ioreq)
 {
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
+    struct XenBlkDev *blkdev = ioreq->blkdev;
+    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
+    void *virt = ioreq->buf;
     xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int i, count, rc;
-    int64_t file_blk = ioreq->blkdev->file_blk;
+    int i, rc;
+    int64_t file_blk = blkdev->file_blk;
 
-    if (ioreq->v.niov == 0) {
-        return 0;
-    }
-
-    count = ioreq->v.niov;
-
-    for (i = 0; i < count; i++) {
+    for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (ioreq->req.operation == BLKIF_OP_READ) {
             segs[i].flags = GNTCOPY_dest_gref;
-            segs[i].dest.foreign.ref = ioreq->refs[i];
-            segs[i].dest.foreign.domid = ioreq->domids[i];
+            segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
+            segs[i].dest.foreign.domid = blkdev->xendev.dom;
             segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
-            segs[i].source.virt = ioreq->v.iov[i].iov_base;
+            segs[i].source.virt = virt;
         } else {
             segs[i].flags = GNTCOPY_source_gref;
-            segs[i].source.foreign.ref = ioreq->refs[i];
-            segs[i].source.foreign.domid = ioreq->domids[i];
+            segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
+            segs[i].source.foreign.domid = blkdev->xendev.dom;
             segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
-            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
+            segs[i].dest.virt = virt;
         }
         segs[i].len = (ioreq->req.seg[i].last_sect
                        - ioreq->req.seg[i].first_sect + 1) * file_blk;
+        virt += segs[i].len;
     }
 
-    rc = xengnttab_grant_copy(gnt, count, segs);
+    rc = xengnttab_grant_copy(gnt, ioreq->req.nr_segments, segs);
 
     if (rc) {
-        xen_pv_printf(&ioreq->blkdev->xendev, 0,
+        xen_pv_printf(&blkdev->xendev, 0,
                       "failed to copy data %d\n", rc);
         ioreq->aio_errors++;
         return -1;
     }
 
-    for (i = 0; i < count; i++) {
+    for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (segs[i].status != GNTST_okay) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 3,
+            xen_pv_printf(&blkdev->xendev, 3,
                           "failed to copy data %d for gref %d, domid %d\n",
-                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
+                          segs[i].status, ioreq->req.seg[i].gref,
+                          blkdev->xendev.dom);
             ioreq->aio_errors++;
             rc = -1;
         }
@@ -455,7 +424,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
-                         ioreq->v.size, BLOCK_ACCT_READ);
+                         ioreq->size, BLOCK_ACCT_READ);
+        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
         ioreq->aio_inflight++;
         blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
@@ -467,9 +437,10 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
         }
 
         block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
-                         ioreq->v.size,
+                         ioreq->size,
                          ioreq->req.operation == BLKIF_OP_WRITE ?
                          BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
+        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
         ioreq->aio_inflight++;
         blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                         qemu_aio_complete, ioreq);