xenbits.xensource.com Git - people/ssmith/nc2-2.6.27.git/commitdiff
patch blkback_multi_page_ring
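
This patch teaches blkback to map a frontend ring that spans more than one
page. The backend advertises the largest order it will accept by writing
max-ring-page-order into its xenstore directory (clamped to
BLKIF_MAX_RING_PAGE_ORDER, i.e. at most four pages); a frontend that
understands the extension writes ring-page-order plus one ring-refN key per
page, while old frontends keep the single ring-ref handshake. The sketch
below shows, for orientation only, how a frontend might pick its ring order
from the advertised maximum; it is not part of this patch, and
blkfront_pick_ring_order() is a hypothetical name.

	/*
	 * Illustrative frontend-side sketch (not part of this patch).  A
	 * backend without this patch never writes max-ring-page-order, in
	 * which case we fall back to a single-page ring.
	 */
	static unsigned int blkfront_pick_ring_order(struct xenbus_device *dev,
						     unsigned int wanted_order)
	{
		unsigned int max_order;

		if (xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order",
				 "%u", &max_order) != 1)
			return 0;

		return wanted_order < max_order ? wanted_order : max_order;
	}
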
author     Steven Smith <ssmith@weybridge.uk.xensource.com>
           Fri, 2 Oct 2009 11:58:56 +0000 (12:58 +0100)
committer  Steven Smith <ssmith@weybridge.uk.xensource.com>
           Fri, 2 Oct 2009 11:58:56 +0000 (12:58 +0100)
drivers/xen/blkback/common.h
drivers/xen/blkback/interface.c
drivers/xen/blkback/xenbus.c

diff --git a/drivers/xen/blkback/common.h b/drivers/xen/blkback/common.h
index 82b5a91a66098156982b7eed62eb9f234d56138d..3b7b7fbcd1ea6ad00326f905f674e18a53b7039f 100644
@@ -70,6 +70,9 @@ struct vbd {
 
 struct backend_info;
 
+#define        BLKIF_MAX_RING_PAGE_ORDER 2
+#define        BLKIF_MAX_RING_PAGES (1 << BLKIF_MAX_RING_PAGE_ORDER)
+
 typedef struct blkif_st {
        /* Unique identifier for this interface. */
        domid_t           domid;
@@ -78,8 +81,8 @@ typedef struct blkif_st {
        unsigned int      irq;
        /* Comms information. */
        enum blkif_protocol blk_protocol;
-       blkif_back_rings_t blk_rings;
-       struct vm_struct *blk_ring_area;
+       blkif_back_rings_t  blk_rings;
+       struct vm_struct   *blk_ring_area;
        /* The VBD attached to this interface. */
        struct vbd        vbd;
        /* Back pointer to the backend_info. */
@@ -115,14 +118,15 @@ typedef struct blkif_st {
 
        wait_queue_head_t waiting_to_free;
 
-       grant_handle_t shmem_handle;
-       grant_ref_t    shmem_ref;
+       unsigned int   nr_shared_pages;
+       grant_handle_t shmem_handle[BLKIF_MAX_RING_PAGES];
 } blkif_t;
 
 blkif_t *blkif_alloc(domid_t domid);
 void blkif_disconnect(blkif_t *blkif);
 void blkif_free(blkif_t *blkif);
-int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
+int blkif_map(blkif_t *blkif, unsigned long *shared_pages,
+             unsigned int nr_shared_pages, unsigned int evtchn);
 
 #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
 #define blkif_put(_b)                                  \
diff --git a/drivers/xen/blkback/interface.c b/drivers/xen/blkback/interface.c
index 238922cc0f6783951160cbf34a9a215dc989b52b..7dcbc7d75aad74b2aabd5fc606c350ef8335c0bf 100644
@@ -57,50 +57,91 @@ blkif_t *blkif_alloc(domid_t domid)
        return blkif;
 }
 
-static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-{
-       struct gnttab_map_grant_ref op;
-
-       gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-                         GNTMAP_host_map, shared_page, blkif->domid);
+#define        INVALID_GRANT_HANDLE    ((grant_handle_t)~0U)
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-               BUG();
-
-       if (op.status) {
-               DPRINTK(" Grant table operation failure !\n");
-               return op.status;
+static void unmap_frontend_pages(blkif_t *blkif)
+{
+       struct vm_struct *area = blkif->blk_ring_area;
+       struct gnttab_unmap_grant_ref op[BLKIF_MAX_RING_PAGES];
+       unsigned int i;
+       unsigned int j;
+
+       j = 0;
+       for (i = 0; i < blkif->nr_shared_pages; i++) {
+               unsigned long addr = (unsigned long)area->addr +
+                                    (i * PAGE_SIZE);
+
+               if (blkif->shmem_handle[i] != INVALID_GRANT_HANDLE) {
+                       gnttab_set_unmap_op(&op[j++], addr,
+                                           GNTMAP_host_map,
+                                           blkif->shmem_handle[i]);
+
+                       blkif->shmem_handle[i] = INVALID_GRANT_HANDLE;
+               }
        }
 
-       blkif->shmem_ref = shared_page;
-       blkif->shmem_handle = op.handle;
+       blkif->nr_shared_pages = 0;
 
-       return 0;
+       if (j != 0) {
+               if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+                                             op, j))
+                       BUG();
+       }
 }
 
-static void unmap_frontend_page(blkif_t *blkif)
+static int map_frontend_pages(blkif_t *blkif, unsigned long shared_pages[],
+                             unsigned int nr_shared_pages)
 {
-       struct gnttab_unmap_grant_ref op;
+       struct vm_struct *area = blkif->blk_ring_area;
+       struct gnttab_map_grant_ref op[BLKIF_MAX_RING_PAGES];
+       unsigned int i;
+       int status = 0;
 
-       gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-                           GNTMAP_host_map, blkif->shmem_handle);
+       for (i = 0; i < nr_shared_pages; i++) {
+               unsigned long addr = (unsigned long)area->addr +
+                                    (i * PAGE_SIZE);
+
+               gnttab_set_map_op(&op[i], addr, GNTMAP_host_map,
+                                 shared_pages[i], blkif->domid);
+       }
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op,
+                                     nr_shared_pages))
                BUG();
+
+       for (i = 0; i < nr_shared_pages; i++) {
+               if ((status = op[i].status) != 0) {
+                       blkif->shmem_handle[i] = INVALID_GRANT_HANDLE;
+                       continue;
+               }
+
+               blkif->shmem_handle[i] = op[i].handle;
+       }
+
+       blkif->nr_shared_pages = nr_shared_pages;
+
+       if (status != 0) {
+               DPRINTK(" Grant table operation failure !\n");
+               unmap_frontend_pages(blkif);
+       }
+
+       return status;
 }
 
-int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
+int blkif_map(blkif_t *blkif, unsigned long shared_pages[],
+             unsigned int nr_shared_pages, unsigned int evtchn)
 {
+       unsigned long size = nr_shared_pages * PAGE_SIZE;
        int err;
 
        /* Already connected through? */
        if (blkif->irq)
                return 0;
 
-       if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
+       if ( (blkif->blk_ring_area = alloc_vm_area(size)) == NULL )
                return -ENOMEM;
 
-       err = map_frontend_page(blkif, shared_page);
+       err = map_frontend_pages(blkif, shared_pages, nr_shared_pages);
        if (err) {
                free_vm_area(blkif->blk_ring_area);
                return err;
@@ -111,21 +152,23 @@ int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
        {
                blkif_sring_t *sring;
                sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-               BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+               BACK_RING_INIT(&blkif->blk_rings.native, sring, size);
                break;
        }
        case BLKIF_PROTOCOL_X86_32:
        {
                blkif_x86_32_sring_t *sring_x86_32;
                sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
-               BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+               BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32,
+                              size);
                break;
        }
        case BLKIF_PROTOCOL_X86_64:
        {
                blkif_x86_64_sring_t *sring_x86_64;
                sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
-               BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+               BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64,
+                              size);
                break;
        }
        default:
@@ -133,14 +176,17 @@ int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
        }
 
        err = bind_interdomain_evtchn_to_irqhandler(
-               blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
+               blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend",
+               blkif);
        if (err < 0)
        {
-               unmap_frontend_page(blkif);
+               unmap_frontend_pages(blkif);
                free_vm_area(blkif->blk_ring_area);
+               blkif->blk_ring_area = NULL;
                blkif->blk_rings.common.sring = NULL;
                return err;
        }
+
        blkif->irq = err;
 
        return 0;
@@ -158,8 +204,9 @@ void blkif_disconnect(blkif_t *blkif)
        }
 
        if (blkif->blk_rings.common.sring) {
-               unmap_frontend_page(blkif);
+               unmap_frontend_pages(blkif);
                free_vm_area(blkif->blk_ring_area);
+               blkif->blk_ring_area = NULL;
                blkif->blk_rings.common.sring = NULL;
        }
 }
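
Passing the full mapped size to BACK_RING_INIT() (rather than PAGE_SIZE) is
what actually grows the ring: the ring macros size the request array from
whatever space follows the shared header. A minimal sketch of that
arithmetic, assuming the standard __RING_SIZE() macro from
xen/interface/io/ring.h; the entry counts mentioned are approximate and
specific to the native protocol.

	/* Hypothetical helper, for illustration only. */
	static unsigned int blkif_ring_entries(blkif_t *blkif,
					       unsigned int nr_pages)
	{
		blkif_sring_t *sring = blkif->blk_ring_area->addr;

		/*
		 * __RING_SIZE() rounds the space after the ring header down
		 * to a power-of-two entry count: roughly 32 in-flight
		 * requests for a one-page ring, 128 for a four-page ring.
		 */
		return __RING_SIZE(sring, nr_pages * PAGE_SIZE);
	}
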
diff --git a/drivers/xen/blkback/xenbus.c b/drivers/xen/blkback/xenbus.c
index 443a32d0bbbfaae299a0b01ceee942061b9f0b95..514380b24014001785ef87dd263267ed02dbb200 100644
@@ -51,6 +51,11 @@ static int connect_ring(struct backend_info *);
 static void backend_changed(struct xenbus_watch *, const char **,
                            unsigned int);
 
+/* Order of maximum shared ring size advertised to the front end. */
+static int blkif_max_ring_page_order = 0;
+module_param_named(max_ring_page_order, blkif_max_ring_page_order, int, 0);
+MODULE_PARM_DESC(max_ring_page_order, "Order of maximum VM shared ring size");
+
 static int blkback_name(blkif_t *blkif, char *buf)
 {
        char *devpath, *devname;
@@ -543,6 +548,11 @@ static int blkback_probe(struct xenbus_device *dev,
        if (err)
                goto fail;
 
+       err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order",
+                           "%u", blkif_max_ring_page_order);
+       if (err)
+               goto fail;
+
        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err)
                goto fail;
@@ -574,6 +584,8 @@ static int blkback_open_bdev(struct backend_info *be)
        err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
                           &major, &minor);
        if (err != 2) {
+               err = -EINVAL;
+
                xenbus_dev_fatal(dev, err, "reading physical-device");
                return err;
        }
@@ -831,30 +843,89 @@ again:
        xenbus_transaction_end(xbt, 1);
 }
 
-
 static int connect_ring(struct backend_info *be)
 {
        struct xenbus_device *dev = be->dev;
-       unsigned long ring_ref;
        unsigned int evtchn;
+       unsigned int ring_order;
+       unsigned long ring_ref[BLKIF_MAX_RING_PAGES];
        char protocol[64] = "";
        int err;
 
        DPRINTK("%s", dev->otherend);
 
-       err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
-                           "event-channel", "%u", &evtchn, NULL);
-       if (err) {
-               xenbus_dev_fatal(dev, err,
-                                "reading %s/ring-ref and event-channel",
+       err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u",
+                          &evtchn);
+       if (err != 1) {
+               err = -EINVAL;
+
+               xenbus_dev_fatal(dev, err, "reading %s/event-channel",
                                 dev->otherend);
                return err;
        }
 
+       printk(KERN_INFO "blkback: event-channel %u\n", evtchn);
+
+       err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+                          &ring_order);
+       if (err != 1) {
+               DPRINTK("%s: using single page handshake", dev->otherend);
+
+               ring_order = 0;
+
+               err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
+                                  "%lu", &ring_ref[0]);
+               if (err != 1) {
+                       err = -EINVAL;
+
+                       xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
+                                        dev->otherend);
+                       return err;
+               }
+
+               printk(KERN_INFO "blkback: ring-ref %lu\n", ring_ref[0]);
+       } else {
+               unsigned int i;
+
+               if (ring_order > blkif_max_ring_page_order) {
+                       err = -EINVAL;
+
+                       xenbus_dev_fatal(dev, err,
+                                        "%s/ring-page-order too big",
+                                        dev->otherend);
+                       return err;
+               }
+
+               DPRINTK("%s: using %u page(s)", dev->otherend,
+                       (1 << ring_order));
+
+               for (i = 0; i < (1u << ring_order); i++) {
+                       char ring_ref_name[10];
+
+                       snprintf(ring_ref_name, sizeof (ring_ref_name),
+                                "ring-ref%1u", i);
+                       err = xenbus_scanf(XBT_NIL, dev->otherend,
+                                          ring_ref_name, "%lu",
+                                          &ring_ref[i]);
+                       if (err != 1) {
+                               err = -EINVAL;
+
+                               xenbus_dev_fatal(dev, err,
+                                                "reading %s/%s",
+                                                dev->otherend,
+                                                ring_ref_name);
+                               return err;
+                       }
+
+                       printk(KERN_INFO "blkback: ring-ref%u %lu\n", i,
+                              ring_ref[i]);
+               }
+       }
+
        be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-       err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
-                           "%63s", protocol, NULL);
-       if (err)
+       err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
+                           "%63s", protocol);
+       if (err != 1)
                strcpy(protocol, "unspecified, assuming native");
        else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
                be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
@@ -869,18 +940,21 @@ static int connect_ring(struct backend_info *be)
                be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
 #endif
        else {
-               xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
-               return -1;
+               err = -EINVAL;
+
+               xenbus_dev_fatal(dev, err, "unknown fe protocol %s",
+                                protocol);
+               return err;
        }
-       printk(KERN_INFO
-              "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
-              ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
+       printk(KERN_INFO "blkback: protocol %d (%s)\n",
+              be->blkif->blk_protocol, protocol);
 
        /* Map the shared frame, irq etc. */
-       err = blkif_map(be->blkif, ring_ref, evtchn);
+       err = blkif_map(be->blkif, ring_ref, (1u << ring_order), evtchn);
        if (err) {
-               xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-                                ring_ref, evtchn);
+               xenbus_dev_fatal(dev, err,
+                                "mapping ring-refs and evtchn");
                return err;
        }
 
@@ -908,6 +982,9 @@ static struct xenbus_driver blkback = {
 
 void blkif_xenbus_init(void)
 {
+       if (blkif_max_ring_page_order > BLKIF_MAX_RING_PAGE_ORDER)
+               blkif_max_ring_page_order = BLKIF_MAX_RING_PAGE_ORDER;
+
        if (xenbus_register_backend(&blkback))
                BUG();
 }
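
For reference, connect_ring() above expects the frontend to publish
ring-page-order together with ring-ref0 .. ring-ref(N-1), or the traditional
single ring-ref. A hedged sketch of the corresponding frontend-side
publication follows; it is not part of this patch, blkfront_publish_ring()
is a made-up name, and a real frontend would wrap this in a xenbus
transaction and revoke the grants on failure.

	/* Illustrative frontend-side sketch (not in this patch). */
	static int blkfront_publish_ring(struct xenbus_device *dev, void *ring,
					 unsigned int order)
	{
		unsigned int i;
		int err;

		err = xenbus_printf(XBT_NIL, dev->nodename, "ring-page-order",
				    "%u", order);
		if (err)
			return err;

		for (i = 0; i < (1u << order); i++) {
			char name[16];
			int ref;

			/* Grant the backend access to this ring page. */
			ref = gnttab_grant_foreign_access(
				dev->otherend_id,
				virt_to_mfn((char *)ring + i * PAGE_SIZE), 0);
			if (ref < 0)
				return ref;

			snprintf(name, sizeof(name), "ring-ref%u", i);
			err = xenbus_printf(XBT_NIL, dev->nodename, name,
					    "%u", ref);
			if (err)
				return err;
		}

		return 0;
	}
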