ia64/xen-unstable

changeset 14951:816d274e2a85

blktap: Segments can span multiple clusters with tap:qcow.

In blktap's qcow we need to split up read/write requests if the requests
span multiple clusters. However, with our MAX_AIO_REQS define we
assume that there is only ever a single aio request per tapdisk
request, and under heavy i/o we can run out of room, causing us to
cancel requests.

Dynamically allocate (based on cluster_bits) the various io request
queues the driver maintains.

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
author kfraser@localhost.localdomain
date Thu Apr 26 11:29:41 2007 +0100 (2007-04-26)
parents 7f9ce06d3c61
children f8fd9afd2aad
files tools/blktap/drivers/block-qcow.c
line diff
     1.1 --- a/tools/blktap/drivers/block-qcow.c	Thu Apr 26 11:18:42 2007 +0100
     1.2 +++ b/tools/blktap/drivers/block-qcow.c	Thu Apr 26 11:29:41 2007 +0100
     1.3 @@ -55,7 +55,6 @@
     1.4  
     1.5  /******AIO DEFINES******/
     1.6  #define REQUEST_ASYNC_FD 1
     1.7 -#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ)
     1.8  
     1.9  struct pending_aio {
    1.10          td_callback_t cb;
    1.11 @@ -146,19 +145,38 @@ struct tdqcow_state {
    1.12  	AES_KEY aes_encrypt_key;       /*AES key*/
    1.13  	AES_KEY aes_decrypt_key;       /*AES key*/
    1.14          /* libaio state */
    1.15 -        io_context_t       aio_ctx;
    1.16 -        struct iocb        iocb_list  [MAX_AIO_REQS];
    1.17 -        struct iocb       *iocb_free  [MAX_AIO_REQS];
    1.18 -        struct pending_aio pending_aio[MAX_AIO_REQS];
    1.19 -        int                iocb_free_count;
    1.20 -        struct iocb       *iocb_queue[MAX_AIO_REQS];
    1.21 -        int                iocb_queued;
    1.22 -        int                poll_fd;      /* NB: we require aio_poll support */
    1.23 -        struct io_event    aio_events[MAX_AIO_REQS];
    1.24 +        io_context_t        aio_ctx;
    1.25 +        int                 max_aio_reqs;
    1.26 +        struct iocb        *iocb_list;
    1.27 +        struct iocb       **iocb_free;
    1.28 +        struct pending_aio *pending_aio;
    1.29 +        int                 iocb_free_count;
    1.30 +        struct iocb       **iocb_queue;
    1.31 +        int                 iocb_queued;
    1.32 +        int                 poll_fd;      /* NB: we require aio_poll support */
    1.33 +        struct io_event    *aio_events;
    1.34  };
    1.35  
    1.36  static int decompress_cluster(struct tdqcow_state *s, uint64_t cluster_offset);
    1.37  
    1.38 +static void free_aio_state(struct disk_driver *dd)
    1.39 +{
    1.40 +        struct tdqcow_state *s = (struct tdqcow_state *)dd->private;
    1.41 +
    1.42 +        if (s->sector_lock)
    1.43 +                free(s->sector_lock);
    1.44 +        if (s->iocb_list)
    1.45 +                free(s->iocb_list);
    1.46 +        if (s->pending_aio)
    1.47 +                free(s->pending_aio);
    1.48 +        if (s->aio_events)
    1.49 +                free(s->aio_events);
    1.50 +        if (s->iocb_free)
    1.51 +                free(s->iocb_free);
    1.52 +        if (s->iocb_queue)
    1.53 +                free(s->iocb_queue);
    1.54 +}
    1.55 +
    1.56  static int init_aio_state(struct disk_driver *dd)
    1.57  {
    1.58          int i;
    1.59 @@ -166,6 +184,12 @@ static int init_aio_state(struct disk_dr
    1.60  	struct tdqcow_state  *s = (struct tdqcow_state *)dd->private;
    1.61          long     ioidx;
    1.62  
    1.63 +        s->iocb_list = NULL;
    1.64 +        s->pending_aio = NULL;
    1.65 +        s->aio_events = NULL;
    1.66 +        s->iocb_free = NULL;
    1.67 +        s->iocb_queue = NULL;
    1.68 +
    1.69          /*Initialize Locking bitmap*/
    1.70  	s->sector_lock = calloc(1, bs->size);
    1.71  	
    1.72 @@ -174,13 +198,26 @@ static int init_aio_state(struct disk_dr
    1.73  		goto fail;
    1.74  	}
    1.75  
    1.76 +        /* A segment (i.e. a page) can span multiple clusters */
    1.77 +        s->max_aio_reqs = (getpagesize() / s->cluster_size) + 1;
    1.78 +
    1.79          /* Initialize AIO */
    1.80 -        s->iocb_free_count = MAX_AIO_REQS;
    1.81 +        s->iocb_free_count = s->max_aio_reqs;
    1.82          s->iocb_queued     = 0;
    1.83  
    1.84 +        if (!(s->iocb_list = malloc(sizeof(struct iocb) * s->max_aio_reqs)) ||
    1.85 +            !(s->pending_aio = malloc(sizeof(struct pending_aio) * s->max_aio_reqs)) ||
    1.86 +            !(s->aio_events = malloc(sizeof(struct io_event) * s->max_aio_reqs)) ||
    1.87 +            !(s->iocb_free = malloc(sizeof(struct iocb *) * s->max_aio_reqs)) ||
    1.88 +            !(s->iocb_queue = malloc(sizeof(struct iocb *) * s->max_aio_reqs))) {
    1.89 +                DPRINTF("Failed to allocate AIO structs (max_aio_reqs = %d)\n",
    1.90 +                        s->max_aio_reqs);
    1.91 +                goto fail;
    1.92 +        }
    1.93 +
    1.94          /*Signal kernel to create Poll FD for Asyc completion events*/
    1.95          s->aio_ctx = (io_context_t) REQUEST_ASYNC_FD;   
    1.96 -        s->poll_fd = io_setup(MAX_AIO_REQS, &s->aio_ctx);
    1.97 +        s->poll_fd = io_setup(s->max_aio_reqs, &s->aio_ctx);
    1.98  
    1.99  	if (s->poll_fd < 0) {
   1.100                  if (s->poll_fd == -EAGAIN) {
   1.101 @@ -198,7 +235,7 @@ static int init_aio_state(struct disk_dr
   1.102  		goto fail;
   1.103  	}
   1.104  
   1.105 -        for (i=0;i<MAX_AIO_REQS;i++)
   1.106 +        for (i=0;i<s->max_aio_reqs;i++)
   1.107                  s->iocb_free[i] = &s->iocb_list[i];
   1.108  
   1.109          DPRINTF("AIO state initialised\n");
   1.110 @@ -946,6 +983,7 @@ int tdqcow_open (struct disk_driver *dd,
   1.111   end_xenhdr:
   1.112  	if (init_aio_state(dd)!=0) {
   1.113  		DPRINTF("Unable to initialise AIO state\n");
   1.114 +                free_aio_state(dd);
   1.115  		goto fail;
   1.116  	}
   1.117  	init_fds(dd);
   1.118 @@ -962,6 +1000,7 @@ int tdqcow_open (struct disk_driver *dd,
   1.119  	
   1.120  fail:
   1.121  	DPRINTF("QCOW Open failed\n");
   1.122 +	free_aio_state(dd);
   1.123  	free(s->l1_table);
   1.124  	free(s->l2_cache);
   1.125  	free(s->cluster_cache);
   1.126 @@ -1145,7 +1184,7 @@ int tdqcow_do_callbacks(struct disk_driv
   1.127          if (sid > MAX_IOFD) return 1;
   1.128  	
   1.129  	/* Non-blocking test for completed io. */
   1.130 -        ret = io_getevents(prv->aio_ctx, 0, MAX_AIO_REQS, prv->aio_events,
   1.131 +        ret = io_getevents(prv->aio_ctx, 0, prv->max_aio_reqs, prv->aio_events,
   1.132                             NULL);
   1.133  
   1.134          for (ep = prv->aio_events, i = ret; i-- > 0; ep++) {