direct-io.hg

changeset 6459:db61a0f346a8

Allocate a request-id space per vbd in blkfront.
This should fix the assertions people have been seeing
in domU blkfront, where we run out of IDs because the
ID space was being shared by multiple vbds.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Aug 29 13:56:53 2005 +0000
parents 98de1d5fe5fb
children 3b3532384aab
files linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
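
The shape of the change is easiest to see outside the diff: each blkfront_info now owns its own shadow array plus a free list of request ids, chained through the req.id fields and terminated by the 0x0fffffff sentinel. What follows is a minimal standalone sketch of that scheme, not the kernel code itself; BLK_RING_SIZE is pinned to a constant, blkif_request_t is reduced to the one field the free list uses, and assert() stands in for BUG_ON, purely for illustration.

/*
 * Minimal userspace sketch of the per-vbd request-id free list
 * introduced by this changeset (simplified for illustration).
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define BLK_RING_SIZE 32            /* stand-in for __RING_SIZE(...) */
#define FREELIST_END  0x0fffffff    /* sentinel terminating the chain */

struct blk_shadow {
	unsigned long req_id;       /* doubles as the "next free" link */
	unsigned long request;      /* pending struct request *, or 0 */
};

/* One of these per vbd: each device now owns its own id space. */
struct blkfront_info {
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
};

/* Chain every shadow entry into the free list, as blkfront_probe does. */
static void shadow_init(struct blkfront_info *info)
{
	unsigned long i;

	info->shadow_free = 0;
	memset(info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req_id = i + 1;
	info->shadow[BLK_RING_SIZE - 1].req_id = FREELIST_END;
}

/* Pop the head of this device's free list. */
static unsigned long get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;

	assert(free < BLK_RING_SIZE);            /* BUG_ON in the kernel */
	info->shadow_free = info->shadow[free].req_id;
	info->shadow[free].req_id = 0x0fffffee;  /* debug poison */
	return free;
}

/* Push a completed id back onto this device's free list. */
static void add_id_to_freelist(struct blkfront_info *info, unsigned long id)
{
	info->shadow[id].req_id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

int main(void)
{
	struct blkfront_info vbd_a, vbd_b;

	shadow_init(&vbd_a);
	shadow_init(&vbd_b);

	/* Both devices hand out id 0 independently: no shared pool. */
	unsigned long a = get_id_from_freelist(&vbd_a);
	unsigned long b = get_id_from_freelist(&vbd_b);
	printf("vbd_a id=%lu, vbd_b id=%lu\n", a, b);

	add_id_to_freelist(&vbd_a, a);
	add_id_to_freelist(&vbd_b, b);
	return 0;
}

Because every device initialises and consumes its own chain, two vbds can hold the same numeric id simultaneously without colliding, which is exactly what the old global blk_shadow[] array could not guarantee.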
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Sun Aug 28 21:50:04 2005 -0800
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Mon Aug 29 13:56:53 2005 +0000
     1.3 @@ -55,39 +55,32 @@
     1.4  
     1.5  static unsigned int blkif_state = BLKIF_STATE_DISCONNECTED;
     1.6  
     1.7 -#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
     1.8 -
     1.9  #define MAXIMUM_OUTSTANDING_BLOCK_REQS \
    1.10      (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
    1.11  #define GRANTREF_INVALID (1<<15)
    1.12  
    1.13 -static struct blk_shadow {
    1.14 -	blkif_request_t req;
    1.15 -	unsigned long request;
    1.16 -	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    1.17 -} blk_shadow[BLK_RING_SIZE];
    1.18 -unsigned long blk_shadow_free;
    1.19 -
    1.20  static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */
    1.21  
    1.22  static void kick_pending_request_queues(struct blkfront_info *info);
    1.23  
    1.24  static void blkif_completion(struct blk_shadow *s);
    1.25  
    1.26 -static inline int GET_ID_FROM_FREELIST(void)
    1.27 +static inline int GET_ID_FROM_FREELIST(
    1.28 +	struct blkfront_info *info)
    1.29  {
    1.30 -	unsigned long free = blk_shadow_free;
    1.31 +	unsigned long free = info->shadow_free;
    1.32  	BUG_ON(free > BLK_RING_SIZE);
    1.33 -	blk_shadow_free = blk_shadow[free].req.id;
    1.34 -	blk_shadow[free].req.id = 0x0fffffee; /* debug */
    1.35 +	info->shadow_free = info->shadow[free].req.id;
    1.36 +	info->shadow[free].req.id = 0x0fffffee; /* debug */
    1.37  	return free;
    1.38  }
    1.39  
    1.40 -static inline void ADD_ID_TO_FREELIST(unsigned long id)
    1.41 +static inline void ADD_ID_TO_FREELIST(
    1.42 +	struct blkfront_info *info, unsigned long id)
    1.43  {
    1.44 -	blk_shadow[id].req.id  = blk_shadow_free;
    1.45 -	blk_shadow[id].request = 0;
    1.46 -	blk_shadow_free = id;
    1.47 +	info->shadow[id].req.id  = info->shadow_free;
    1.48 +	info->shadow[id].request = 0;
    1.49 +	info->shadow_free = id;
    1.50  }
    1.51  
    1.52  static inline void pickle_request(struct blk_shadow *s, blkif_request_t *r)
    1.53 @@ -213,8 +206,8 @@ static int blkif_queue_request(struct re
    1.54  
    1.55  	/* Fill out a communications ring structure. */
    1.56  	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
    1.57 -	id = GET_ID_FROM_FREELIST();
    1.58 -	blk_shadow[id].request = (unsigned long)req;
    1.59 +	id = GET_ID_FROM_FREELIST(info);
    1.60 +	info->shadow[id].request = (unsigned long)req;
    1.61  
    1.62  	ring_req->id = id;
    1.63  	ring_req->operation = rq_data_dir(req) ?
    1.64 @@ -240,7 +233,7 @@ static int blkif_queue_request(struct re
    1.65  				buffer_ma >> PAGE_SHIFT,
    1.66  				rq_data_dir(req) );
    1.67  
    1.68 -			blk_shadow[id].frame[ring_req->nr_segments] =
    1.69 +			info->shadow[id].frame[ring_req->nr_segments] =
    1.70  				buffer_ma >> PAGE_SHIFT;
    1.71  
    1.72  			ring_req->frame_and_sects[ring_req->nr_segments] =
    1.73 @@ -253,7 +246,7 @@ static int blkif_queue_request(struct re
    1.74  	info->ring.req_prod_pvt++;
    1.75  
    1.76  	/* Keep a private copy so we can reissue requests when recovering. */
    1.77 -	pickle_request(&blk_shadow[id], ring_req);
    1.78 +	pickle_request(&info->shadow[id], ring_req);
    1.79  
    1.80  	gnttab_free_grant_references(gref_head);
    1.81  
    1.82 @@ -331,11 +324,11 @@ static irqreturn_t blkif_int(int irq, vo
    1.83  
    1.84  		bret = RING_GET_RESPONSE(&info->ring, i);
    1.85  		id   = bret->id;
    1.86 -		req  = (struct request *)blk_shadow[id].request;
    1.87 +		req  = (struct request *)info->shadow[id].request;
    1.88  
    1.89 -		blkif_completion(&blk_shadow[id]);
    1.90 +		blkif_completion(&info->shadow[id]);
    1.91  
    1.92 -		ADD_ID_TO_FREELIST(id);
    1.93 +		ADD_ID_TO_FREELIST(info, id);
    1.94  
    1.95  		switch (bret->operation) {
    1.96  		case BLKIF_OP_READ:
    1.97 @@ -387,16 +380,16 @@ static void blkif_recover(struct blkfron
    1.98  	int j;
    1.99  
   1.100  	/* Stage 1: Make a safe copy of the shadow state. */
   1.101 -	copy = (struct blk_shadow *)kmalloc(sizeof(blk_shadow), GFP_KERNEL);
   1.102 +	copy = (struct blk_shadow *)kmalloc(sizeof(info->shadow), GFP_KERNEL);
   1.103  	BUG_ON(copy == NULL);
   1.104 -	memcpy(copy, blk_shadow, sizeof(blk_shadow));
   1.105 +	memcpy(copy, info->shadow, sizeof(info->shadow));
   1.106  
   1.107  	/* Stage 2: Set up free list. */
   1.108 -	memset(&blk_shadow, 0, sizeof(blk_shadow));
   1.109 +	memset(&info->shadow, 0, sizeof(info->shadow));
   1.110  	for (i = 0; i < BLK_RING_SIZE; i++)
   1.111 -		blk_shadow[i].req.id = i+1;
   1.112 -	blk_shadow_free = info->ring.req_prod_pvt;
   1.113 -	blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
   1.114 +		info->shadow[i].req.id = i+1;
   1.115 +	info->shadow_free = info->ring.req_prod_pvt;
   1.116 +	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
   1.117  
   1.118  	/* Stage 3: Find pending requests and requeue them. */
   1.119  	for (i = 0; i < BLK_RING_SIZE; i++) {
   1.120 @@ -410,8 +403,8 @@ static void blkif_recover(struct blkfron
   1.121  		unpickle_request(req, &copy[i]);
   1.122  
   1.123  		/* We get a new request id, and must reset the shadow state. */
   1.124 -		req->id = GET_ID_FROM_FREELIST();
   1.125 -		memcpy(&blk_shadow[req->id], &copy[i], sizeof(copy[i]));
   1.126 +		req->id = GET_ID_FROM_FREELIST(info);
   1.127 +		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
   1.128  
   1.129  		/* Rewrite any grant references invalidated by susp/resume. */
   1.130  		for (j = 0; j < req->nr_segments; j++) {
   1.131 @@ -420,13 +413,13 @@ static void blkif_recover(struct blkfron
   1.132  					blkif_gref_from_fas(
   1.133  						req->frame_and_sects[j]),
   1.134  					info->backend_id,
   1.135 -					blk_shadow[req->id].frame[j],
   1.136 +					info->shadow[req->id].frame[j],
   1.137  					rq_data_dir(
   1.138  						(struct request *)
   1.139 -						blk_shadow[req->id].request));
   1.140 +						info->shadow[req->id].request));
   1.141  			req->frame_and_sects[j] &= ~GRANTREF_INVALID;
   1.142  		}
   1.143 -		blk_shadow[req->id].req = *req;
   1.144 +		info->shadow[req->id].req = *req;
   1.145  
   1.146  		info->ring.req_prod_pvt++;
   1.147  	}
   1.148 @@ -628,9 +621,8 @@ static int talk_to_backend(struct xenbus
   1.149  static int blkfront_probe(struct xenbus_device *dev,
   1.150  			  const struct xenbus_device_id *id)
   1.151  {
   1.152 -	int err;
   1.153 +	int err, vdevice, i;
   1.154  	struct blkfront_info *info;
   1.155 -	int vdevice;
   1.156  
   1.157  	/* FIXME: Use dynamic device id if this is not set. */
   1.158  	err = xenbus_scanf(dev->nodename, "virtual-device", "%i", &vdevice);
   1.159 @@ -652,6 +644,12 @@ static int blkfront_probe(struct xenbus_
   1.160  	info->mi = NULL;
   1.161  	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
   1.162  
   1.163 +	info->shadow_free = 0;
   1.164 +	memset(info->shadow, 0, sizeof(info->shadow));
   1.165 +	for (i = 0; i < BLK_RING_SIZE; i++)
   1.166 +		info->shadow[i].req.id = i+1;
   1.167 +	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
   1.168 +
   1.169  	/* Front end dir is a number, which is used as the id. */
   1.170  	info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
   1.171  	dev->data = info;
   1.172 @@ -752,20 +750,12 @@ static int wait_for_blkif(void)
   1.173  
   1.174  static int __init xlblk_init(void)
   1.175  {
   1.176 -	int i;
   1.177 -
   1.178  	if ((xen_start_info.flags & SIF_INITDOMAIN)
   1.179  	    || (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
   1.180  		return 0;
   1.181  
   1.182  	IPRINTK("Initialising virtual block device driver\n");
   1.183  
   1.184 -	blk_shadow_free = 0;
   1.185 -	memset(blk_shadow, 0, sizeof(blk_shadow));
   1.186 -	for (i = 0; i < BLK_RING_SIZE; i++)
   1.187 -		blk_shadow[i].req.id = i+1;
   1.188 -	blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
   1.189 -
   1.190  	init_blk_xenbus();
   1.191  
   1.192  	wait_for_blkif();
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Sun Aug 28 21:50:04 2005 -0800
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Mon Aug 29 13:56:53 2005 +0000
     2.3 @@ -96,6 +96,14 @@ struct xlbd_major_info
     2.4  	struct xlbd_type_info *type;
     2.5  };
     2.6  
     2.7 +struct blk_shadow {
     2.8 +	blkif_request_t req;
     2.9 +	unsigned long request;
    2.10 +	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    2.11 +};
    2.12 +
    2.13 +#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
    2.14 +
    2.15  /*
    2.16   * We have one of these per vbd, whether ide, scsi or 'other'.  They
    2.17   * hang in private_data off the gendisk structure. We may end up
    2.18 @@ -119,6 +127,8 @@ struct blkfront_info
    2.19  	request_queue_t *rq;
    2.20  	struct work_struct work;
    2.21  	struct gnttab_free_callback callback;
    2.22 +	struct blk_shadow shadow[BLK_RING_SIZE];
    2.23 +	unsigned long shadow_free;
    2.24  };
    2.25  
    2.26  extern spinlock_t blkif_io_lock;