ia64/xen-unstable

changeset 6353:317db130cbbf

First pass at using one block interface per device.

Per-device frontend state (shared ring, event channel, grant reference,
backend domain id) moves out of file-scope globals and into the new
struct blkfront_info; one instance is allocated per virtual block
device and threaded through the request, interrupt, setup, recovery and
teardown paths, so each vbd gets its own ring and event channel instead
of every vbd multiplexing a single block interface.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Mon Aug 22 14:22:59 2005 +0000 (2005-08-22)
parents 531ad4bde8f2
children 1d86fcb11b59
files linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6-xen-sparse/drivers/xen/blkfront/block.h linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Mon Aug 22 10:21:18 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Mon Aug 22 14:22:59 2005 +0000
     1.3 @@ -58,6 +58,21 @@
     1.4  #include <asm-xen/xen-public/grant_table.h>
     1.5  #include <asm-xen/gnttab.h>
     1.6  
     1.7 +struct blkfront_info
     1.8 +{
     1.9 +	/* We watch the backend */
    1.10 +	struct xenbus_watch watch;
    1.11 +	int vdevice;
    1.12 +	u16 handle;
    1.13 +	int connected;
    1.14 +	struct xenbus_device *dev;
    1.15 +	char *backend;
    1.16 +	int backend_id;
    1.17 +	int grant_id;
    1.18 +	blkif_front_ring_t ring;
    1.19 +	unsigned int evtchn;
    1.20 +};
    1.21 +
    1.22  typedef unsigned char byte; /* from linux/ide.h */
    1.23  
    1.24  /* Control whether runtime update of vbds is enabled. */
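The fields above are exactly the file-scope globals removed in the next hunk, gathered so that each virtual block device can own a private instance. A minimal sketch of the per-device lifecycle this enables, mirroring what blkfront_probe() and talk_to_backend() do later in the patch (the helper name example_probe is hypothetical, and error handling is trimmed):

    /* Kernel context assumed; one blkfront_info per xenbus device. */
    static int example_probe(struct xenbus_device *dev)
    {
        struct blkfront_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
            return -ENOMEM;
        memset(info, 0, sizeof(*info));
        info->dev = dev;
        info->connected = 0;

        /* talk_to_backend() fills in backend_id, grant_id, ring and
         * evtchn for this device only. */
        return talk_to_backend(dev, info);
    }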
    1.25 @@ -68,20 +83,14 @@ typedef unsigned char byte; /* from linu
    1.26  #define BLKIF_STATE_CONNECTED    2
    1.27  
    1.28  static unsigned int blkif_state = BLKIF_STATE_CLOSED;
    1.29 -static unsigned int blkif_evtchn = 0;
    1.30 -static unsigned int blkif_vbds = 0;
    1.31  static unsigned int blkif_vbds_connected = 0;
    1.32  
    1.33 -static blkif_front_ring_t blk_ring;
    1.34 -
    1.35  #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
    1.36  
    1.37 -static domid_t rdomid = 0;
    1.38 -static grant_ref_t gref_head, gref_terminal;
    1.39  #define MAXIMUM_OUTSTANDING_BLOCK_REQS \
    1.40      (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
    1.41  #define GRANTREF_INVALID (1<<15)
    1.42 -static int shmem_ref;
    1.43 +static grant_ref_t gref_head, gref_terminal;
    1.44  
    1.45  static struct blk_shadow {
    1.46      blkif_request_t req;
    1.47 @@ -138,11 +147,11 @@ static inline void unpickle_request(blki
    1.48  }
    1.49  
    1.50  
    1.51 -static inline void flush_requests(void)
    1.52 +static inline void flush_requests(struct blkfront_info *info)
    1.53  {
    1.54      DISABLE_SCATTERGATHER();
    1.55 -    RING_PUSH_REQUESTS(&blk_ring);
    1.56 -    notify_via_evtchn(blkif_evtchn);
    1.57 +    RING_PUSH_REQUESTS(&info->ring);
    1.58 +    notify_via_evtchn(info->evtchn);
    1.59  }
    1.60  
    1.61  
    1.62 @@ -156,7 +165,7 @@ static struct xlbd_disk_info *head_waiti
    1.63  static void kick_pending_request_queues(void)
    1.64  {
    1.65      struct xlbd_disk_info *di;
    1.66 -    while ( ((di = head_waiting) != NULL) && !RING_FULL(&blk_ring) )
    1.67 +    while ( ((di = head_waiting) != NULL) && !RING_FULL(&di->info->ring) )
    1.68      {
    1.69          head_waiting = di->next_waiting;
    1.70          di->next_waiting = NULL;
    1.71 @@ -242,7 +251,7 @@ static int blkif_queue_request(struct re
    1.72          return 1;
    1.73  
    1.74      /* Fill out a communications ring structure. */
    1.75 -    ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
    1.76 +    ring_req = RING_GET_REQUEST(&di->info->ring, di->info->ring.req_prod_pvt);
    1.77      id = GET_ID_FROM_FREELIST();
    1.78      blk_shadow[id].request = (unsigned long)req;
    1.79  
    1.80 @@ -268,7 +277,7 @@ static int blkif_queue_request(struct re
    1.81  
    1.82              gnttab_grant_foreign_access_ref(
    1.83                          ref,
    1.84 -                        rdomid,
    1.85 +                        di->info->backend_id,
    1.86                          buffer_ma >> PAGE_SHIFT,
    1.87                          rq_data_dir(req) );
    1.88  
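The last argument to gnttab_grant_foreign_access_ref() is the read-only flag for the backend's mapping, so passing rq_data_dir(req) encodes the transfer direction directly (READ is 0 and WRITE is 1 in linux/fs.h). Annotated, the call above reads:

    /* rq_data_dir(req) == READ  (0): the backend must write data into
     *     the page, so grant it read-write.
     * rq_data_dir(req) == WRITE (1): the backend only reads the page,
     *     so grant it read-only. */
    gnttab_grant_foreign_access_ref(ref,
                                    di->info->backend_id,    /* per-device domid */
                                    buffer_ma >> PAGE_SHIFT, /* machine frame    */
                                    rq_data_dir(req));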
    1.89 @@ -280,7 +289,7 @@ static int blkif_queue_request(struct re
    1.90          }
    1.91      }
    1.92  
    1.93 -    blk_ring.req_prod_pvt++;
    1.94 +    di->info->ring.req_prod_pvt++;
    1.95      
    1.96      /* Keep a private copy so we can reissue requests when recovering. */
    1.97      pickle_request(&blk_shadow[id], ring_req);
    1.98 @@ -295,7 +304,7 @@ static int blkif_queue_request(struct re
    1.99   */
   1.100  void do_blkif_request(request_queue_t *rq)
   1.101  {
   1.102 -    struct xlbd_disk_info *di;
   1.103 +    struct xlbd_disk_info *di = NULL;
   1.104      struct request *req;
   1.105      int queued;
   1.106  
   1.107 @@ -305,13 +314,15 @@ void do_blkif_request(request_queue_t *r
   1.108  
   1.109      while ( (req = elv_next_request(rq)) != NULL )
   1.110      {
   1.111 +	di = req->rq_disk->private_data;
   1.112 +
   1.113          if ( !blk_fs_request(req) )
   1.114          {
   1.115              end_request(req, 0);
   1.116              continue;
   1.117          }
   1.118  
   1.119 -        if ( RING_FULL(&blk_ring) )
   1.120 +        if ( RING_FULL(&di->info->ring) )
   1.121              goto wait;
   1.122  
   1.123          DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
   1.124 @@ -323,7 +334,6 @@ void do_blkif_request(request_queue_t *r
   1.125          if ( blkif_queue_request(req) )
   1.126          {
   1.127          wait:
   1.128 -            di = req->rq_disk->private_data;
   1.129              if ( di->next_waiting == NULL )
   1.130              {
   1.131                  di->next_waiting = head_waiting;
   1.132 @@ -338,7 +348,7 @@ void do_blkif_request(request_queue_t *r
   1.133      }
   1.134  
   1.135      if ( queued != 0 )
   1.136 -        flush_requests();
   1.137 +        flush_requests(di->info);
   1.138  }
   1.139  
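Caching di across the loop and then calling flush_requests(di->info) is safe because each gendisk has its own request queue (vbd.c sets di->rq = gd->queue), so every request seen in one invocation of do_blkif_request() belongs to the same device and hence the same ring. The lookup chain, condensed with field names from this patch:

    struct xlbd_disk_info *di = req->rq_disk->private_data;  /* per disk   */
    struct blkfront_info *info = di->info;                   /* per device */
    /* info->ring, info->evtchn and info->backend_id now name the one
     * channel this queue's requests travel over. */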
   1.140  
   1.141 @@ -347,7 +357,8 @@ static irqreturn_t blkif_int(int irq, vo
   1.142      struct request *req;
   1.143      blkif_response_t *bret;
   1.144      RING_IDX i, rp;
   1.145 -    unsigned long flags; 
   1.146 +    unsigned long flags;
   1.147 +    struct blkfront_info *info = (struct blkfront_info *)dev_id;
   1.148      
   1.149      spin_lock_irqsave(&blkif_io_lock, flags);     
   1.150  
   1.151 @@ -358,14 +369,14 @@ static irqreturn_t blkif_int(int irq, vo
   1.152          return IRQ_HANDLED;
   1.153      }
   1.154      
   1.155 -    rp = blk_ring.sring->rsp_prod;
   1.156 +    rp = info->ring.sring->rsp_prod;
   1.157      rmb(); /* Ensure we see queued responses up to 'rp'. */
   1.158  
   1.159 -    for ( i = blk_ring.rsp_cons; i != rp; i++ )
   1.160 +    for ( i = info->ring.rsp_cons; i != rp; i++ )
   1.161      {
   1.162          unsigned long id;
   1.163  
   1.164 -        bret = RING_GET_RESPONSE(&blk_ring, i);
   1.165 +        bret = RING_GET_RESPONSE(&info->ring, i);
   1.166          id   = bret->id;
   1.167          req  = (struct request *)blk_shadow[id].request;
   1.168  
   1.169 @@ -394,7 +405,7 @@ static irqreturn_t blkif_int(int irq, vo
   1.170          }
   1.171      }
   1.172  
   1.173 -    blk_ring.rsp_cons = i;
   1.174 +    info->ring.rsp_cons = i;
   1.175  
   1.176      kick_pending_request_queues();
   1.177  
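The handler now recovers its device from the dev_id cookie instead of reading a global ring. Both ends of that contract, condensed from this changeset (the era's 2.6 interrupt-handler signature takes pt_regs):

    /* Registration, in blkif_connect() below: info becomes dev_id. */
    bind_evtchn_to_irqhandler(info->evtchn, blkif_int,
                              SA_SAMPLE_RANDOM, "blkif", info);

    /* Delivery: the same pointer comes back as dev_id. */
    static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *regs)
    {
        struct blkfront_info *info = (struct blkfront_info *)dev_id;
        /* ... consume responses from info->ring as in the hunk above ... */
        return IRQ_HANDLED;
    }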
   1.178 @@ -426,10 +437,10 @@ static void kick_pending_request_queues(
   1.179  {
   1.180      /* We kick pending request queues if the ring is reasonably empty. */
   1.181      if ( (nr_pending != 0) && 
   1.182 -         (RING_PENDING_REQUESTS(&blk_ring) < (BLK_RING_SIZE >> 1)) )
   1.183 +         (RING_PENDING_REQUESTS(&info->ring) < (BLK_RING_SIZE >> 1)) )
   1.184      {
   1.185          /* Attempt to drain the queue, but bail if the ring becomes full. */
   1.186 -        while ( (nr_pending != 0) && !RING_FULL(&blk_ring) )
   1.187 +        while ( (nr_pending != 0) && !RING_FULL(&info->ring) )
   1.188              do_blkif_request(pending_queues[--nr_pending]);
   1.189      }
   1.190  }
   1.191 @@ -725,8 +736,8 @@ static int blkif_queue_request(unsigned 
   1.192               (sg_dev == device) &&
   1.193               (sg_next_sect == sector_number) )
   1.194          {
   1.195 -            req = RING_GET_REQUEST(&blk_ring, 
   1.196 -                                   blk_ring.req_prod_pvt - 1);
   1.197 +            req = RING_GET_REQUEST(&info->ring, 
   1.198 +                                   info->ring.req_prod_pvt - 1);
   1.199              bh = (struct buffer_head *)id;
   1.200       
   1.201              bh->b_reqnext = (struct buffer_head *)blk_shadow[req->id].request;
   1.202 @@ -738,7 +749,7 @@ static int blkif_queue_request(unsigned 
   1.203  
   1.204              gnttab_grant_foreign_access_ref(
   1.205                          ref,
   1.206 -                        rdomid,
   1.207 +                        info->backend_id,
   1.208                          buffer_ma >> PAGE_SHIFT,
   1.209                          ( operation == BLKIF_OP_WRITE ? 1 : 0 ) );
   1.210  
   1.211 @@ -757,7 +768,7 @@ static int blkif_queue_request(unsigned 
   1.212  
   1.213              return 0;
   1.214          }
   1.215 -        else if ( RING_FULL(&blk_ring) )
   1.216 +        else if ( RING_FULL(&info->ring) )
   1.217          {
   1.218              return 1;
   1.219          }
   1.220 @@ -774,7 +785,7 @@ static int blkif_queue_request(unsigned 
   1.221      }
   1.222  
   1.223      /* Fill out a communications ring structure. */
   1.224 -    req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
   1.225 +    req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
   1.226  
   1.227      xid = GET_ID_FROM_FREELIST();
   1.228      blk_shadow[xid].request = (unsigned long)id;
   1.229 @@ -790,7 +801,7 @@ static int blkif_queue_request(unsigned 
   1.230  
   1.231      gnttab_grant_foreign_access_ref(
   1.232                  ref,
   1.233 -                rdomid,
   1.234 +                info->backend_id,
   1.235                  buffer_ma >> PAGE_SHIFT,
   1.236                  ( operation == BLKIF_OP_WRITE ? 1 : 0 ) );
   1.237  
   1.238 @@ -801,7 +812,7 @@ static int blkif_queue_request(unsigned 
   1.239      /* Keep a private copy so we can reissue requests when recovering. */    
   1.240      pickle_request(&blk_shadow[xid], req);
   1.241  
   1.242 -    blk_ring.req_prod_pvt++;
   1.243 +    info->ring.req_prod_pvt++;
   1.244      
   1.245      return 0;
   1.246  }
   1.247 @@ -903,15 +914,15 @@ static void blkif_int(int irq, void *dev
   1.248          return;
   1.249      }
   1.250  
   1.251 -    rp = blk_ring.sring->rsp_prod;
   1.252 +    rp = info->ring.sring->rsp_prod;
   1.253      rmb(); /* Ensure we see queued responses up to 'rp'. */
   1.254  
   1.255 -    for ( i = blk_ring.rsp_cons; i != rp; i++ )
   1.256 +    for ( i = info->ring.rsp_cons; i != rp; i++ )
   1.257      {
   1.258          unsigned long id;
   1.259          blkif_response_t *bret;
   1.260          
   1.261 -        bret = RING_GET_RESPONSE(&blk_ring, i);
   1.262 +        bret = RING_GET_RESPONSE(&info->ring, i);
   1.263          id = bret->id;
   1.264          bh = (struct buffer_head *)blk_shadow[id].request;
   1.265  
   1.266 @@ -943,7 +954,7 @@ static void blkif_int(int irq, void *dev
   1.267          }
   1.268  
   1.269      }
   1.270 -    blk_ring.rsp_cons = i;
   1.271 +    info->ring.rsp_cons = i;
   1.272      
   1.273      kick_pending_request_queues();
   1.274  
   1.275 @@ -954,7 +965,7 @@ static void blkif_int(int irq, void *dev
   1.276  
   1.277  /*****************************  COMMON CODE  *******************************/
   1.278  
   1.279 -static void blkif_free(void)
   1.280 +static void blkif_free(struct blkfront_info *info)
   1.281  {
   1.282      /* Prevent new requests being issued until we fix things up. */
   1.283      spin_lock_irq(&blkif_io_lock);
   1.284 @@ -962,16 +973,16 @@ static void blkif_free(void)
   1.285      spin_unlock_irq(&blkif_io_lock);
   1.286  
   1.287      /* Free resources associated with old device channel. */
   1.288 -    if ( blk_ring.sring != NULL )
   1.289 +    if ( info->ring.sring != NULL )
   1.290      {
   1.291 -        free_page((unsigned long)blk_ring.sring);
   1.292 -        blk_ring.sring = NULL;
   1.293 +        free_page((unsigned long)info->ring.sring);
   1.294 +        info->ring.sring = NULL;
   1.295      }
   1.296 -    unbind_evtchn_from_irqhandler(blkif_evtchn, NULL);
   1.297 -    blkif_evtchn = 0;
   1.298 +    unbind_evtchn_from_irqhandler(info->evtchn, NULL);
   1.299 +    info->evtchn = 0;
   1.300  }
   1.301  
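blkif_free() now tears down exactly one device's channel: its ring page and its event-channel binding. That is what lets the call sites later in the patch drop their "last vbd" guards; blkfront_remove(), blkfront_suspend() and the talk_to_backend() error path all call it unconditionally:

    blkif_free(info);  /* releases info->ring.sring and info->evtchn only;
                        * other vbds keep their own channels alive. */

(One loose end of the first pass: the unbind call still passes NULL as the dev_id even though the bind registered info; whether that matters depends on unbind_evtchn_from_irqhandler(), which is outside this diff.)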
   1.302 -static void blkif_recover(void)
   1.303 +static void blkif_recover(struct blkfront_info *info)
   1.304  {
   1.305      int i;
   1.306      blkif_request_t *req;
   1.307 @@ -987,7 +998,7 @@ static void blkif_recover(void)
   1.308      memset(&blk_shadow, 0, sizeof(blk_shadow));
   1.309      for ( i = 0; i < BLK_RING_SIZE; i++ )
   1.310          blk_shadow[i].req.id = i+1;
   1.311 -    blk_shadow_free = blk_ring.req_prod_pvt;
   1.312 +    blk_shadow_free = info->ring.req_prod_pvt;
   1.313      blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
   1.314  
   1.315      /* Stage 3: Find pending requests and requeue them. */
   1.316 @@ -999,7 +1010,7 @@ static void blkif_recover(void)
   1.317  
   1.318          /* Grab a request slot and unpickle shadow state into it. */
   1.319          req = RING_GET_REQUEST(
   1.320 -            &blk_ring, blk_ring.req_prod_pvt);
   1.321 +            &info->ring, info->ring.req_prod_pvt);
   1.322          unpickle_request(req, &copy[i]);
   1.323  
   1.324          /* We get a new request id, and must reset the shadow state. */
   1.325 @@ -1012,7 +1023,7 @@ static void blkif_recover(void)
   1.326              if ( req->frame_and_sects[j] & GRANTREF_INVALID )
   1.327                  gnttab_grant_foreign_access_ref(
   1.328                      blkif_gref_from_fas(req->frame_and_sects[j]),
   1.329 -                    rdomid,
   1.330 +                    info->backend_id,
   1.331                      blk_shadow[req->id].frame[j],
   1.332                      rq_data_dir((struct request *)
   1.333                                  blk_shadow[req->id].request));
   1.334 @@ -1020,32 +1031,31 @@ static void blkif_recover(void)
   1.335          }
   1.336          blk_shadow[req->id].req = *req;
   1.337  
   1.338 -        blk_ring.req_prod_pvt++;
   1.339 +        info->ring.req_prod_pvt++;
   1.340      }
   1.341  
   1.342      kfree(copy);
   1.343  
   1.344      recovery = 0;
   1.345  
   1.346 -    /* blk_ring->req_prod will be set when we flush_requests().*/
   1.347 +    /* info->ring->req_prod will be set when we flush_requests().*/
   1.348      wmb();
   1.349  
   1.350      /* Kicks things back into life. */
   1.351 -    flush_requests();
   1.352 +    flush_requests(info);
   1.353  
    1.354      /* Now safe to let other people use the interface. */
   1.355      blkif_state = BLKIF_STATE_CONNECTED;
   1.356  }
   1.357  
   1.358 -static void blkif_connect(u16 evtchn, domid_t domid)
   1.359 +static void blkif_connect(struct blkfront_info *info, u16 evtchn)
   1.360  {
   1.361      int err = 0;
   1.362  
   1.363 -    blkif_evtchn = evtchn;
   1.364 -    rdomid       = domid;
   1.365 +    info->evtchn = evtchn;
   1.366  
   1.367      err = bind_evtchn_to_irqhandler(
   1.368 -        blkif_evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
   1.369 +        info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
   1.370      if ( err != 0 )
   1.371      {
   1.372          WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
   1.373 @@ -1059,17 +1069,6 @@ static struct xenbus_device_id blkfront_
   1.374  	{ "" }
   1.375  };
   1.376  
   1.377 -struct blkfront_info
   1.378 -{
   1.379 -	/* We watch the backend */
   1.380 -	struct xenbus_watch watch;
   1.381 -	int vdevice;
   1.382 -	u16 handle;
   1.383 -	int connected;
   1.384 -	struct xenbus_device *dev;
   1.385 -	char *backend;
   1.386 -};
   1.387 -
   1.388  static void watch_for_status(struct xenbus_watch *watch, const char *node)
   1.389  {
   1.390  	struct blkfront_info *info;
   1.391 @@ -1094,7 +1093,7 @@ static void watch_for_status(struct xenb
   1.392  		return;
   1.393  	}
   1.394  
   1.395 -	xlvbd_add(sectors, info->vdevice, info->handle, binfo, sector_size);
   1.396 +	xlvbd_add(sectors, info->vdevice, info->handle, binfo, sector_size, info);
   1.397  	info->connected = 1;
   1.398  
   1.399  	/* First to connect?  blkif is now connected. */
   1.400 @@ -1109,7 +1108,7 @@ static void watch_for_status(struct xenb
   1.401  	spin_unlock_irq(&blkif_io_lock);
   1.402  }
   1.403  
   1.404 -static int setup_blkring(struct xenbus_device *dev, unsigned int backend_id)
   1.405 +static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info)
   1.406  {
   1.407  	blkif_sring_t *sring;
   1.408  	evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
   1.409 @@ -1121,25 +1120,25 @@ static int setup_blkring(struct xenbus_d
   1.410  		return -ENOMEM;
   1.411  	}
   1.412  	SHARED_RING_INIT(sring);
   1.413 -	FRONT_RING_INIT(&blk_ring, sring, PAGE_SIZE);
   1.414 +	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
   1.415  
   1.416 -	shmem_ref = gnttab_claim_grant_reference(&gref_head,
   1.417 -						 gref_terminal);
   1.418 -	ASSERT(shmem_ref != -ENOSPC);
   1.419 -	gnttab_grant_foreign_access_ref(shmem_ref,
   1.420 -					backend_id,
   1.421 -					virt_to_mfn(blk_ring.sring),
   1.422 +	info->grant_id = gnttab_claim_grant_reference(&gref_head,
   1.423 +						      gref_terminal);
   1.424 +	ASSERT(info->grant_id != -ENOSPC);
   1.425 +	gnttab_grant_foreign_access_ref(info->grant_id,
   1.426 +					info->backend_id,
   1.427 +					virt_to_mfn(info->ring.sring),
   1.428  					0);
   1.429  
   1.430 -	op.u.alloc_unbound.dom = backend_id;
   1.431 +	op.u.alloc_unbound.dom = info->backend_id;
   1.432  	err = HYPERVISOR_event_channel_op(&op);
   1.433  	if (err) {
   1.434 -		free_page((unsigned long)blk_ring.sring);
   1.435 -		blk_ring.sring = 0;
   1.436 +		free_page((unsigned long)info->ring.sring);
   1.437 +		info->ring.sring = 0;
   1.438  		xenbus_dev_error(dev, err, "allocating event channel");
   1.439  		return err;
   1.440  	}
   1.441 -	blkif_connect(op.u.alloc_unbound.port, backend_id);
   1.442 +	blkif_connect(info, op.u.alloc_unbound.port);
   1.443  	return 0;
   1.444  }
   1.445  
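setup_blkring() therefore runs once per device rather than only for the first vbd, and info replaces the old shmem_ref/blk_ring globals. The bring-up order it follows, annotated with the calls from the hunk above:

    SHARED_RING_INIT(sring);                        /* 1. init shared page */
    FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

    info->grant_id = gnttab_claim_grant_reference(&gref_head, gref_terminal);
    gnttab_grant_foreign_access_ref(info->grant_id, /* 2. grant the page   */
                                    info->backend_id,
                                    virt_to_mfn(info->ring.sring), 0);

    op.u.alloc_unbound.dom = info->backend_id;      /* 3. event channel    */
    err = HYPERVISOR_event_channel_op(&op);         /*    for the backend  */

    blkif_connect(info, op.u.alloc_unbound.port);   /* 4. bind our end     */

Only after this succeeds does talk_to_backend() publish grant-id and event-channel in the store, so the backend never sees a reference that does not yet exist.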
   1.446 @@ -1149,11 +1148,11 @@ static int talk_to_backend(struct xenbus
   1.447  {
   1.448  	char *backend;
   1.449  	const char *message;
   1.450 -	int err, backend_id;
   1.451 +	int err;
   1.452  
   1.453  	backend = NULL;
   1.454  	err = xenbus_gather(dev->nodename,
   1.455 -			    "backend-id", "%i", &backend_id,
   1.456 +			    "backend-id", "%i", &info->backend_id,
   1.457  			    "backend", NULL, &backend,
   1.458  			    NULL);
   1.459  	if (XENBUS_EXIST_ERR(err))
   1.460 @@ -1168,12 +1167,10 @@ static int talk_to_backend(struct xenbus
   1.461  		goto out;
   1.462  	}
   1.463  
   1.464 -	/* First device?  We create shared ring, alloc event channel. */
   1.465 -	if (blkif_vbds == 0) {
   1.466 -		err = setup_blkring(dev, backend_id);
   1.467 -		if (err)
   1.468 -			goto out;
   1.469 -	}
   1.470 +	/* Create shared ring, alloc event channel. */
   1.471 +	err = setup_blkring(dev, info);
   1.472 +	if (err)
   1.473 +		goto out;
   1.474  
   1.475  	err = xenbus_transaction_start(dev->nodename);
   1.476  	if (err) {
   1.477 @@ -1181,13 +1178,13 @@ static int talk_to_backend(struct xenbus
   1.478  		goto destroy_blkring;
   1.479  	}
   1.480  
   1.481 -	err = xenbus_printf(dev->nodename, "grant-id","%u", shmem_ref);
   1.482 +	err = xenbus_printf(dev->nodename, "grant-id","%u", info->grant_id);
   1.483  	if (err) {
   1.484  		message = "writing grant-id";
   1.485  		goto abort_transaction;
   1.486  	}
   1.487  	err = xenbus_printf(dev->nodename,
   1.488 -			    "event-channel", "%u", blkif_evtchn);
   1.489 +			    "event-channel", "%u", info->evtchn);
   1.490  	if (err) {
   1.491  		message = "writing event-channel";
   1.492  		goto abort_transaction;
   1.493 @@ -1220,8 +1217,7 @@ static int talk_to_backend(struct xenbus
   1.494  	/* Have to do this *outside* transaction.  */
   1.495  	xenbus_dev_error(dev, err, "%s", message);
   1.496   destroy_blkring:
   1.497 -	if (blkif_vbds == 0)
   1.498 -		blkif_free();
   1.499 +	blkif_free(info);
   1.500  	goto out;
   1.501  }
   1.502  
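Each device now advertises its own channel inside the transaction above. An illustrative layout of the nodes involved, relative to the device's dev->nodename (the numeric values are made-up examples, not from this changeset):

    <nodename>/backend-id    = "0"    read into info->backend_id
    <nodename>/backend       = "..."  path of the backend's directory
    <nodename>/grant-id      = "8"    written from info->grant_id
    <nodename>/event-channel = "11"   written from info->evtchn

Before this patch, grant-id and event-channel described the single shared interface; now every vbd's backend reads its own pair.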
   1.503 @@ -1266,7 +1262,6 @@ static int blkfront_probe(struct xenbus_
   1.504  
   1.505  	/* Call once in case entries already there. */
   1.506  	watch_for_status(&info->watch, info->watch.node);
   1.507 -	blkif_vbds++;
   1.508  	return 0;
   1.509  }
   1.510  
   1.511 @@ -1281,12 +1276,12 @@ static int blkfront_remove(struct xenbus
   1.512  		xlvbd_del(info->handle);
   1.513  		blkif_vbds_connected--;
   1.514  	}
   1.515 +
   1.516 +	blkif_free(info);
   1.517 +
   1.518  	kfree(info->backend);
   1.519  	kfree(info);
   1.520  
   1.521 -	if (--blkif_vbds == 0)
   1.522 -		blkif_free();
   1.523 -
   1.524  	return 0;
   1.525  }
   1.526  
   1.527 @@ -1298,10 +1293,8 @@ static int blkfront_suspend(struct xenbu
   1.528  	kfree(info->backend);
   1.529  	info->backend = NULL;
   1.530  
   1.531 -	if (--blkif_vbds == 0) {
   1.532 -		recovery = 1;
   1.533 -		blkif_free();
   1.534 -	}
   1.535 +	recovery = 1;
   1.536 +	blkif_free(info);
   1.537  
   1.538  	return 0;
   1.539  }
   1.540 @@ -1314,8 +1307,7 @@ static int blkfront_resume(struct xenbus
   1.541  	/* FIXME: Check geometry hasn't changed here... */
   1.542  	err = talk_to_backend(dev, info);
   1.543  	if (!err) {
   1.544 -		if (blkif_vbds++ == 0)
   1.545 -			blkif_recover();
   1.546 +		blkif_recover(info);
   1.547  	}
   1.548  	return err;
   1.549  }
   1.550 @@ -1363,15 +1355,15 @@ static int __init xlblk_init(void)
   1.551  {
   1.552      int i;
   1.553  
   1.554 +    if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
   1.555 +         (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
   1.556 +        return 0;
   1.557 +
   1.558      /* A grant for every ring slot, plus one for the ring itself. */
   1.559      if (gnttab_alloc_grant_references(MAXIMUM_OUTSTANDING_BLOCK_REQS + 1,
   1.560  				      &gref_head, &gref_terminal) < 0)
   1.561          return 1;
   1.562  
   1.563 -    if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
   1.564 -         (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
   1.565 -        return 0;
   1.566 -
   1.567      IPRINTK("Initialising virtual block device driver\n");
   1.568  
   1.569      blk_shadow_free = 0;
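The reordering in xlblk_init() is a small leak fix folded into the change: previously the driver reserved its grant references first and only then noticed it was running in the initial or block-backend domain, returning 0 with the references still claimed. The check now comes before any allocation:

    /* Bail out of frontend init before reserving anything in domains
     * that never run the frontend. */
    if ((xen_start_info.flags & SIF_INITDOMAIN) ||
        (xen_start_info.flags & SIF_BLK_BE_DOMAIN))
        return 0;

    /* A grant for every ring slot, plus one for the ring page itself. */
    if (gnttab_alloc_grant_references(MAXIMUM_OUTSTANDING_BLOCK_REQS + 1,
                                      &gref_head, &gref_terminal) < 0)
        return 1;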
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Mon Aug 22 10:21:18 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Mon Aug 22 14:22:59 2005 +0000
     2.3 @@ -79,6 +79,8 @@
     2.4  #define DPRINTK_IOCTL(_f, _a...) ((void)0)
     2.5  #endif
     2.6  
     2.7 +struct blkfront_info;
     2.8 +
     2.9  struct xlbd_type_info {
    2.10      int partn_shift;
    2.11      int disks_per_major;
    2.12 @@ -106,6 +108,7 @@ struct xlbd_disk_info {
    2.13      struct xlbd_disk_info  *next_waiting;
    2.14      request_queue_t        *rq;
    2.15  #endif
    2.16 +    struct blkfront_info *info;
    2.17  };
    2.18  
    2.19  typedef struct xen_block {
    2.20 @@ -124,6 +127,6 @@ extern void do_blkif_request (request_qu
    2.21  
    2.22  /* Virtual block-device subsystem. */
    2.23  int xlvbd_add(blkif_sector_t capacity, int device, blkif_vdev_t handle,
    2.24 -	      u16 info, u16 sector_size);
    2.25 +	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
    2.26  void xlvbd_del(blkif_vdev_t handle);
    2.27  #endif /* __XEN_DRIVERS_BLOCK_H__ */
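Two header-side details support the split. The old u16 parameter named info (the VDISK_* flag word) becomes vdisk_info, freeing the name for the new structure pointer, and struct blkfront_info is only forward-declared: the header and vbd.c handle it purely through pointers, so the definition stays private to blkfront.c. A generic illustration of that opaque-pointer idiom (takes_info is hypothetical, not a function in this patch):

    struct blkfront_info;                     /* incomplete type             */
    void takes_info(struct blkfront_info *p); /* fine: pointers need no size */
    /* sizeof(struct blkfront_info) would fail to compile here; only
     * blkfront.c, which defines the struct, may dereference it. */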
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Mon Aug 22 10:21:18 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c	Mon Aug 22 14:22:59 2005 +0000
     3.3 @@ -222,7 +222,8 @@ static int xlvbd_init_blk_queue(struct g
     3.4  
     3.5  static struct gendisk *xlvbd_alloc_gendisk(
     3.6      struct xlbd_major_info *mi, int minor, blkif_sector_t capacity,
     3.7 -    int device, blkif_vdev_t handle, u16 info, u16 sector_size)
     3.8 +    int device, blkif_vdev_t handle, u16 vdisk_info, u16 sector_size,
     3.9 +    struct blkfront_info *info)
    3.10  {
    3.11      struct gendisk *gd;
    3.12      struct xlbd_disk_info *di;
    3.13 @@ -235,6 +236,7 @@ static struct gendisk *xlvbd_alloc_gendi
    3.14      di->mi = mi;
    3.15      di->xd_device = device;
    3.16      di->handle = handle;
    3.17 +    di->info = info;
    3.18  
    3.19      if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
    3.20          nr_minors = 1 << mi->type->partn_shift;
    3.21 @@ -266,13 +268,13 @@ static struct gendisk *xlvbd_alloc_gendi
    3.22  
    3.23      di->rq = gd->queue;
    3.24  
    3.25 -    if (info & VDISK_READONLY)
    3.26 +    if (vdisk_info & VDISK_READONLY)
    3.27          set_disk_ro(gd, 1);
    3.28  
    3.29 -    if (info & VDISK_REMOVABLE)
    3.30 +    if (vdisk_info & VDISK_REMOVABLE)
    3.31          gd->flags |= GENHD_FL_REMOVABLE;
    3.32  
    3.33 -    if (info & VDISK_CDROM)
    3.34 +    if (vdisk_info & VDISK_CDROM)
    3.35          gd->flags |= GENHD_FL_CD;
    3.36  
    3.37      add_disk(gd);
    3.38 @@ -285,7 +287,7 @@ out:
    3.39  }
    3.40  
    3.41  int xlvbd_add(blkif_sector_t capacity, int device, blkif_vdev_t handle,
    3.42 -	      u16 info, u16 sector_size)
    3.43 +	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info)
    3.44  {
    3.45      struct lvdisk *new;
    3.46      struct block_device *bd;
    3.47 @@ -300,7 +302,7 @@ int xlvbd_add(blkif_sector_t capacity, i
    3.48      if (new == NULL)
    3.49          return -ENOMEM;
    3.50      new->capacity = capacity;
    3.51 -    new->info = info;
    3.52 +    new->info = vdisk_info;
    3.53      new->handle = handle;
    3.54      new->dev = MKDEV(MAJOR_XEN(device), MINOR_XEN(device));
    3.55  
    3.56 @@ -309,7 +311,7 @@ int xlvbd_add(blkif_sector_t capacity, i
    3.57          goto out;
    3.58      
    3.59      gd = xlvbd_alloc_gendisk(mi, MINOR_XEN(device), capacity, device, handle,
    3.60 -			     info, sector_size);
    3.61 +			     vdisk_info, sector_size, info);
    3.62      if (gd == NULL)
    3.63          goto out_bd;
    3.64
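The one added assignment, di->info = info, closes the loop for the request path: the gendisk's private_data points at its xlbd_disk_info (set up elsewhere in xlvbd_alloc_gendisk, outside these hunks), and that per-disk record now carries the owning device's blkfront_info. Condensed:

    gd->private_data = di;   /* gendisk -> per-disk state (vbd.c)      */
    di->info = info;         /* per-disk state -> per-device channel   */
    /* so blkfront.c reaches this device's own ring via
     * req->rq_disk->private_data, as in do_blkif_request(). */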