ia64/xen-unstable

changeset 4725:23682e5da945

bitkeeper revision 1.1389.1.22 (4275e5e1eelF9eh7n92D3W-1Ju8Fzg)

[PATCH] make XenFreeBSD VBD ring full handling sensible

# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
# 2005/05/01 23:48:15-07:00 kmacy@curly.lab.netapp.com
# make ring full handling sensible
# Signed-off-by: Kip Macy <kmacy@netapp.com>
#
# freebsd-5.3-xen-sparse/i386-xen/xen/blkfront/xb_blkfront.c
# 2005/05/01 23:48:12-07:00 kmacy@curly.lab.netapp.com +48 -5
# make ring full handling sensible
#
author kmacy@netapp.com[kaf24]
date Mon May 02 08:33:37 2005 +0000 (2005-05-02)
parents e8c18dee0bb6
children bc5f75a442ab
files freebsd-5.3-xen-sparse/i386-xen/xen/blkfront/xb_blkfront.c
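Note on the mechanism, before the diff: previously a single xb_kick_pending flag meant that, once the shared block ring filled, only the device whose interrupt happened to fire got its I/O restarted. The patch instead keeps a FIFO of blocked softcs (linked through a new xb_next_blocked field, head/tail pointers, and a dedicated spin lock) and drains the whole list from the response interrupt. The fragment below is a minimal user-space sketch of just the enqueue side, with simplified types, no locking, and a stand-in main(); the driver does this under blkif_io_block_lock when RING_FULL() reports the ring full.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the driver's per-device softc. */
struct xb_softc {
    int              xb_unit;
    struct xb_softc *xb_next_blocked;   /* next device waiting for ring space */
};

static struct xb_softc *xb_kick_pending_head;
static struct xb_softc *xb_kick_pending_tail;
static bool             xb_kick_pending;

/*
 * Enqueue side, as in xb_startio() when the ring is full: remember that a
 * kick is pending and append the device to the blocked FIFO unless it is
 * already linked (non-NULL next pointer, or already the tail).
 */
static void
xb_block_device(struct xb_softc *sc)
{
    xb_kick_pending = true;

    if (sc->xb_next_blocked == NULL && xb_kick_pending_tail != sc) {
        if (xb_kick_pending_head == NULL) {
            xb_kick_pending_head = xb_kick_pending_tail = sc;
        } else {
            xb_kick_pending_tail->xb_next_blocked = sc;
            xb_kick_pending_tail = sc;
        }
    }
}

int
main(void)
{
    struct xb_softc xbd0 = { 0, NULL }, xbd1 = { 1, NULL };

    xb_block_device(&xbd0);
    xb_block_device(&xbd1);
    xb_block_device(&xbd0);   /* duplicate enqueue is skipped */

    for (struct xb_softc *sc = xb_kick_pending_head; sc != NULL;
         sc = sc->xb_next_blocked)
        printf("blocked unit %d\n", sc->xb_unit);
    return 0;
}
```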
line diff
     1.1 --- a/freebsd-5.3-xen-sparse/i386-xen/xen/blkfront/xb_blkfront.c	Mon May 02 08:33:31 2005 +0000
     1.2 +++ b/freebsd-5.3-xen-sparse/i386-xen/xen/blkfront/xb_blkfront.c	Mon May 02 08:33:37 2005 +0000
     1.3 @@ -68,6 +68,7 @@ struct xb_softc {
     1.4      void		 *xb_resp_handler;
     1.5      int			  xb_unit;
     1.6      int			  xb_flags;
     1.7 +    struct xb_softc      *xb_next_blocked;
     1.8  #define XB_OPEN	(1<<0)		/* drive is open (can't shut down) */
     1.9  };
    1.10  
    1.11 @@ -118,6 +119,9 @@ static grant_ref_t gref_head, gref_termi
    1.12      (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
    1.13  #endif
    1.14  
    1.15 +static struct xb_softc *xb_kick_pending_head = NULL;
    1.16 +static struct xb_softc *xb_kick_pending_tail = NULL;
    1.17 +static struct mtx blkif_io_block_lock;
    1.18  
    1.19  static unsigned long rec_ring_free;		
    1.20  blkif_request_t rec_ring[BLK_RING_SIZE];
    1.21 @@ -246,6 +250,7 @@ xb_response_intr(void *xsc)
    1.22      /* sometimes we seem to lose i/o.  stay in the interrupt handler while
    1.23       * there is stuff to process: continually recheck the response producer.
    1.24       */
    1.25 + process_rcvd:
    1.26      for ( i = blk_ring.rsp_cons; i != (rp = blk_ring.sring->rsp_prod); i++ ) {
    1.27  	unsigned long id;
    1.28          bret = RING_GET_RESPONSE(&blk_ring, i);
    1.29 @@ -298,9 +303,28 @@ xb_response_intr(void *xsc)
    1.30      
    1.31      blk_ring.rsp_cons = i;
    1.32  
    1.33 -    if (sc && xb_kick_pending) {
    1.34 -    	xb_kick_pending = FALSE;
    1.35 -	xb_startio(sc);
    1.36 +    if (xb_kick_pending) {
    1.37 +	unsigned long flags;
    1.38 +	mtx_lock_irqsave(&blkif_io_block_lock, flags);
    1.39 +   	xb_kick_pending = FALSE;
    1.40 +	/* Run as long as there are blocked devs or queue fills again */
    1.41 +	while ((NULL != xb_kick_pending_head) && (FALSE == xb_kick_pending)) {
    1.42 +	    struct xb_softc *xb_cur = xb_kick_pending_head;
    1.43 +	    xb_kick_pending_head = xb_cur->xb_next_blocked;
    1.44 +	    if(NULL == xb_kick_pending_head) {
    1.45 +		xb_kick_pending_tail = NULL;
    1.46 +	    }
    1.47 +	    xb_cur->xb_next_blocked = NULL;
    1.48 +	    mtx_unlock_irqrestore(&blkif_io_block_lock, flags);
    1.49 +	    xb_startio(xb_cur);
    1.50 +	    mtx_lock_irqsave(&blkif_io_block_lock, flags);
    1.51 +	}
    1.52 +	mtx_unlock_irqrestore(&blkif_io_block_lock, flags);
    1.53 +
    1.54 +	if(blk_ring.rsp_cons != blk_ring.sring->rsp_prod) {
    1.55 +	    /* Consume those, too */
    1.56 +	    goto process_rcvd;
    1.57 +	}
    1.58      }
    1.59  
    1.60      mtx_unlock_irqrestore(&blkif_io_lock, flags);
    1.61 @@ -448,8 +472,22 @@ xb_startio(struct xb_softc *sc)
    1.62  
    1.63      }
    1.64  
    1.65 -    if (RING_FULL(&blk_ring))
    1.66 +    if (RING_FULL(&blk_ring)) {
    1.67 +	unsigned long flags;
    1.68 +	mtx_lock_irqsave(&blkif_io_block_lock, flags);
    1.69  	xb_kick_pending = TRUE;
    1.70 +        /* If we are not already on blocked list, add us */
    1.71 +        if((NULL == sc->xb_next_blocked) && (xb_kick_pending_tail != sc)) {
    1.72 +
    1.73 +            if(NULL == xb_kick_pending_head) {
    1.74 +                xb_kick_pending_head = xb_kick_pending_tail = sc;
    1.75 +            } else {
    1.76 +                xb_kick_pending_tail->xb_next_blocked = sc;
    1.77 +                xb_kick_pending_tail = sc;
    1.78 +            }
    1.79 +        }
    1.80 +        mtx_unlock_irqrestore(&blkif_io_block_lock, flags);
    1.81 +    }
    1.82      
    1.83      if (queued != 0) 
    1.84  	flush_requests();
    1.85 @@ -501,6 +539,7 @@ xb_create(int unit)
    1.86      
    1.87      sc = (struct xb_softc *)malloc(sizeof(*sc), M_DEVBUF, M_WAITOK);
    1.88      sc->xb_unit = unit;
    1.89 +    sc->xb_next_blocked = NULL;
    1.90  
    1.91      memset(&sc->xb_disk, 0, sizeof(sc->xb_disk)); 
    1.92      sc->xb_disk.d_unit = unit;
    1.93 @@ -947,7 +986,10 @@ xb_init(void *unused)
    1.94          return;
    1.95      printk("Blkif frontend is using grant tables.\n");
    1.96  #endif
    1.97 -
    1.98 + 
    1.99 +    xb_kick_pending = FALSE;
   1.100 +    xb_kick_pending_head = NULL;
   1.101 +    xb_kick_pending_tail = NULL;
   1.102  
   1.103      rec_ring_free = 0;
   1.104      for (i = 0; i < BLK_RING_SIZE; i++) {
   1.105 @@ -1002,4 +1044,5 @@ blkif_completion(blkif_request_t *req)
   1.106  #endif    
   1.107  }
   1.108  MTX_SYSINIT(ioreq, &blkif_io_lock, "BIO LOCK", MTX_SPIN | MTX_NOWITNESS); /* XXX how does one enroll a lock? */
   1.109 + MTX_SYSINIT(ioreq_block, &blkif_io_block_lock, "BIO BLOCK LOCK", MTX_SPIN | MTX_NOWITNESS);
   1.110  SYSINIT(xbdev, SI_SUB_PSEUDO, SI_ORDER_ANY, xb_init, NULL)
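For completeness, the other half of the mechanism is the drain loop the patch adds to xb_response_intr(): pop blocked devices in FIFO order and retry their I/O until either the list empties or the ring fills up again, then re-check the response producer (the goto process_rcvd). A user-space sketch under the same simplifications as the enqueue fragment above; xb_retry_io() is a hypothetical stand-in for xb_startio(), and the dropping/retaking of blkif_io_block_lock around that call is noted only in comments.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct xb_softc {
    int              xb_unit;
    struct xb_softc *xb_next_blocked;
};

static struct xb_softc *xb_kick_pending_head;
static struct xb_softc *xb_kick_pending_tail;
static bool             xb_kick_pending;

/* Stand-in for xb_startio(); in the driver this may set xb_kick_pending
 * again if the ring refills before the queue is fully replayed. */
static void
xb_retry_io(struct xb_softc *sc)
{
    printf("restarting queued I/O for unit %d\n", sc->xb_unit);
}

/* Drain blocked devices until the list empties or the ring refills. */
static void
xb_drain_blocked(void)
{
    xb_kick_pending = false;

    while (xb_kick_pending_head != NULL && !xb_kick_pending) {
        struct xb_softc *cur = xb_kick_pending_head;

        xb_kick_pending_head = cur->xb_next_blocked;
        if (xb_kick_pending_head == NULL)
            xb_kick_pending_tail = NULL;
        cur->xb_next_blocked = NULL;

        /* The driver drops blkif_io_block_lock across this call and
         * retakes it afterwards. */
        xb_retry_io(cur);
    }
    /* The driver then re-checks rsp_cons against rsp_prod and loops back
     * to consume any responses that arrived meanwhile. */
}

int
main(void)
{
    struct xb_softc a = { 0, NULL }, b = { 1, NULL };

    /* Two devices blocked on a full ring, then the ring gets space. */
    xb_kick_pending_head = &a;
    a.xb_next_blocked = &b;
    xb_kick_pending_tail = &b;
    xb_kick_pending = true;

    xb_drain_blocked();
    return 0;
}
```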