ia64/xen-unstable

changeset 268:6501c2dbec48

bitkeeper revision 1.114 (3e68e46cO4SttqIQ-m6wF2vozDSi1Q)

xl_block.c:
Fix bug in XenoLinux handling of request queues when the comms ring gets full.
author kaf24@labyrinth.cl.cam.ac.uk
date Fri Mar 07 18:26:52 2003 +0000 (2003-03-07)
parents 893aee1b628b
children f51eab080fa1
files xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c
line diff
     1.1 --- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c	Fri Mar 07 17:01:28 2003 +0000
     1.2 +++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c	Fri Mar 07 18:26:52 2003 +0000
     1.3 @@ -22,6 +22,8 @@ static unsigned int resp_cons; /* Respon
     1.4  static xen_disk_info_t xlblk_disk_info;
     1.5  static int xlblk_control_msg_pending;
     1.6  
     1.7 +#define RING_FULL (BLK_RING_INC(blk_ring->req_prod) == resp_cons)
     1.8 +
     1.9  /*
    1.10   * Request queues with outstanding work, but ring is currently full.
    1.11   * We need no special lock here, as we always access this with the
    1.12 @@ -273,8 +275,7 @@ static int hypervisor_request(void *    
    1.13       * because we have a whole bunch of outstanding responses to process. No 
    1.14       * matter, as the response handler will kick the request queue.
    1.15       */
    1.16 -    if ( BLK_RING_INC(blk_ring->req_prod) == resp_cons )
    1.17 -        return 1;
    1.18 +    if ( RING_FULL ) return 1;
    1.19  
    1.20      buffer_ma = (void *)phys_to_machine(virt_to_phys(buffer)); 
    1.21  
    1.22 @@ -431,8 +432,12 @@ static void xlblk_response_int(int irq, 
    1.23           (((blk_ring->req_prod - resp_cons) & (BLK_RING_SIZE - 1)) < 
    1.24            (BLK_RING_SIZE >> 1)) )
    1.25      {
    1.26 -        do { do_xlblk_request(pending_queues[--nr_pending]); }
    1.27 -        while ( nr_pending != 0 );
    1.28 +        /* Attempt to drain the queue, but bail if the ring becomes full. */
    1.29 +        while ( nr_pending != 0 )
    1.30 +        {
    1.31 +            do_xlblk_request(pending_queues[--nr_pending]);
    1.32 +            if ( RING_FULL ) break;
    1.33 +        }
    1.34      }
    1.35  
    1.36      spin_unlock_irqrestore(&io_request_lock, flags);
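
For reference, a minimal standalone sketch of the ring arithmetic this change relies on (illustrative C, not the XenoLinux driver: the ring size, the request counter and the printf tracing are assumptions made for the example). BLK_RING_INC wraps an index around a power-of-two ring, so RING_FULL treats "advancing req_prod would land on resp_cons" as full, deliberately leaving one slot unused so a full ring is distinguishable from an empty one. The drain loop below has the same shape as the fixed loop in xlblk_response_int(): stop as soon as the ring fills and leave the remaining work pending for the next response interrupt.

#include <stdio.h>

#define BLK_RING_SIZE 8                                    /* power of two (illustrative) */
#define BLK_RING_INC(i) (((i) + 1) & (BLK_RING_SIZE - 1))  /* wrap index around the ring  */

static unsigned int req_prod;   /* producer: next free request slot      */
static unsigned int resp_cons;  /* consumer: next response to be handled */

/* Full when advancing the producer would collide with the consumer. */
#define RING_FULL (BLK_RING_INC(req_prod) == resp_cons)

/* Stand-in for hypervisor_request(): returns 1 if the ring is full so the
 * caller can defer the request, otherwise claims a slot. */
static int hypervisor_request(int id)
{
    if ( RING_FULL )
        return 1;
    printf("queued request %d (req_prod=%u, resp_cons=%u)\n",
           id, req_prod, resp_cons);
    req_prod = BLK_RING_INC(req_prod);
    return 0;
}

int main(void)
{
    int nr_pending = 20, id = 0;

    /* Attempt to drain the pending work, but bail if the ring becomes full,
     * mirroring the loop introduced by this changeset. */
    while ( nr_pending != 0 )
    {
        if ( hypervisor_request(id) != 0 )
            break;              /* ring already full: leave the rest pending */
        id++;
        nr_pending--;
        if ( RING_FULL )
            break;              /* the bail-out added by this change */
    }

    printf("%d requests left pending, RING_FULL=%d\n", nr_pending, RING_FULL);
    return 0;
}

With BLK_RING_SIZE at 8 the loop queues seven requests and then stops with 13 still pending; the old unconditional do/while kept draining pending queues regardless of ring state, which is the behaviour this changeset fixes.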