direct-io.hg

changeset 1437:c061db2d4eb5

bitkeeper revision 1.934.2.1 (40c59df3I0NyFmedTB-YFOdH5U6utQ)

main.c:
Block device restartability: keep a private copy of outstanding block-ring
requests so they can be reissued after the backend disconnects and reconnects.
author mwilli2@equilibrium.research.intel-research.net
date Tue Jun 08 11:07:31 2004 +0000 (2004-06-08)
parents 24ad109055b6
children 88484dcdafbf
files xenolinux-2.4.26-sparse/arch/xen/drivers/blkif/frontend/main.c
line diff
     1.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/blkif/frontend/main.c	Thu Jun 03 16:57:49 2004 +0000
     1.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/blkif/frontend/main.c	Tue Jun 08 11:07:31 2004 +0000
     1.3 @@ -16,6 +16,8 @@
     1.4  #include <scsi/scsi.h>
     1.5  #include <asm/ctrl_if.h>
     1.6  
     1.7 +
     1.8 +
     1.9  typedef unsigned char byte; /* from linux/ide.h */
    1.10  
    1.11  #define BLKIF_STATE_CLOSED       0
    1.12 @@ -31,6 +33,15 @@ static blkif_ring_t *blk_ring;
    1.13  static BLK_RING_IDX resp_cons; /* Response consumer for comms ring. */
    1.14  static BLK_RING_IDX req_prod;  /* Private request producer.         */
    1.15  
    1.16 +
    1.17 +static blkif_ring_t *blk_ring_rec; /* Private copy of requests, used for
    1.18 +                                    * recovery.  Responses not stored here. */
    1.19 +static BLK_RING_IDX resp_cons_rec; /* Copy of response consumer, used for
    1.20 +                                    * recovery */
    1.21 +static int recovery = 0;           /* "Recovery in progress" flag.  Protected
    1.22 +                                    * by the io_request_lock */
    1.23 +
    1.24 +
    1.25  /* We plug the I/O ring if the driver is suspended or if the ring is full. */
    1.26  #define RING_PLUGGED (((req_prod - resp_cons) == BLK_RING_SIZE) || \
    1.27                        (blkif_state != BLKIF_STATE_CONNECTED))
    1.28 @@ -352,6 +363,11 @@ static int blkif_queue_request(unsigned 
    1.29                  sg_next_sect += nr_sectors;
    1.30              else
    1.31                  DISABLE_SCATTERGATHER();
    1.32 +
    1.33 +            /* Update the copy of the request in the recovery ring. */
    1.34 +            blk_ring_rec->ring[MASK_BLK_IDX(blk_ring_rec->req_prod - 1)].req
    1.35 +                = *req;
    1.36 +
    1.37              return 0;
    1.38          }
    1.39          else if ( RING_PLUGGED )
    1.40 @@ -380,6 +396,10 @@ static int blkif_queue_request(unsigned 
    1.41      req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
    1.42      req_prod++;
    1.43  
    1.44 +    /* Keep a private copy so we can reissue requests when recovering. */
    1.45 +    blk_ring_rec->ring[MASK_BLK_IDX(blk_ring_rec->req_prod)].req = *req;
    1.46 +    blk_ring_rec->req_prod++;
    1.47 +
    1.48      return 0;
    1.49  }
    1.50  
    1.51 @@ -485,10 +505,17 @@ static void blkif_int(int irq, void *dev
    1.52      unsigned long flags; 
    1.53      struct buffer_head *bh, *next_bh;
    1.54      
    1.55 -    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) )
    1.56 +//    printk(KERN_ALERT "blkif_int\n");
    1.57 +
    1.58 +    spin_lock_irqsave(&io_request_lock, flags);     
    1.59 +
    1.60 +    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
    1.61 +    {
    1.62 +        printk("Bailed out\n");
    1.63 +        
    1.64 +        spin_unlock_irqrestore(&io_request_lock, flags);
    1.65          return;
    1.66 -    
    1.67 -    spin_lock_irqsave(&io_request_lock, flags);     
    1.68 +    }
    1.69  
    1.70      for ( i = resp_cons; i != blk_ring->resp_prod; i++ )
    1.71      {
    1.72 @@ -519,6 +546,7 @@ static void blkif_int(int irq, void *dev
    1.73      }
    1.74      
    1.75      resp_cons = i;
    1.76 +    resp_cons_rec = i;
    1.77  
    1.78      kick_pending_request_queues();
    1.79  
    1.80 @@ -546,6 +574,8 @@ void blkif_control_send(blkif_request_t 
    1.81  
    1.82      DISABLE_SCATTERGATHER();
    1.83      memcpy(&blk_ring->ring[MASK_BLK_IDX(req_prod)].req, req, sizeof(*req));
    1.84 +    memcpy(&blk_ring_rec->ring[MASK_BLK_IDX(blk_ring_rec->req_prod++)].req,
    1.85 +           req, sizeof(*req));
    1.86      req_prod++;
    1.87      flush_requests();
    1.88  
    1.89 @@ -586,7 +616,19 @@ static void blkif_status_change(blkif_fe
    1.90          {
    1.91              printk(KERN_WARNING "Unexpected blkif-DISCONNECTED message"
    1.92                     " in state %d\n", blkif_state);
    1.93 -            break;
    1.94 +
    1.95 +            printk(KERN_INFO "VBD driver recovery in progress\n");
    1.96 +            
    1.97 +            /* Prevent new requests being issued until we've fixed things up. */
    1.98 +            spin_lock_irq(&io_request_lock);
    1.99 +            recovery = 1;
   1.100 +            blkif_state = BLKIF_STATE_DISCONNECTED;
   1.101 +            spin_unlock_irq(&io_request_lock);
   1.102 +
   1.103 +            /* Free resources associated with old device channel. */
   1.104 +            free_page((unsigned long)blk_ring);
   1.105 +            free_irq(blkif_irq, NULL);
   1.106 +            unbind_evtchn_from_irq(blkif_evtchn);
   1.107          }
   1.108  
   1.109          /* Move from CLOSED to DISCONNECTED state. */
   1.110 @@ -617,16 +659,55 @@ static void blkif_status_change(blkif_fe
   1.111          blkif_evtchn = status->evtchn;
   1.112          blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
   1.113          (void)request_irq(blkif_irq, blkif_int, 0, "blkif", NULL);
   1.114 -        
   1.115 -        /* Probe for discs that are attached to the interface. */
   1.116 -        xlvbd_init();
   1.117 -        
   1.118 +
   1.119 +        if ( recovery )
   1.120 +        {
   1.121 +            int i;
   1.122 +
   1.123 +	    /* Shouldn't need the io_request_lock here - the device is
   1.124 +	     * plugged and the recovery flag prevents the interrupt handler
   1.125 +	     * changing anything. */
   1.126 +
   1.127 +            /* Reissue requests from the private block ring. */
   1.128 +            for ( i = 0;
   1.129 +		  resp_cons_rec < blk_ring_rec->req_prod;
   1.130 +                  resp_cons_rec++, i++ )
   1.131 +            {
   1.132 +                blk_ring->ring[i].req
   1.133 +                    = blk_ring_rec->ring[MASK_BLK_IDX(resp_cons_rec)].req;
   1.134 +            }
   1.135 +
   1.136 +            /* Reset the private block ring to match the new ring. */
   1.137 +            memcpy(blk_ring_rec, blk_ring, sizeof(*blk_ring));
   1.138 +            resp_cons_rec = 0;
   1.139 +
   1.140 +            /* blk_ring->req_prod will be set when we flush_requests().*/
   1.141 +            blk_ring_rec->req_prod = req_prod = i;
   1.142 +
   1.143 +            wmb();
   1.144 +
   1.145 +            /* Switch off recovery mode, using a memory barrier to ensure that
   1.146 +             * it's seen before we flush requests - we don't want to miss any
   1.147 +             * interrupts. */
   1.148 +            recovery = 0;
   1.149 +            wmb();
   1.150 +
   1.151 +            /* Kicks things back into life. */
   1.152 +            flush_requests();
   1.153 +        }
   1.154 +        else
   1.155 +        {
   1.156 +            /* Probe for discs that are attached to the interface. */
   1.157 +            xlvbd_init();
   1.158 +        }
   1.159 +
   1.160          blkif_state = BLKIF_STATE_CONNECTED;
   1.161          
   1.162          /* Kick pending requests. */
   1.163          spin_lock_irq(&io_request_lock);
   1.164          kick_pending_request_queues();
   1.165          spin_unlock_irq(&io_request_lock);
   1.166 +
   1.167          break;
   1.168  
   1.169      default:
   1.170 @@ -671,9 +752,15 @@ int __init xlblk_init(void)
   1.171      ctrl_msg_t                       cmsg;
   1.172      blkif_fe_driver_status_changed_t st;
   1.173  
   1.174 -    if ( start_info.flags & SIF_INITDOMAIN )
   1.175 +    if ( (start_info.flags & SIF_INITDOMAIN) 
   1.176 +        || (start_info.flags & SIF_BLK_BE_DOMAIN) )
   1.177          return 0;
   1.178  
   1.179 +    printk(KERN_INFO "Initialising Xen virtual block device\n");
   1.180 +
   1.181 +    blk_ring_rec = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
   1.182 +    memset(blk_ring_rec, 0, sizeof(*blk_ring_rec));
   1.183 +
   1.184      (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
   1.185                                      CALLBACK_IN_BLOCKING_CONTEXT);
   1.186
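
The heart of this changeset is a shadow ("recovery") ring: every request placed on the shared block ring is also copied into a private ring, and when the backend disconnects and reconnects, the requests that never received a response are copied back into the fresh shared ring before the interrupt path is re-enabled. Responses are never stored in the shadow ring; a response simply retires the matching request slot. Below is a minimal, self-contained sketch of that pattern, not the real driver: the names and types (req_t, ring_t, RING_SIZE, queue_request(), recover(), and so on) are hypothetical stand-ins for the blkif_ring_t machinery, and the io_request_lock, event-channel, and memory-barrier details of the actual patch are omitted.

    /* Illustrative sketch only: simplified model of the recovery-ring
     * technique.  All names here are hypothetical, not the blkif API. */
    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 8
    #define MASK(i)   ((i) & (RING_SIZE - 1))

    typedef struct { unsigned long id; unsigned long sector; } req_t;

    typedef struct {
        req_t    ring[RING_SIZE];
        unsigned req_prod;      /* requests produced so far  */
        unsigned resp_prod;     /* responses produced so far */
    } ring_t;

    static ring_t shared;   /* ring shared with the backend (lost on disconnect) */
    static ring_t shadow;   /* private copy kept only for recovery               */
    static unsigned resp_cons;        /* responses consumed from the shared ring */
    static unsigned shadow_resp_cons; /* same index, mirrored for the shadow     */
    static int recovery;              /* parks the response path while set       */

    /* Issue a request: write it into the shared ring *and* the shadow ring. */
    static void queue_request(const req_t *req)
    {
        shared.ring[MASK(shared.req_prod)] = *req;
        shared.req_prod++;

        shadow.ring[MASK(shadow.req_prod)] = *req;
        shadow.req_prod++;
    }

    /* Consume a response: advance both consumer indices in lock-step. */
    static void handle_response(void)
    {
        if (recovery || resp_cons == shared.resp_prod)
            return;                     /* replay in progress, or nothing to do */
        resp_cons++;
        shadow_resp_cons = resp_cons;
    }

    /* After the backend reconnects with a fresh, empty shared ring, copy every
     * request that never got a response back into it, then reset the shadow
     * ring so the two stay in step again. */
    static void recover(void)
    {
        unsigned i = 0, idx;

        recovery = 1;                               /* park the response path  */

        for (idx = shadow_resp_cons; idx != shadow.req_prod; idx++, i++)
            shared.ring[MASK(i)] = shadow.ring[MASK(idx)];

        memcpy(&shadow, &shared, sizeof(shadow));   /* shadow mirrors new ring */
        shadow.req_prod = shared.req_prod = i;      /* i outstanding requests  */
        resp_cons = shadow_resp_cons = 0;

        recovery = 0;                               /* responses may flow again */
    }

    int main(void)
    {
        req_t r = { .id = 1, .sector = 100 };

        queue_request(&r);          /* each request goes to both rings         */
        r.id = 2; r.sector = 200;
        queue_request(&r);

        shared.resp_prod = 1;       /* pretend the backend answered request 1  */
        handle_response();

        /* Backend dies and comes back with an empty ring: the unanswered
         * request (id 2) is replayed into slot 0 of the new ring. */
        memset(&shared, 0, sizeof(shared));
        recover();

        printf("outstanding after recovery: %u (id=%lu)\n",
               shared.req_prod, shared.ring[0].id);   /* prints: 1 (id=2) */
        return 0;
    }

In the real patch the same roles are played by blk_ring_rec, resp_cons_rec, and the recovery flag: the flag is checked in blkif_int() so no responses are consumed while the replay in blkif_status_change() rewrites the new shared ring, and a write barrier orders clearing the flag against flush_requests().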