ia64/xen-unstable

changeset 2259:b4498a444e43

bitkeeper revision 1.1159.35.1 (41222da4iDbEv0TvZWP90i6L3s4Z5Q)

small 2.6 blkfront / netfront fixes: factor the 2.6 queue-restart logic into kick_pending_request_queues(), move the 2.4 pending-queue state and helper into the 2.4-only section ahead of their users, check the request_irq() return value in blkfront, and make netfront suspend free the IRQ/event channel even when the interface is down
author iap10@labyrinth.cl.cam.ac.uk
date Tue Aug 17 16:09:08 2004 +0000 (2004-08-17)
parents 1ec35141a882
children a7d13ecf88c5
files linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c
line diff
     1.1 --- a/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c	Mon Aug 16 16:55:39 2004 +0000
     1.2 +++ b/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c	Tue Aug 17 16:09:08 2004 +0000
     1.3 @@ -49,15 +49,6 @@ static int recovery = 0;           /* "R
     1.4  #define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
     1.5                           (blkif_state != BLKIF_STATE_CONNECTED))
     1.6  
     1.7 -/*
     1.8 - * Request queues with outstanding work, but ring is currently full.
     1.9 - * We need no special lock here, as we always access this with the
    1.10 - * blkif_io_lock held. We only need a small maximum list.
    1.11 - */
    1.12 -#define MAX_PENDING 8
    1.13 -static request_queue_t *pending_queues[MAX_PENDING];
    1.14 -static int nr_pending;
    1.15 -
    1.16  static inline void translate_req_to_mfn(blkif_request_t *xreq,
    1.17                                          blkif_request_t *req);
    1.18  
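
The BLKIF_RING_FULL macro above relies on free-running unsigned indices: req_prod only ever increments and resp_cons chases it, so their difference is the number of outstanding requests even after the counters wrap. A minimal standalone sketch of that arithmetic (names are illustrative, not the driver's):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 64u   /* stands in for BLKIF_RING_SIZE; a power of two */

    int main(void)
    {
        /* Start near UINT_MAX so the indices actually wrap. */
        unsigned int prod = 0xfffffff0u, cons = 0xfffffff0u;

        while ( (prod - cons) != RING_SIZE )  /* fill: producer runs ahead */
            prod++;
        assert(prod < cons);                  /* prod has wrapped past zero... */
        assert(prod - cons == RING_SIZE);     /* ...yet the count is exact */

        cons += RING_SIZE / 2;                /* backend consumes half */
        printf("outstanding = %u\n", prod - cons);  /* prints 32 */
        return 0;
    }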
    1.19 @@ -99,6 +90,23 @@ static inline void ADD_ID_TO_FREELIST( u
    1.20  
    1.21  __initcall(xlblk_init);
    1.22  
    1.23 +
    1.24 +static void kick_pending_request_queues(void)
    1.25 +{
    1.26 +
    1.27 +    if ( (xlbd_blk_queue != NULL) &&
    1.28 +         test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
    1.29 +    {
    1.30 +        blk_start_queue(xlbd_blk_queue);
    1.31 +        /* XXXcl call to request_fn should not be needed but
    1.32 +         * we get stuck without...  needs investigating
    1.33 +         */
    1.34 +        xlbd_blk_queue->request_fn(xlbd_blk_queue);
    1.35 +    }
    1.36 +
    1.37 +}
    1.38 +
    1.39 +
    1.40  int blkif_open(struct inode *inode, struct file *filep)
    1.41  {
    1.42      struct gendisk *gd = inode->i_bdev->bd_disk;
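
The new 2.6 helper restarts a queue that was stopped because the shared ring filled up. The stopping side is not part of this hunk; a plausible reconstruction, assuming the 2.6-era block API (blk_stop_queue(), elv_next_request()) used elsewhere in this driver:

    /* Sketch: how xlbd_blk_queue ends up QUEUE_FLAG_STOPPED in the first
     * place.  When the shared ring fills, the request function stops the
     * queue and bails; blkif_int() later calls kick_pending_request_queues()
     * to restart it once responses have freed slots. */
    void do_blkif_request(request_queue_t *rq)
    {
        struct request *req;

        while ( (req = elv_next_request(rq)) != NULL )
        {
            if ( BLKIF_RING_FULL )
            {
                blk_stop_queue(rq);   /* sets QUEUE_FLAG_STOPPED */
                break;
            }
            /* ... translate req onto the shared ring ... */
        }
        /* ... flush_requests() if anything was queued ... */
    }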
    1.43 @@ -244,7 +252,7 @@ static int blkif_queue_request(struct re
    1.44      id = GET_ID_FROM_FREELIST();
    1.45      rec_ring[id].id = (unsigned long) req;
    1.46  
    1.47 -//printk("r: %d req %p (%ld)\n",req_prod,req,id);
    1.48 +//printk(KERN_ALERT"r: %d req %p (%ld)\n",req_prod,req,id);
    1.49  
    1.50      ring_req->id = id;
    1.51      ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
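
Promoting the commented-out debug printk to KERN_ALERT is deliberate: the level macro is just a string prefix that the preprocessor pastes onto the format, and KERN_ALERT keeps the message visible above the default console loglevel if the line is ever re-enabled. Equivalent forms:

    /* The level macro is a string literal concatenated with the format. */
    printk(KERN_ALERT "r: %d req %p (%ld)\n", req_prod, req, id);
    printk("<1>" "r: %d req %p (%ld)\n", req_prod, req, id);  /* KERN_ALERT == "<1>" */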
    1.52 @@ -291,6 +299,8 @@ void do_blkif_request(request_queue_t *r
    1.53  
    1.54      DPRINTK("Entered do_blkif_request\n"); 
    1.55  
    1.56 +//printk(KERN_ALERT"r: %d req\n",req_prod);
    1.57 +
    1.58      queued = 0;
    1.59  
    1.60      while ((req = elv_next_request(rq)) != NULL) {
    1.61 @@ -347,7 +357,7 @@ static irqreturn_t blkif_int(int irq, vo
    1.62  	id = bret->id;
    1.63  	req = (struct request *)rec_ring[id].id;
    1.64  
    1.65 -//printk("i: %d req %p (%ld)\n",i,req,id);
    1.66 +//printk(KERN_ALERT"i: %d req %p (%ld)\n",i,req,id);
    1.67  
    1.68  	ADD_ID_TO_FREELIST(id);  // overwrites req
    1.69  
    1.70 @@ -378,15 +388,7 @@ static irqreturn_t blkif_int(int irq, vo
    1.71      
    1.72      resp_cons = i;
    1.73  
    1.74 -    if ( (xlbd_blk_queue != NULL) &&
    1.75 -         test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
    1.76 -    {
    1.77 -        blk_start_queue(xlbd_blk_queue);
    1.78 -        /* XXXcl call to request_fn should not be needed but
    1.79 -         * we get stuck without...  needs investigating
    1.80 -         */
    1.81 -        xlbd_blk_queue->request_fn(xlbd_blk_queue);
    1.82 -    }
    1.83 +    kick_pending_request_queues();
    1.84  
    1.85      spin_unlock_irqrestore(&blkif_io_lock, flags);
    1.86  
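
With the restart logic factored out, the interrupt handler reduces to the usual lock/consume/kick shape. A condensed sketch of that shape (the response-completion details of the real handler are elided):

    static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
    {
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&blkif_io_lock, flags);

        for ( i = resp_cons; i != blk_ring->resp_prod; i++ )
        {
            /* ... look up rec_ring[bret->id], complete the request,
             *     return the id to the freelist ... */
        }
        resp_cons = i;

        /* Ring slots were just freed, so restart a stopped queue if any.
         * Safe here: blkif_io_lock is held, matching the helper's
         * locking assumption. */
        kick_pending_request_queues();

        spin_unlock_irqrestore(&blkif_io_lock, flags);
        return IRQ_HANDLED;
    }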
    1.87 @@ -400,6 +402,16 @@ static kdev_t        sg_dev;
    1.88  static int           sg_operation = -1;
    1.89  static unsigned long sg_next_sect;
    1.90  
    1.91 +/*
    1.92 + * Request queues with outstanding work, but ring is currently full.
    1.93 + * We need no special lock here, as we always access this with the
    1.94 + * blkif_io_lock held. We only need a small maximum list.
    1.95 + */
    1.96 +#define MAX_PENDING 8
    1.97 +static request_queue_t *pending_queues[MAX_PENDING];
    1.98 +static int nr_pending;
    1.99 +
   1.100 +
   1.101  #define DISABLE_SCATTERGATHER() (sg_operation = -1)
   1.102  
   1.103  #define blkif_io_lock io_request_lock
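
These arrays belong to the 2.4 code path (note the kdev_t and io_request_lock context around them), where a full ring parks the whole request queue rather than stopping it. A sketch of the enqueue side, reconstructed for illustration; only the array bookkeeping is the point:

    /* Sketch (2.4 path): called with blkif_io_lock (io_request_lock)
     * held, hence no extra locking on pending_queues/nr_pending. */
    void do_blkif_request(request_queue_t *rq)
    {
        while ( !list_empty(&rq->queue_head) )
        {
            if ( BLKIF_RING_FULL )
            {
                /* Park the whole queue until responses free slots; a
                 * handful of entries suffices since there are few
                 * devices, hence MAX_PENDING == 8. */
                if ( nr_pending < MAX_PENDING )
                    pending_queues[nr_pending++] = rq;
                return;
            }
            /* ... dequeue the head request onto the shared ring ... */
        }
    }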
   1.104 @@ -418,6 +430,18 @@ static void update_vbds_task(void *unuse
   1.105  }
   1.106  #endif
   1.107  
   1.108 +static void kick_pending_request_queues(void)
   1.109 +{
   1.110 +    /* We kick pending request queues if the ring is reasonably empty. */
   1.111 +    if ( (nr_pending != 0) && 
   1.112 +         ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
   1.113 +    {
   1.114 +        /* Attempt to drain the queue, but bail if the ring becomes full. */
   1.115 +        while ( (nr_pending != 0) && !BLKIF_RING_FULL )
   1.116 +            do_blkif_request(pending_queues[--nr_pending]);
   1.117 +    }
   1.118 +}
   1.119 +
   1.120  int blkif_open(struct inode *inode, struct file *filep)
   1.121  {
   1.122      short xldev = inode->i_rdev; 
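
Moving this definition above its callers matters in pre-C99 kernel code: a call with no prototype in scope becomes an implicit declaration, assumed to return int, and a later static definition then conflicts with it. A minimal illustration with hypothetical names, not driver code:

    void caller(void)
    {
        helper();            /* no prototype: implicitly `int helper()' */
    }

    static void helper(void) /* error: static definition follows the
                              * implicit extern declaration above */
    {
    }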
   1.123 @@ -949,24 +973,13 @@ static inline void translate_req_to_mfn(
   1.124  static inline void flush_requests(void)
   1.125  {
   1.126      DISABLE_SCATTERGATHER();
   1.127 +//printk(KERN_ALERT"flush %d\n",req_prod);
   1.128      wmb(); /* Ensure that the frontend can see the requests. */
   1.129      blk_ring->req_prod = req_prod;
   1.130      notify_via_evtchn(blkif_evtchn);
   1.131  }
   1.132  
   1.133  
   1.134 -static void kick_pending_request_queues(void)
   1.135 -{
   1.136 -    /* We kick pending request queues if the ring is reasonably empty. */
   1.137 -    if ( (nr_pending != 0) && 
   1.138 -         ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
   1.139 -    {
   1.140 -        /* Attempt to drain the queue, but bail if the ring becomes full. */
   1.141 -        while ( (nr_pending != 0) && !BLKIF_RING_FULL )
   1.142 -            do_blkif_request(pending_queues[--nr_pending]);
   1.143 -    }
   1.144 -}
   1.145 -
   1.146  void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
   1.147  {
   1.148      unsigned long flags, id;
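
flush_requests() above is the publication point for new requests, and the ordering is the load-bearing part: fill the ring slots, wmb(), then advance the shared producer index, then notify. The same code with the three steps called out (annotation only; identical to the driver's logic):

    static inline void flush_requests(void)
    {
        DISABLE_SCATTERGATHER();
        /* 1. Request bodies were already stored into the ring slots. */
        wmb();                           /* 2. Order those stores ahead of
                                          *    the producer-index update. */
        blk_ring->req_prod = req_prod;
        notify_via_evtchn(blkif_evtchn); /* 3. Poke the backend last; if it
                                          *    is already running it simply
                                          *    re-reads req_prod. */
    }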
   1.149 @@ -1016,6 +1029,7 @@ static void blkif_status_change(blkif_fe
   1.150  {
   1.151      ctrl_msg_t                   cmsg;
   1.152      blkif_fe_interface_connect_t up;
   1.153 +    long rc;
   1.154  
   1.155      if ( status->handle != 0 )
   1.156      {
   1.157 @@ -1075,11 +1089,13 @@ static void blkif_status_change(blkif_fe
   1.158                     " in state %d\n", blkif_state);
   1.159              break;
   1.160          }
   1.161 -
   1.162          blkif_evtchn = status->evtchn;
   1.163          blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
   1.164 -        (void)request_irq(blkif_irq, blkif_int, 
   1.165 -                          SA_SAMPLE_RANDOM, "blkif", NULL);
   1.166 +        if ( (rc=request_irq(blkif_irq, blkif_int, 
   1.167 +                          SA_SAMPLE_RANDOM, "blkif", NULL)) )
   1.168 +	{
   1.169 +	    printk(KERN_ALERT"blkfront request_irq failed (%ld)\n",rc);
   1.170 +	}
   1.171  
   1.172          if ( recovery )
   1.173          {
     2.1 --- a/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c	Mon Aug 16 16:55:39 2004 +0000
     2.2 +++ b/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c	Tue Aug 17 16:09:08 2004 +0000
     2.3 @@ -721,7 +721,6 @@ static void netif_status_change(netif_fe
     2.4          np->irq = bind_evtchn_to_irq(np->evtchn);
     2.5          (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, 
     2.6                            dev->name, dev);
     2.7 -        
     2.8          netctrl_connected_count();
     2.9          break;
    2.10  
    2.11 @@ -906,10 +905,9 @@ void netif_suspend(void)
    2.12  	sprintf(name,"eth%d",i);
    2.13  	dev = __dev_get_by_name(name);
    2.14  
    2.15 -	if ( dev && (dev->flags & IFF_UP) )
    2.16 +	if ( dev )
    2.17  	{
    2.18  	    np  = dev->priv;
    2.19 -
    2.20  	    free_irq(np->irq, dev);
    2.21              unbind_evtchn_from_irq(np->evtchn);
    2.22  	}    
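
Dropping the IFF_UP test is the substantive netfront change: the event channel and IRQ are bound when the backend connects, not when the interface is brought up, so suspend must release them for down interfaces as well or the next resume would bind a second IRQ. The patched block, annotated (the rationale is my reading of the diff, not stated in the changeset):

    if ( dev )                /* was: dev && (dev->flags & IFF_UP) */
    {
        np = dev->priv;
        free_irq(np->irq, dev);              /* bound at connect, not open */
        unbind_evtchn_from_irq(np->evtchn);
    }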
    2.23 @@ -925,8 +923,6 @@ void netif_resume(void)
    2.24      struct net_private *np = NULL;
    2.25      int i;
    2.26  
    2.27 -
    2.28 -
    2.29  #if 1
    2.30      /* XXX THIS IS TEMPORARY */
    2.31