ia64/xen-unstable

changeset 8449:8b74b9603d5e

If netfront fails to allocate a receive skbuff, push all pending
skbuffs out onto the shared ring. If there are no skbuffs to push,
schedule a timer to try again later. This will avoid interface
lockups in low-memory conditions.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Dec 28 12:29:15 2005 +0100 (2005-12-28)
parents 7c1f2e20123a
children 4c961318d216
files linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Tue Dec 27 10:40:33 2005 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Dec 28 12:29:15 2005 +0100
@@ -117,6 +117,8 @@ struct netfront_info
 	int rx_min_target, rx_max_target, rx_target;
 	struct sk_buff_head rx_batch;
 
+	struct timer_list rx_refill_timer;
+
 	/*
 	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
 	 * array is an index into a chain of free entries.
@@ -517,6 +519,13 @@ static void network_tx_buf_gc(struct net
 }
 
 
+static void rx_refill_timeout(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	netif_rx_schedule(dev);
+}
+
+
 static void network_alloc_rx_buffers(struct net_device *dev)
 {
 	unsigned short id;
@@ -534,7 +543,7 @@ static void network_alloc_rx_buffers(str
 	 * Allocate skbuffs greedily, even though we batch updates to the
 	 * receive ring. This creates a less bursty demand on the memory
 	 * allocator, so should reduce the chance of failed allocation requests
-	 *  both for ourself and for other kernel subsystems.
+	 * both for ourself and for other kernel subsystems.
 	 */
 	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
 	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
@@ -545,8 +554,15 @@ static void network_alloc_rx_buffers(str
 		skb = alloc_xen_skb(
 			((PAGE_SIZE - sizeof(struct skb_shared_info)) &
 			 (-SKB_DATA_ALIGN(1))) - 16);
-		if (skb == NULL)
-			break;
+		if (skb == NULL) {
+			/* Any skbuffs queued for refill? Force them out. */
+			if (i != 0)
+				goto refill;
+			/* Could not allocate any skbuffs. Try again later. */
+			mod_timer(&np->rx_refill_timer,
+				  jiffies + (HZ/10));
+			return;
+		}
 		__skb_queue_tail(&np->rx_batch, skb);
 	}
 
@@ -554,6 +570,12 @@ static void network_alloc_rx_buffers(str
 	if (i < (np->rx_target/2))
 		return;
 
+	/* Adjust our fill target if we risked running out of buffers. */
+	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
+	    ((np->rx_target *= 2) > np->rx_max_target))
+		np->rx_target = np->rx_max_target;
+
+ refill:
 	for (i = 0; ; i++) {
 		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
 			break;
@@ -608,11 +630,6 @@ static void network_alloc_rx_buffers(str
 	/* Above is a suitable barrier to ensure backend will see requests. */
 	np->rx.req_prod_pvt = req_prod + i;
 	RING_PUSH_REQUESTS(&np->rx);
-
-	/* Adjust our fill target if we risked running out of buffers. */
-	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
-	    ((np->rx_target *= 2) > np->rx_max_target))
-		np->rx_target = np->rx_max_target;
 }
 
 
@@ -1077,6 +1094,10 @@ static int create_netdev(int handle, str
 	np->rx_min_target = RX_MIN_TARGET;
 	np->rx_max_target = RX_MAX_TARGET;
 
+	init_timer(&np->rx_refill_timer);
+	np->rx_refill_timer.data = (unsigned long)netdev;
+	np->rx_refill_timer.function = rx_refill_timeout;
+
 	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
 	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
 		np->tx_skbs[i] = (void *)((unsigned long) i+1);
@@ -1223,7 +1244,7 @@ static void netif_disconnect_backend(str
 	/* info->backend_state = BEST_DISCONNECTED; */
 	spin_unlock(&info->rx_lock);
 	spin_unlock_irq(&info->tx_lock);
-    
+
 	end_access(info->tx_ring_ref, info->tx.sring);
 	end_access(info->rx_ring_ref, info->rx.sring);
 	info->tx_ring_ref = GRANT_INVALID_REF;
@@ -1234,6 +1255,8 @@ static void netif_disconnect_backend(str
 	if (info->irq)
 		unbind_from_irqhandler(info->irq, info->netdev);
 	info->evtchn = info->irq = 0;
+
+	del_timer_sync(&info->rx_refill_timer);
 }
 
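For reference, a minimal consolidated sketch of the refill-timer lifecycle that the hunks above add, gathered into one place. The example_* names and the cut-down info structure are hypothetical, and the sketch assumes the 2.6-era timer and softnet interfaces this driver already uses: netif_rx_schedule() re-arms the device's poll handler, and in netfront the poll handler (netif_poll) ends by calling network_alloc_rx_buffers() again, which is what turns the timer callback into a retry. This is an illustration of the pattern, not driver source.

#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical, cut-down private state: only the fields the pattern needs. */
struct example_info {
	struct net_device *netdev;
	struct timer_list  rx_refill_timer;
};

/* Timer callback: re-arm RX polling; the poll handler then retries the
 * receive-buffer allocation as part of its normal processing. */
static void example_rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	netif_rx_schedule(dev);
}

/* Device creation: prepare the timer (mirrors the create_netdev() hunk). */
static void example_init(struct example_info *np)
{
	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data     = (unsigned long)np->netdev;
	np->rx_refill_timer.function = example_rx_refill_timeout;
}

/* Allocation path: nothing could be allocated and nothing is batched,
 * so try again in roughly 100ms (mirrors the network_alloc_rx_buffers() hunk). */
static void example_alloc_failed(struct example_info *np)
{
	mod_timer(&np->rx_refill_timer, jiffies + (HZ / 10));
}

/* Backend disconnect: make sure no callback can fire afterwards
 * (mirrors the netif_disconnect_backend() hunk). */
static void example_disconnect(struct example_info *np)
{
	del_timer_sync(&np->rx_refill_timer);
}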