ia64/xen-unstable

changeset 14845:abea8d171503

linux: netfront contains two locking problems found by lockdep:

1. rx_lock is a normal spinlock, and tx_lock is an irq-safe spinlock
(taken with spin_lock_irq). In normal use, an interrupt routine may
therefore take tx_lock while rx_lock is already held, establishing
the lock order rx_lock -> tx_lock. However, netif_disconnect_backend
takes them in the opposite order, tx_lock -> rx_lock, which could
lead to an ABBA deadlock (see the first sketch below). Reverse them.
2. rx_lock is also taken in softirq context (the netif_poll NAPI
path), so process-context users should take/release it with
spin_lock_bh/spin_unlock_bh (see the second sketch below).
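
For reference, a minimal sketch of the ABBA deadlock behind problem 1.
This is illustrative only, not netfront code; rx_path() and
old_disconnect() are hypothetical stand-ins for the normal receive
path and the old netif_disconnect_backend ordering:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(rx_lock);  /* taken in process/softirq context */
    static DEFINE_SPINLOCK(tx_lock);  /* also taken from the irq handler */

    /* Normal use: rx_lock is held when an interrupt arrives and its
     * handler takes tx_lock, establishing the order rx_lock -> tx_lock. */
    static void rx_path(void)
    {
            spin_lock_bh(&rx_lock);
            /* ... irq fires here; handler does spin_lock(&tx_lock) ... */
            spin_unlock_bh(&rx_lock);
    }

    /* Old ordering: tx_lock -> rx_lock.  If CPU0 sits here holding
     * tx_lock and spinning on rx_lock, while CPU1 holds rx_lock and its
     * irq handler spins on tx_lock, neither CPU can make progress. */
    static void old_disconnect(void)
    {
            spin_lock_irq(&tx_lock);
            spin_lock(&rx_lock);            /* wrong order */
            spin_unlock(&rx_lock);
            spin_unlock_irq(&tx_lock);
    }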
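
Problem 2 in sketch form: netif_poll() runs in NET_RX softirq context
and takes rx_lock. If a process-context path held rx_lock via plain
spin_lock() and the softirq preempted it on the same CPU, the softirq
would spin forever on a lock its own CPU already holds. Disabling
bottom halves while holding the lock prevents this (hypothetical
helper; struct priv stands in for netfront's private structure):

    #include <linux/spinlock.h>

    struct priv {                           /* stand-in for struct netfront_info */
            spinlock_t rx_lock;
    };

    static void touch_rx_state(struct priv *np)
    {
            /* Disables softirqs on this CPU, so netif_poll() cannot
             * preempt us here and self-deadlock on rx_lock. */
            spin_lock_bh(&np->rx_lock);
            /* ... modify RX ring state shared with the poll routine ... */
            spin_unlock_bh(&np->rx_lock);   /* pending softirqs may run now */
    }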

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
author kfraser@localhost.localdomain
date Fri Apr 13 11:28:04 2007 +0100 (2007-04-13)
parents 986b102f84c2
children 30898de09289
files linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Apr 13 11:23:26 2007 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Apr 13 11:28:04 2007 +0100
@@ -622,14 +622,14 @@ static int network_open(struct net_devic
 
 	memset(&np->stats, 0, sizeof(np->stats));
 
-	spin_lock(&np->rx_lock);
+	spin_lock_bh(&np->rx_lock);
 	if (netfront_carrier_ok(np)) {
 		network_alloc_rx_buffers(dev);
 		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
 			netif_rx_schedule(dev);
 	}
-	spin_unlock(&np->rx_lock);
+	spin_unlock_bh(&np->rx_lock);
 
 	network_maybe_wake_tx(dev);
 
@@ -1307,10 +1307,10 @@ static int netif_poll(struct net_device 
 	int pages_flipped = 0;
 	int err;
 
-	spin_lock(&np->rx_lock);
+	spin_lock_bh(&np->rx_lock);
 
 	if (unlikely(!netfront_carrier_ok(np))) {
-		spin_unlock(&np->rx_lock);
+		spin_unlock_bh(&np->rx_lock);
 		return 0;
 	}
 
@@ -1478,7 +1478,7 @@ err:
 		local_irq_restore(flags);
 	}
 
-	spin_unlock(&np->rx_lock);
+	spin_unlock_bh(&np->rx_lock);
 
 	return more_to_do;
 }
@@ -1520,7 +1520,7 @@ static void netif_release_rx_bufs(struct
 
 	skb_queue_head_init(&free_list);
 
-	spin_lock(&np->rx_lock);
+	spin_lock_bh(&np->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
 		if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
@@ -1588,7 +1588,7 @@ static void netif_release_rx_bufs(struct
 	while ((skb = __skb_dequeue(&free_list)) != NULL)
 		dev_kfree_skb(skb);
 
-	spin_unlock(&np->rx_lock);
+	spin_unlock_bh(&np->rx_lock);
 }
 
 static int network_close(struct net_device *dev)
@@ -1708,8 +1708,8 @@ static int network_connect(struct net_de
 	IPRINTK("device %s has %sing receive path.\n",
 		dev->name, np->copying_receiver ? "copy" : "flipp");
 
+	spin_lock_bh(&np->rx_lock);
 	spin_lock_irq(&np->tx_lock);
-	spin_lock(&np->rx_lock);
 
 	/*
 	 * Recovery procedure:
@@ -1761,7 +1761,7 @@ static int network_connect(struct net_de
 	network_tx_buf_gc(dev);
 	network_alloc_rx_buffers(dev);
 
-	spin_unlock(&np->rx_lock);
+	spin_unlock_bh(&np->rx_lock);
 	spin_unlock_irq(&np->tx_lock);
 
 	return 0;
@@ -1818,7 +1818,7 @@ static ssize_t store_rxbuf_min(struct cl
 	if (target > RX_MAX_TARGET)
 		target = RX_MAX_TARGET;
 
-	spin_lock(&np->rx_lock);
+	spin_lock_bh(&np->rx_lock);
 	if (target > np->rx_max_target)
 		np->rx_max_target = target;
 	np->rx_min_target = target;
@@ -1827,7 +1827,7 @@ static ssize_t store_rxbuf_min(struct cl
 
 	network_alloc_rx_buffers(netdev);
 
-	spin_unlock(&np->rx_lock);
+	spin_unlock_bh(&np->rx_lock);
 	return len;
 }
 
@@ -1861,7 +1861,7 @@ static ssize_t store_rxbuf_max(struct cl
 	if (target > RX_MAX_TARGET)
 		target = RX_MAX_TARGET;
 
-	spin_lock(&np->rx_lock);
+	spin_lock_bh(&np->rx_lock);
 	if (target < np->rx_min_target)
 		np->rx_min_target = target;
 	np->rx_max_target = target;
@@ -1870,7 +1870,7 @@ static ssize_t store_rxbuf_max(struct cl
 
 	network_alloc_rx_buffers(netdev);
 
-	spin_unlock(&np->rx_lock);
+	spin_unlock_bh(&np->rx_lock);
 	return len;
 }
 
@@ -2033,10 +2033,10 @@ inetdev_notify(struct notifier_block *th
 static void netif_disconnect_backend(struct netfront_info *info)
 {
 	/* Stop old i/f to prevent errors whilst we rebuild the state. */
+	spin_lock_bh(&info->rx_lock);
 	spin_lock_irq(&info->tx_lock);
-	spin_lock(&info->rx_lock);
 	netfront_carrier_off(info);
-	spin_unlock(&info->rx_lock);
+	spin_unlock_bh(&info->rx_lock);
 	spin_unlock_irq(&info->tx_lock);
 
 	if (info->irq)