xenbits.xensource.com Git - legacy/linux-2.6.18-xen.git/commitdiff
sfc_netfront: Only clear tx_skb when ready for netif_wake_queue
author: Keir Fraser <keir.fraser@citrix.com>
Tue, 31 Mar 2009 11:00:53 +0000 (12:00 +0100)
committer: Keir Fraser <keir.fraser@citrix.com>
Tue, 31 Mar 2009 11:00:53 +0000 (12:00 +0100)
(doing otherwise could result in a lost packet) and document use of
locks to protect tx_skb

Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
drivers/xen/sfc_netfront/accel.h
drivers/xen/sfc_netfront/accel_netfront.c
drivers/xen/sfc_netfront/accel_vi.c

index c81db28d15eefb53b9046dfb6fbf8bc0359b3171..a1241437b2c8f3c1e4eace3c8378b9e2c1ad89c2 100644 (file)
@@ -277,8 +277,10 @@ typedef struct netfront_accel_vnic {
 
        int poll_enabled;
 
-       /** A spare slot for a TX packet.  This is treated as an extension
-        * of the DMA queue. */
+       /** A spare slot for a TX packet.  This is treated as an
+        * extension of the DMA queue.  Reads require either
+        * netfront's tx_lock or the vnic tx_lock; writes require both
+        * locks */
        struct sk_buff *tx_skb;
 
        /** Keep track of fragments of SSR packets */
index 7d3f08c2391f6f6c0cc5418e4209285e2678d292..79c69e503f4ad6723023eb853e4224bc4e9956f6 100644 (file)
@@ -65,7 +65,7 @@ static int netfront_accel_netdev_start_xmit(struct sk_buff *skb,
                BUG_ON(vnic->net_dev != net_dev);
                DPRINTK("%s stopping queue\n", __FUNCTION__);
 
-               /* Netfront's lock protects tx_skb */
+               /* Need netfront's tx_lock and vnic tx_lock to write tx_skb */
                spin_lock_irqsave(&np->tx_lock, flags2);
                BUG_ON(vnic->tx_skb != NULL);
                vnic->tx_skb = skb;
@@ -183,7 +183,7 @@ static int netfront_accel_check_ready(struct net_device *net_dev)
 
        BUG_ON(vnic == NULL);
 
-       /* This is protected by netfront's lock */ 
+       /* Read of tx_skb is protected by netfront's tx_lock */ 
        return vnic->tx_skb == NULL;
 }
 
index b5f4f5410d5c51d0d318af37b6c32b34ac145064..d73126602dc99e311d5214506825ff79a93078fd 100644 (file)
@@ -991,39 +991,35 @@ static void netfront_accel_vi_not_busy(netfront_accel_vnic *vnic)
 {
        struct netfront_info *np = ((struct netfront_info *)
                                    netdev_priv(vnic->net_dev));
-       struct sk_buff *skb;
        int handled;
        unsigned long flags;
-       
+
        /*
-        * TODO if we could safely check tx_skb == NULL and return
-        * early without taking the lock, that would obviously help
-        * performance
+        * We hold the vnic tx_lock which is sufficient to exclude
+        * writes to tx_skb
         */
 
-       /* Take the netfront lock which protects tx_skb. */
-       spin_lock_irqsave(&np->tx_lock, flags);
        if (vnic->tx_skb != NULL) {
                DPRINTK("%s trying to send spare buffer\n", __FUNCTION__);
                
-               skb = vnic->tx_skb;
-               vnic->tx_skb = NULL;
-
-               spin_unlock_irqrestore(&np->tx_lock, flags);
-
-               handled = netfront_accel_vi_tx_post(vnic, skb);
+               handled = netfront_accel_vi_tx_post(vnic, vnic->tx_skb);
                
-               spin_lock_irqsave(&np->tx_lock, flags);
-
                if (handled != NETFRONT_ACCEL_STATUS_BUSY) {
                        DPRINTK("%s restarting tx\n", __FUNCTION__);
+
+                       /* Need netfront tx_lock and vnic tx_lock to
+                        * write tx_skb */
+                       spin_lock_irqsave(&np->tx_lock, flags);
+
+                       vnic->tx_skb = NULL;
+
                        if (netfront_check_queue_ready(vnic->net_dev)) {
                                netif_wake_queue(vnic->net_dev);
                                NETFRONT_ACCEL_STATS_OP
                                        (vnic->stats.queue_wakes++);
                        }
-               } else {
-                       vnic->tx_skb = skb;
+                       spin_unlock_irqrestore(&np->tx_lock, flags);
+
                }
                
                /*
@@ -1032,7 +1028,6 @@ static void netfront_accel_vi_not_busy(netfront_accel_vnic *vnic)
                 */
                BUG_ON(handled == NETFRONT_ACCEL_STATUS_CANT);
        }
-       spin_unlock_irqrestore(&np->tx_lock, flags);
 }