int poll_enabled;
- /** A spare slot for a TX packet. This is treated as an extension
- * of the DMA queue. */
+ /** A spare slot for a TX packet. This is treated as an
+ * extension of the DMA queue. Reads require either
+ * netfront's tx_lock or the vnic tx_lock; writes require both
+ * locks. */
struct sk_buff *tx_skb;
/** Keep track of fragments of SSR packets */
BUG_ON(vnic->net_dev != net_dev);
DPRINTK("%s stopping queue\n", __FUNCTION__);
- /* Netfront's lock protects tx_skb */
+ /* Need netfront's tx_lock and vnic tx_lock to write tx_skb */
spin_lock_irqsave(&np->tx_lock, flags2);
BUG_ON(vnic->tx_skb != NULL);
vnic->tx_skb = skb;
BUG_ON(vnic == NULL);
- /* This is protected by netfront's lock */
+ /* Read of tx_skb is protected by netfront's tx_lock */
return vnic->tx_skb == NULL;
}
{
struct netfront_info *np = ((struct netfront_info *)
netdev_priv(vnic->net_dev));
- struct sk_buff *skb;
int handled;
unsigned long flags;
-
+
/*
- * TODO if we could safely check tx_skb == NULL and return
- * early without taking the lock, that would obviously help
- * performance
+ * We hold the vnic tx_lock which is sufficient to exclude
+ * writes to tx_skb.
*/
- /* Take the netfront lock which protects tx_skb. */
- spin_lock_irqsave(&np->tx_lock, flags);
if (vnic->tx_skb != NULL) {
DPRINTK("%s trying to send spare buffer\n", __FUNCTION__);
- skb = vnic->tx_skb;
- vnic->tx_skb = NULL;
-
- spin_unlock_irqrestore(&np->tx_lock, flags);
-
- handled = netfront_accel_vi_tx_post(vnic, skb);
+ handled = netfront_accel_vi_tx_post(vnic, vnic->tx_skb);
- spin_lock_irqsave(&np->tx_lock, flags);
-
if (handled != NETFRONT_ACCEL_STATUS_BUSY) {
DPRINTK("%s restarting tx\n", __FUNCTION__);
+
+ /* Need netfront's tx_lock and vnic tx_lock to
+ * write tx_skb. */
+ spin_lock_irqsave(&np->tx_lock, flags);
+
+ vnic->tx_skb = NULL;
+
if (netfront_check_queue_ready(vnic->net_dev)) {
netif_wake_queue(vnic->net_dev);
NETFRONT_ACCEL_STATS_OP
(vnic->stats.queue_wakes++);
}
- } else {
- vnic->tx_skb = skb;
+ spin_unlock_irqrestore(&np->tx_lock, flags);
+
}
/*
*/
BUG_ON(handled == NETFRONT_ACCEL_STATUS_CANT);
}
- spin_unlock_irqrestore(&np->tx_lock, flags);
}