struct vm_struct *tx_comms_area;
struct vm_struct *rx_comms_area;
+ /* Set of features that can be turned on in dev->features. */
+ int features;
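+ /* Frontend advertises feature-rx-notify: backend may stop/wake its TX queue. */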
+ int can_queue;
+
/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
RING_IDX rx_req_cons_peek;
struct net_device_stats *netif_be_get_stats(struct net_device *dev);
irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+static inline int netbk_can_queue(struct net_device *dev)
+{
+ netif_t *netif = netdev_priv(dev);
+ return netif->can_queue;
+}
+
#endif /* __NETIF__BACKEND__COMMON_H__ */
netif_t *netif = netdev_priv(dev);
if (netif_carrier_ok(dev))
__netif_up(netif);
- netif_start_queue(dev);
return 0;
}
static int net_close(struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
- netif_stop_queue(dev);
if (netif_carrier_ok(dev))
__netif_down(netif);
return 0;
SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
- /* Disable queuing. */
- dev->tx_queue_len = 0;
+ /*
+ * Reduce the default TX queue length so that each guest interface can
+ * tie up at most around 6.4MB of host memory.
+ */
+ dev->tx_queue_len = 100;
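+ /* (Presumably 100 queued skbs at up to 64KB each, i.e. ~6.4MB.) */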
for (i = 0; i < ETH_ALEN; i++)
if (be_mac[i] != 0)
return (cp == skbuff_cachep);
}
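+/*
+ * The RX ring is considered full when the frontend has posted no
+ * unconsumed requests, or when a full ring's worth of requests has
+ * already been earmarked for responses that are still outstanding.
+ */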
+static inline int netbk_queue_full(netif_t *netif)
+{
+ RING_IDX peek = netif->rx_req_cons_peek;
+
+ return ((netif->rx.sring->req_prod - peek) <= 0) ||
+ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) <= 0);
+}
+
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
BUG_ON(skb->dev != dev);
/* Drop the packet if the target domain has no receive buffers. */
- if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev)) ||
- (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
- ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
- NET_RX_RING_SIZE))
+ if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev)))
goto drop;
+ if (unlikely(netbk_queue_full(netif))) {
+ /* Not a BUG_ON() -- misbehaving netfront can trigger this. */
+ if (netbk_can_queue(dev))
+ DPRINTK("Queue full but not stopped!\n");
+ goto drop;
+ }
+
/*
* We do not copy the packet unless:
* 1. The data is shared; or
netif->rx_req_cons_peek++;
netif_get(netif);
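+ /*
+ * If the frontend supports rx-notify, arm req_event so its next RX
+ * request raises an interrupt, then re-check for a request that may
+ * have arrived in the meantime before actually stopping the queue.
+ */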
+ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
+ netif->rx.sring->req_event = netif->rx_req_cons_peek + 1;
+ mb(); /* request notification /then/ check & stop the queue */
+ if (netbk_queue_full(netif))
+ netif_stop_queue(dev);
+ }
+
skb_queue_tail(&rx_queue, skb);
tasklet_schedule(&net_rx_tasklet);
notify_list[notify_nr++] = irq;
}
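+ /*
+ * Responses have now been produced, so space may have opened up in
+ * the RX ring; restart the transmit queue if it was stopped.
+ */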
+ if (netif_queue_stopped(netif->dev) &&
+ !netbk_queue_full(netif))
+ netif_wake_queue(netif->dev);
+
netif_put(netif);
dev_kfree_skb(skb);
gop++;
irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
netif_t *netif = dev_id;
+
add_to_net_schedule_list_tail(netif);
maybe_schedule_tx_action();
+
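+ /*
+ * This event may be the frontend posting new RX requests (we armed
+ * req_event when stopping the queue), so wake the queue if the ring
+ * is no longer full.
+ */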
+ if (netif_queue_stopped(netif->dev) && !netbk_queue_full(netif))
+ netif_wake_queue(netif->dev);
+
return IRQ_HANDLED;
}
unsigned long tx_ring_ref, rx_ring_ref;
unsigned int evtchn;
int err;
+ int val;
DPRINTK("");
return err;
}
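+ /*
+ * A frontend advertising feature-rx-notify sends an event whenever it
+ * posts new RX requests, so the backend can stop its transmit queue on
+ * a full ring and wait to be woken. Without the feature the backend
+ * must drop packets instead, so keep the device queue minimal.
+ */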
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-rx-notify", "%d",
+ &val) < 0)
+ val = 0;
+ if (val)
+ be->netif->can_queue = 1;
+ else
+ /* Must be non-zero for pfifo_fast to work. */
+ be->netif->dev->tx_queue_len = 1;
+
/* Map the shared frame, irq etc. */
err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
if (err) {