ia64/xen-unstable

changeset 10880:7bb2e0be9cc2

[NET] back: Added tx queue

This patch adds a tx queue to the backend if the frontend supports rx
refill notification. A queue is needed because SG/TSO greatly reduces
the number of packets that can be stored in the rx ring. Given an rx
ring with 256 entries, a maximum TSO packet can occupy as many as 18
entries, meaning that the entire ring can only hold 14 packets. This
is too small at high bandwidths with large TCP RX windows.

Having a tx queue does not present a new security risk, as the queue is
a fixed-size buffer just like the rx ring. So each guest can only hold
a fixed amount of memory (proportional to the tx queue length) on the
host.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

We cannot BUG_ON the case where netbk_queue_full() is true while
!netbk_can_queue(), as a misbehaving frontend can trigger it. Also set
req_event appropriately when stopping the packet queue, or we will not
receive a notification.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Jul 31 17:29:00 2006 +0100 (2006-07-31)
parents 2d2ed4d9b1c1
children 5f5a2f282032
files linux-2.6-xen-sparse/drivers/xen/netback/common.h linux-2.6-xen-sparse/drivers/xen/netback/interface.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Mon Jul 31 10:40:21 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Mon Jul 31 17:29:00 2006 +0100
     1.3 @@ -76,6 +76,10 @@ typedef struct netif_st {
     1.4  	struct vm_struct *tx_comms_area;
     1.5  	struct vm_struct *rx_comms_area;
     1.6  
     1.7 +	/* Set of features that can be turned on in dev->features. */
     1.8 +	int features;
     1.9 +	int can_queue;
    1.10 +
    1.11  	/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
    1.12  	RING_IDX rx_req_cons_peek;
    1.13  
    1.14 @@ -119,4 +123,10 @@ int netif_be_start_xmit(struct sk_buff *
    1.15  struct net_device_stats *netif_be_get_stats(struct net_device *dev);
    1.16  irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
    1.17  
    1.18 +static inline int netbk_can_queue(struct net_device *dev)
    1.19 +{
    1.20 +	netif_t *netif = netdev_priv(dev);
    1.21 +	return netif->can_queue;
    1.22 +}
    1.23 +
    1.24  #endif /* __NETIF__BACKEND__COMMON_H__ */
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Mon Jul 31 10:40:21 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Mon Jul 31 17:29:00 2006 +0100
     2.3 @@ -51,14 +51,12 @@ static int net_open(struct net_device *d
     2.4  	netif_t *netif = netdev_priv(dev);
     2.5  	if (netif_carrier_ok(dev))
     2.6  		__netif_up(netif);
     2.7 -	netif_start_queue(dev);
     2.8  	return 0;
     2.9  }
    2.10  
    2.11  static int net_close(struct net_device *dev)
    2.12  {
    2.13  	netif_t *netif = netdev_priv(dev);
    2.14 -	netif_stop_queue(dev);
    2.15  	if (netif_carrier_ok(dev))
    2.16  		__netif_down(netif);
    2.17  	return 0;
    2.18 @@ -107,8 +105,11 @@ netif_t *netif_alloc(domid_t domid, unsi
    2.19  
    2.20  	SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
    2.21  
    2.22 -	/* Disable queuing. */
    2.23 -	dev->tx_queue_len = 0;
    2.24 +	/*
    2.25 +	 * Reduce default TX queuelen so that each guest interface only
    2.26 +	 * allows it to eat around 6.4MB of host memory.
    2.27 +	 */
    2.28 +	dev->tx_queue_len = 100;
    2.29  
    2.30  	for (i = 0; i < ETH_ALEN; i++)
    2.31  		if (be_mac[i] != 0)
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Jul 31 10:40:21 2006 +0100
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Jul 31 17:29:00 2006 +0100
     3.3 @@ -136,6 +136,14 @@ static inline int is_xen_skb(struct sk_b
     3.4  	return (cp == skbuff_cachep);
     3.5  }
     3.6  
     3.7 +static inline int netbk_queue_full(netif_t *netif)
     3.8 +{
     3.9 +	RING_IDX peek = netif->rx_req_cons_peek;
    3.10 +
    3.11 +	return ((netif->rx.sring->req_prod - peek) <= 0) ||
    3.12 +	       ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) <= 0);
    3.13 +}
    3.14 +
    3.15  int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
    3.16  {
    3.17  	netif_t *netif = netdev_priv(dev);
    3.18 @@ -143,12 +151,16 @@ int netif_be_start_xmit(struct sk_buff *
    3.19  	BUG_ON(skb->dev != dev);
    3.20  
    3.21  	/* Drop the packet if the target domain has no receive buffers. */
    3.22 -	if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev)) ||
    3.23 -	    (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
    3.24 -	    ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
    3.25 -	     NET_RX_RING_SIZE))
    3.26 +	if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev)))
    3.27  		goto drop;
    3.28  
    3.29 +	if (unlikely(netbk_queue_full(netif))) {
    3.30 +		/* Not a BUG_ON() -- misbehaving netfront can trigger this. */
    3.31 +		if (netbk_can_queue(dev))
    3.32 +			DPRINTK("Queue full but not stopped!\n");
    3.33 +		goto drop;
    3.34 +	}
    3.35 +
    3.36  	/*
    3.37  	 * We do not copy the packet unless:
    3.38  	 *  1. The data is shared; or
    3.39 @@ -178,6 +190,13 @@ int netif_be_start_xmit(struct sk_buff *
    3.40  	netif->rx_req_cons_peek++;
    3.41  	netif_get(netif);
    3.42  
    3.43 +	if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
    3.44 +		netif->rx.sring->req_event = netif->rx_req_cons_peek + 1;
    3.45 +		mb(); /* request notification /then/ check & stop the queue */
    3.46 +		if (netbk_queue_full(netif))
    3.47 +			netif_stop_queue(dev);
    3.48 +	}
    3.49 +
    3.50  	skb_queue_tail(&rx_queue, skb);
    3.51  	tasklet_schedule(&net_rx_tasklet);
    3.52  
    3.53 @@ -351,6 +370,10 @@ static void net_rx_action(unsigned long 
    3.54  			notify_list[notify_nr++] = irq;
    3.55  		}
    3.56  
    3.57 +		if (netif_queue_stopped(netif->dev) &&
    3.58 +		    !netbk_queue_full(netif))
    3.59 +			netif_wake_queue(netif->dev);
    3.60 +
    3.61  		netif_put(netif);
    3.62  		dev_kfree_skb(skb);
    3.63  		gop++;
    3.64 @@ -974,8 +997,13 @@ static void netif_page_release(struct pa
    3.65  irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
    3.66  {
    3.67  	netif_t *netif = dev_id;
    3.68 +
    3.69  	add_to_net_schedule_list_tail(netif);
    3.70  	maybe_schedule_tx_action();
    3.71 +
    3.72 +	if (netif_queue_stopped(netif->dev) && !netbk_queue_full(netif))
    3.73 +		netif_wake_queue(netif->dev);
    3.74 +
    3.75  	return IRQ_HANDLED;
    3.76  }
    3.77  
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Mon Jul 31 10:40:21 2006 +0100
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Mon Jul 31 17:29:00 2006 +0100
     4.3 @@ -353,6 +353,7 @@ static int connect_rings(struct backend_
     4.4  	unsigned long tx_ring_ref, rx_ring_ref;
     4.5  	unsigned int evtchn;
     4.6  	int err;
     4.7 +	int val;
     4.8  
     4.9  	DPRINTK("");
    4.10  
    4.11 @@ -367,6 +368,15 @@ static int connect_rings(struct backend_
    4.12  		return err;
    4.13  	}
    4.14  
    4.15 +	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-rx-notify", "%d",
    4.16 +			 &val) < 0)
    4.17 +		val = 0;
    4.18 +	if (val)
    4.19 +		be->netif->can_queue = 1;
    4.20 +	else
    4.21 +		/* Must be non-zero for pfifo_fast to work. */
    4.22 +		be->netif->dev->tx_queue_len = 1;
    4.23 +
    4.24  	/* Map the shared frame, irq etc. */
    4.25  	err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
    4.26  	if (err) {