ia64/xen-unstable

changeset 8164:c55ac1858bbc

Move the net split driver onto the generic rings in ring.h.

No backend should call SHARED_RING_INIT(): that's the frontend's job.
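
The intended split of initialisation duties, sketched from the calls this
patch leaves behind in netfront.c and netback/interface.c (error handling
and unrelated setup elided):

	/* Frontend: allocates and zeroes the shared page, then initialises
	 * both the shared indexes and its private front ring. */
	txs = (netif_tx_sring_t *)__get_free_page(GFP_KERNEL);
	memset(txs, 0, PAGE_SIZE);
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	/* Backend: maps the page the frontend granted and initialises only
	 * its private back ring -- a SHARED_RING_INIT() here would clobber
	 * indexes the frontend already owns. */
	txs = (netif_tx_sring_t *)netif->comms_area->addr;
	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);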

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Dec 01 11:27:27 2005 +0100 (2005-12-01)
parents 310746cf9f27
children dab434c9349c 36f09499bd8c
files linux-2.6-xen-sparse/arch/xen/Kconfig linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 linux-2.6-xen-sparse/drivers/xen/blkback/interface.c linux-2.6-xen-sparse/drivers/xen/blktap/interface.c linux-2.6-xen-sparse/drivers/xen/netback/common.h linux-2.6-xen-sparse/drivers/xen/netback/interface.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c xen/include/public/io/netif.h xen/include/public/io/ring.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/Kconfig	Thu Dec 01 11:10:40 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/Kconfig	Thu Dec 01 11:27:27 2005 +0100
     1.3 @@ -70,6 +70,19 @@ config XEN_NETDEV_BACKEND
     1.4  	  network devices to other guests via a high-performance shared-memory
     1.5  	  interface.
     1.6  
     1.7 +config XEN_NETDEV_PIPELINED_TRANSMITTER
     1.8 +	bool "Pipelined transmitter (DANGEROUS)"
     1.9 +	depends on XEN_NETDEV_BACKEND
    1.10 +	default n
    1.11 +	help
    1.12 +	  If the net backend is a dumb domain, such as a transparent Ethernet
    1.13 +	  bridge with no local IP interface, it is safe to say Y here to get
    1.14 +	  slightly lower network overhead.
    1.15 +	  If the backend has a local IP interface; or may be doing smart things
    1.16 +	  like reassembling packets to perform firewall filtering; or if you
    1.17 +	  are unsure; or if you experience network hangs when this option is
    1.18 +	  enabled; then you must say N here.
    1.19 +
    1.20  config XEN_TPMDEV_FRONTEND
    1.21          bool "TPM-device frontend driver"
    1.22          default n
    1.23 @@ -111,23 +124,6 @@ config XEN_NETDEV_FRONTEND
    1.24  	  dedicated device-driver domain, or your master control domain
    1.25  	  (domain 0), then you almost certainly want to say Y here.
    1.26  
    1.27 -config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
    1.28 -	bool "Pipelined transmitter (DANGEROUS)"
    1.29 -	depends on XEN_NETDEV_FRONTEND
    1.30 -	default n
    1.31 -	help
    1.32 -	  The driver will assume that the backend is pipelining packets for
    1.33 -	  transmission: whenever packets are pending in the remote backend,
    1.34 -	  the driver will not send asynchronous notifications when it queues
    1.35 -	  additional packets for transmission.
    1.36 -	  If the backend is a dumb domain, such as a transparent Ethernet
    1.37 -	  bridge with no local IP interface, it is safe to say Y here to get
    1.38 -	  slightly lower network overhead.
    1.39 -	  If the backend has a local IP interface; or may be doing smart things
    1.40 -	  like reassembling packets to perform firewall filtering; or if you
    1.41 -	  are unsure; or if you experience network hangs when this option is
    1.42 -	  enabled; then you must say N here.
    1.43 -
    1.44  config XEN_BLKDEV_TAP
    1.45  	bool "Block device tap driver"
    1.46  	default n
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32	Thu Dec 01 11:10:40 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32	Thu Dec 01 11:27:27 2005 +0100
     2.3 @@ -15,11 +15,11 @@ CONFIG_XEN_PHYSDEV_ACCESS=y
     2.4  CONFIG_XEN_BLKDEV_BACKEND=y
     2.5  # CONFIG_XEN_BLKDEV_TAP_BE is not set
     2.6  CONFIG_XEN_NETDEV_BACKEND=y
     2.7 +# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
     2.8  # CONFIG_XEN_TPMDEV_FRONTEND is not set
     2.9  # CONFIG_XEN_TPMDEV_BACKEND is not set
    2.10  CONFIG_XEN_BLKDEV_FRONTEND=y
    2.11  CONFIG_XEN_NETDEV_FRONTEND=y
    2.12 -# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
    2.13  # CONFIG_XEN_BLKDEV_TAP is not set
    2.14  # CONFIG_XEN_SHADOW_MODE is not set
    2.15  CONFIG_XEN_SCRUB_PAGES=y
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64	Thu Dec 01 11:10:40 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64	Thu Dec 01 11:27:27 2005 +0100
     3.3 @@ -15,11 +15,11 @@ CONFIG_XEN_PHYSDEV_ACCESS=y
     3.4  CONFIG_XEN_BLKDEV_BACKEND=y
     3.5  # CONFIG_XEN_BLKDEV_TAP_BE is not set
     3.6  CONFIG_XEN_NETDEV_BACKEND=y
     3.7 +# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
     3.8  # CONFIG_XEN_TPMDEV_FRONTEND is not set
     3.9  # CONFIG_XEN_TPMDEV_BACKEND is not set
    3.10  CONFIG_XEN_BLKDEV_FRONTEND=y
    3.11  CONFIG_XEN_NETDEV_FRONTEND=y
    3.12 -# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
    3.13  # CONFIG_XEN_BLKDEV_TAP is not set
    3.14  # CONFIG_XEN_SHADOW_MODE is not set
    3.15  CONFIG_XEN_SCRUB_PAGES=y
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32	Thu Dec 01 11:10:40 2005 +0100
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32	Thu Dec 01 11:27:27 2005 +0100
     4.3 @@ -16,7 +16,6 @@ CONFIG_NO_IDLE_HZ=y
     4.4  # CONFIG_XEN_TPMDEV_BACKEND is not set
     4.5  CONFIG_XEN_BLKDEV_FRONTEND=y
     4.6  CONFIG_XEN_NETDEV_FRONTEND=y
     4.7 -# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     4.8  # CONFIG_XEN_BLKDEV_TAP is not set
     4.9  # CONFIG_XEN_SHADOW_MODE is not set
    4.10  CONFIG_XEN_SCRUB_PAGES=y
     5.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64	Thu Dec 01 11:10:40 2005 +0100
     5.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64	Thu Dec 01 11:27:27 2005 +0100
     5.3 @@ -16,7 +16,6 @@ CONFIG_NO_IDLE_HZ=y
     5.4  # CONFIG_XEN_TPMDEV_BACKEND is not set
     5.5  CONFIG_XEN_BLKDEV_FRONTEND=y
     5.6  CONFIG_XEN_NETDEV_FRONTEND=y
     5.7 -# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     5.8  # CONFIG_XEN_BLKDEV_TAP is not set
     5.9  # CONFIG_XEN_SHADOW_MODE is not set
    5.10  CONFIG_XEN_SCRUB_PAGES=y
     6.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Thu Dec 01 11:10:40 2005 +0100
     6.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Thu Dec 01 11:27:27 2005 +0100
     6.3 @@ -15,11 +15,11 @@ CONFIG_XEN_PHYSDEV_ACCESS=y
     6.4  CONFIG_XEN_BLKDEV_BACKEND=y
     6.5  # CONFIG_XEN_BLKDEV_TAP_BE is not set
     6.6  CONFIG_XEN_NETDEV_BACKEND=y
     6.7 +# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
     6.8  # CONFIG_XEN_TPMDEV_FRONTEND is not set
     6.9  # CONFIG_XEN_TPMDEV_BACKEND is not set
    6.10  CONFIG_XEN_BLKDEV_FRONTEND=y
    6.11  CONFIG_XEN_NETDEV_FRONTEND=y
    6.12 -# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
    6.13  # CONFIG_XEN_BLKDEV_TAP is not set
    6.14  # CONFIG_XEN_SHADOW_MODE is not set
    6.15  CONFIG_XEN_SCRUB_PAGES=y
     7.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Thu Dec 01 11:10:40 2005 +0100
     7.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Thu Dec 01 11:27:27 2005 +0100
     7.3 @@ -15,11 +15,11 @@ CONFIG_XEN_PHYSDEV_ACCESS=y
     7.4  CONFIG_XEN_BLKDEV_BACKEND=y
     7.5  # CONFIG_XEN_BLKDEV_TAP_BE is not set
     7.6  CONFIG_XEN_NETDEV_BACKEND=y
     7.7 +# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
     7.8  # CONFIG_XEN_TPMDEV_FRONTEND is not set
     7.9  # CONFIG_XEN_TPMDEV_BACKEND is not set
    7.10  CONFIG_XEN_BLKDEV_FRONTEND=y
    7.11  CONFIG_XEN_NETDEV_FRONTEND=y
    7.12 -# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
    7.13  # CONFIG_XEN_BLKDEV_TAP is not set
    7.14  # CONFIG_XEN_SHADOW_MODE is not set
    7.15  CONFIG_XEN_SCRUB_PAGES=y
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Dec 01 11:10:40 2005 +0100
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Dec 01 11:27:27 2005 +0100
     8.3 @@ -107,7 +107,6 @@ int blkif_map(blkif_t *blkif, unsigned l
     8.4  	blkif->evtchn = op.u.bind_interdomain.local_port;
     8.5  
     8.6  	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
     8.7 -	SHARED_RING_INIT(sring);
     8.8  	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
     8.9  
    8.10  	blkif->irq = bind_evtchn_to_irqhandler(
     9.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Thu Dec 01 11:10:40 2005 +0100
     9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Thu Dec 01 11:27:27 2005 +0100
     9.3 @@ -97,7 +97,6 @@ int blkif_map(blkif_t *blkif, unsigned l
     9.4  	blkif->evtchn = op.u.bind_interdomain.local_port;
     9.5  
     9.6  	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
     9.7 -	SHARED_RING_INIT(sring);
     9.8  	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
     9.9  
    9.10  	blkif->irq = bind_evtchn_to_irqhandler(
    10.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Thu Dec 01 11:10:40 2005 +0100
    10.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Thu Dec 01 11:27:27 2005 +0100
    10.3 @@ -53,16 +53,12 @@ typedef struct netif_st {
    10.4  	unsigned int     irq;
    10.5  
    10.6  	/* The shared rings and indexes. */
    10.7 -	netif_tx_interface_t *tx;
    10.8 -	netif_rx_interface_t *rx;
    10.9 +	netif_tx_back_ring_t tx;
   10.10 +	netif_rx_back_ring_t rx;
   10.11  	struct vm_struct *comms_area;
   10.12  
   10.13 -	/* Private indexes into shared ring. */
   10.14 -	NETIF_RING_IDX rx_req_cons;
   10.15 -	NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
   10.16 -	NETIF_RING_IDX rx_resp_prod_copy;
   10.17 -	NETIF_RING_IDX tx_req_cons;
   10.18 -	NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
   10.19 +	/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
   10.20 +	RING_IDX rx_req_cons_peek;
   10.21  
   10.22  	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
   10.23  	unsigned long   credit_bytes;
   10.24 @@ -81,6 +77,9 @@ typedef struct netif_st {
   10.25  	struct work_struct free_work;
   10.26  } netif_t;
   10.27  
   10.28 +#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
   10.29 +#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
   10.30 +
   10.31  void netif_creditlimit(netif_t *netif);
   10.32  int  netif_disconnect(netif_t *netif);
   10.33  
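
Note: NET_TX_RING_SIZE and NET_RX_RING_SIZE are no longer fixed constants;
__RING_SIZE() works out how many entries fit in a page after the shared-ring
header, rounded down to a power of two so that free-running indexes can be
masked with (size - 1). The (netif_tx_sring_t *)0 argument supplies only the
layout: the macro subtracts (long)(_s) from (long)&(_s)->ring, i.e. it takes
the header offset without dereferencing anything. A sketch of the equivalent
arithmetic (an illustrative helper, not code from this patch):

	#include <stddef.h>

	static unsigned int ring_size(size_t page_size, /* e.g. PAGE_SIZE */
	                              size_t header,    /* offsetof(sring, ring) */
	                              size_t entry)     /* sizeof(sring->ring[0]) */
	{
		unsigned int n = (page_size - header) / entry; /* raw fit */
		unsigned int p = 1;
		while ((p << 1) <= n)   /* __RD32(): round down to 2^k */
			p <<= 1;
		return p;               /* mask indexes with (p - 1) */
	}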
    11.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Thu Dec 01 11:10:40 2005 +0100
    11.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Thu Dec 01 11:27:27 2005 +0100
    11.3 @@ -184,6 +184,8 @@ int netif_map(netif_t *netif, unsigned l
    11.4  	      unsigned long rx_ring_ref, unsigned int evtchn)
    11.5  {
    11.6  	int err;
    11.7 +	netif_tx_sring_t *txs;
    11.8 +	netif_rx_sring_t *rxs;
    11.9  	evtchn_op_t op = {
   11.10  		.cmd = EVTCHNOP_bind_interdomain,
   11.11  		.u.bind_interdomain.remote_dom = netif->domid,
   11.12 @@ -216,10 +218,15 @@ int netif_map(netif_t *netif, unsigned l
   11.13  		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
   11.14  	disable_irq(netif->irq);
   11.15  
   11.16 -	netif->tx = (netif_tx_interface_t *)netif->comms_area->addr;
   11.17 -	netif->rx = (netif_rx_interface_t *)
   11.18 +	txs = (netif_tx_sring_t *)netif->comms_area->addr;
   11.19 +	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
   11.20 +
   11.21 +	rxs = (netif_rx_sring_t *)
   11.22  		((char *)netif->comms_area->addr + PAGE_SIZE);
   11.23 -	netif->tx->resp_prod = netif->rx->resp_prod = 0;
   11.24 +	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
   11.25 +
   11.26 +	netif->rx_req_cons_peek = 0;
   11.27 +
   11.28  	netif_get(netif);
   11.29  	wmb(); /* Other CPUs see new state before interface is started. */
   11.30  
   11.31 @@ -246,7 +253,7 @@ static void free_netif_callback(void *ar
   11.32  
   11.33  	unregister_netdev(netif->dev);
   11.34  
   11.35 -	if (netif->tx) {
   11.36 +	if (netif->tx.sring) {
   11.37  		unmap_frontend_pages(netif);
   11.38  		free_vm_area(netif->comms_area);
   11.39  	}
    12.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Dec 01 11:10:40 2005 +0100
    12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Dec 01 11:27:27 2005 +0100
    12.3 @@ -38,8 +38,8 @@ static struct timer_list net_timer;
    12.4  #define MAX_PENDING_REQS 256
    12.5  
    12.6  static struct sk_buff_head rx_queue;
    12.7 -static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2+1];
    12.8 -static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
    12.9 +static multicall_entry_t rx_mcl[NET_RX_RING_SIZE*2+1];
   12.10 +static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
   12.11  
   12.12  static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS];
   12.13  static unsigned char rx_notify[NR_IRQS];
   12.14 @@ -126,8 +126,9 @@ int netif_be_start_xmit(struct sk_buff *
   12.15  
   12.16  	/* Drop the packet if the target domain has no receive buffers. */
   12.17  	if (!netif->active || 
   12.18 -	    (netif->rx_req_cons == netif->rx->req_prod) ||
   12.19 -	    ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE))
   12.20 +	    (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
   12.21 +	    ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
   12.22 +	     NET_RX_RING_SIZE))
   12.23  		goto drop;
   12.24  
   12.25  	/*
   12.26 @@ -154,7 +155,7 @@ int netif_be_start_xmit(struct sk_buff *
   12.27  		skb = nskb;
   12.28  	}
   12.29  
   12.30 -	netif->rx_req_cons++;
   12.31 +	netif->rx_req_cons_peek++;
   12.32  	netif_get(netif);
   12.33  
   12.34  	skb_queue_tail(&rx_queue, skb);
   12.35 @@ -198,7 +199,7 @@ static void net_rx_action(unsigned long 
   12.36  	unsigned long vdata, old_mfn, new_mfn;
   12.37  	struct sk_buff_head rxq;
   12.38  	struct sk_buff *skb;
   12.39 -	u16 notify_list[NETIF_RX_RING_SIZE];
   12.40 +	u16 notify_list[NET_RX_RING_SIZE];
   12.41  	int notify_nr = 0;
   12.42  	int ret;
   12.43  
   12.44 @@ -233,9 +234,9 @@ static void net_rx_action(unsigned long 
   12.45  
   12.46  		gop->mfn = old_mfn;
   12.47  		gop->domid = netif->domid;
   12.48 -		gop->ref = netif->rx->ring[
   12.49 -			MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
   12.50 -		netif->rx_resp_prod_copy++;
   12.51 +		gop->ref = RING_GET_REQUEST(
   12.52 +			&netif->rx, netif->rx.req_cons)->gref;
   12.53 +		netif->rx.req_cons++;
   12.54  		gop++;
   12.55  
   12.56  		mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
   12.57 @@ -300,8 +301,7 @@ static void net_rx_action(unsigned long 
   12.58  			status = NETIF_RSP_ERROR; 
   12.59  		}
   12.60  		irq = netif->irq;
   12.61 -		id = netif->rx->ring[
   12.62 -			MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
   12.63 +		id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
   12.64  		if (make_rx_response(netif, id, status,
   12.65  				     (unsigned long)skb->data & ~PAGE_MASK,
   12.66  				     size, skb->proto_csum_valid) &&
   12.67 @@ -371,13 +371,31 @@ static void add_to_net_schedule_list_tai
   12.68  	spin_unlock_irq(&net_schedule_list_lock);
   12.69  }
   12.70  
   12.71 +/*
   12.72 + * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
   12.73 + * If this driver is pipelining transmit requests then we can be very
   12.74 + * aggressive in avoiding new-packet notifications -- the frontend only needs to
   12.75 + * send a notification if there are no outstanding unreceived responses.
   12.76 + * If we may be buffering transmit buffers for any reason then we must be
   12.77 + * rather more conservative and advertise here that this connection is 'sleeping'.
   12.78 + */
   12.79  void netif_schedule_work(netif_t *netif)
   12.80  {
   12.81 -	if ((netif->tx_req_cons != netif->tx->req_prod) &&
   12.82 -	    ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE)) {
   12.83 +	if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
   12.84  		add_to_net_schedule_list_tail(netif);
   12.85  		maybe_schedule_tx_action();
   12.86  	}
   12.87 +#ifndef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
   12.88 +	else {
   12.89 +		netif->tx.sring->server_is_sleeping = 1;
   12.90 +		mb();
   12.91 +		if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
   12.92 +			netif->tx.sring->server_is_sleeping = 0;
   12.93 +			add_to_net_schedule_list_tail(netif);
   12.94 +			maybe_schedule_tx_action();
   12.95 +		}
   12.96 +	}
   12.97 +#endif
   12.98  }
   12.99  
  12.100  void netif_deschedule_work(netif_t *netif)
  12.101 @@ -437,11 +455,18 @@ inline static void net_tx_action_dealloc
  12.102                   * packets.
  12.103  		 */
  12.104  		mb();
  12.105 -		if ((netif->tx_req_cons != netif->tx->req_prod) &&
  12.106 -		    ((netif->tx_req_cons-netif->tx_resp_prod) !=
  12.107 -		     NETIF_TX_RING_SIZE))
  12.108 +
  12.109 +		if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
  12.110  			add_to_net_schedule_list_tail(netif);
  12.111 -        
  12.112 +		} else {
  12.113 +			netif->tx.sring->server_is_sleeping = 1;
  12.114 +			mb();
  12.115 +			if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
  12.116 +				netif->tx.sring->server_is_sleeping = 0;
  12.117 +				add_to_net_schedule_list_tail(netif);
  12.118 +			}
  12.119 +		}
  12.120 +
  12.121  		netif_put(netif);
  12.122  	}
  12.123  }
  12.124 @@ -454,7 +479,7 @@ static void net_tx_action(unsigned long 
  12.125  	netif_t *netif;
  12.126  	netif_tx_request_t txreq;
  12.127  	u16 pending_idx;
  12.128 -	NETIF_RING_IDX i;
  12.129 +	RING_IDX i;
  12.130  	gnttab_map_grant_ref_t *mop;
  12.131  	unsigned int data_len;
  12.132  	int ret;
  12.133 @@ -472,16 +497,14 @@ static void net_tx_action(unsigned long 
  12.134  		remove_from_net_schedule_list(netif);
  12.135  
  12.136  		/* Work to do? */
  12.137 -		i = netif->tx_req_cons;
  12.138 -		if ((i == netif->tx->req_prod) ||
  12.139 -		    ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE)) {
  12.140 +		if (!RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
  12.141  			netif_put(netif);
  12.142  			continue;
  12.143  		}
  12.144  
  12.145 +		i = netif->tx.req_cons;
  12.146  		rmb(); /* Ensure that we see the request before we copy it. */
  12.147 -		memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
  12.148 -		       sizeof(txreq));
  12.149 +		memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
  12.150  		/* Credit-based scheduling. */
  12.151  		if (txreq.size > netif->remaining_credit) {
  12.152  			unsigned long now = jiffies;
  12.153 @@ -515,12 +538,7 @@ static void net_tx_action(unsigned long 
  12.154  		}
  12.155  		netif->remaining_credit -= txreq.size;
  12.156  
  12.157 -		/*
  12.158 -		 * Why the barrier? It ensures that the frontend sees updated
  12.159 -		 * req_cons before we check for more work to schedule.
  12.160 -		 */
  12.161 -		netif->tx->req_cons = ++netif->tx_req_cons;
  12.162 -		mb();
  12.163 +		netif->tx.req_cons++;
  12.164  
  12.165  		netif_schedule_work(netif);
  12.166  
  12.167 @@ -688,17 +706,18 @@ static void make_tx_response(netif_t *ne
  12.168                               u16      id,
  12.169                               s8       st)
  12.170  {
  12.171 -	NETIF_RING_IDX i = netif->tx_resp_prod;
  12.172 +	RING_IDX i = netif->tx.rsp_prod_pvt;
  12.173  	netif_tx_response_t *resp;
  12.174  
  12.175 -	resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
  12.176 +	resp = RING_GET_RESPONSE(&netif->tx, i);
  12.177  	resp->id     = id;
  12.178  	resp->status = st;
  12.179  	wmb();
  12.180 -	netif->tx->resp_prod = netif->tx_resp_prod = ++i;
  12.181 +	netif->tx.rsp_prod_pvt = ++i;
  12.182 +	RING_PUSH_RESPONSES(&netif->tx);
  12.183  
  12.184  	mb(); /* Update producer before checking event threshold. */
  12.185 -	if (i == netif->tx->event)
  12.186 +	if (i == netif->tx.sring->rsp_event)
  12.187  		notify_remote_via_irq(netif->irq);
  12.188  }
  12.189  
  12.190 @@ -709,10 +728,10 @@ static int make_rx_response(netif_t *net
  12.191                              u16      size,
  12.192                              u16      csum_valid)
  12.193  {
  12.194 -	NETIF_RING_IDX i = netif->rx_resp_prod;
  12.195 +	RING_IDX i = netif->rx.rsp_prod_pvt;
  12.196  	netif_rx_response_t *resp;
  12.197  
  12.198 -	resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
  12.199 +	resp = RING_GET_RESPONSE(&netif->rx, i);
  12.200  	resp->offset     = offset;
  12.201  	resp->csum_valid = csum_valid;
  12.202  	resp->id         = id;
  12.203 @@ -720,10 +739,11 @@ static int make_rx_response(netif_t *net
  12.204  	if (st < 0)
  12.205  		resp->status = (s16)st;
  12.206  	wmb();
  12.207 -	netif->rx->resp_prod = netif->rx_resp_prod = ++i;
  12.208 +	netif->rx.rsp_prod_pvt = ++i;
  12.209 +	RING_PUSH_RESPONSES(&netif->rx);
  12.210  
  12.211  	mb(); /* Update producer before checking event threshold. */
  12.212 -	return (i == netif->rx->event);
  12.213 +	return (i == netif->rx.sring->rsp_event);
  12.214  }
  12.215  
  12.216  static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
  12.217 @@ -739,16 +759,16 @@ static irqreturn_t netif_be_dbg(int irq,
  12.218  		netif = list_entry(ent, netif_t, list);
  12.219  		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
  12.220  		       "rx_resp_prod=%08x\n",
  12.221 -		       i, netif->rx_req_cons, netif->rx_resp_prod);
  12.222 +		       i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
  12.223  		printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
  12.224 -		       netif->tx_req_cons, netif->tx_resp_prod);
  12.225 +		       netif->tx.req_cons, netif->tx.rsp_prod_pvt);
  12.226  		printk(KERN_ALERT "   shared(rx_req_prod=%08x "
  12.227  		       "rx_resp_prod=%08x\n",
  12.228 -		       netif->rx->req_prod, netif->rx->resp_prod);
  12.229 +		       netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
  12.230  		printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
  12.231 -		       netif->rx->event, netif->tx->req_prod);
  12.232 +		       netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
  12.233  		printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
  12.234 -		       netif->tx->resp_prod, netif->tx->event);
  12.235 +		       netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
  12.236  		i++;
  12.237  	}
  12.238  
  12.239 @@ -764,7 +784,7 @@ static int __init netback_init(void)
  12.240  	struct page *page;
  12.241  
  12.242  	/* We can increase reservation by this much in net_rx_action(). */
  12.243 -	balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
  12.244 +	balloon_update_driver_allowance(NET_RX_RING_SIZE);
  12.245  
  12.246  	skb_queue_head_init(&rx_queue);
  12.247  	skb_queue_head_init(&tx_queue);
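
Note: rx_req_cons_peek lets netif_be_start_xmit() reserve receive-ring slots
ahead of net_rx_action() actually consuming the requests. Condensed from the
hunks above (a sketch; locking and refcounting elided):

	/* netif_be_start_xmit(): reserve a slot without consuming it. */
	if ((netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
	    ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
	     NET_RX_RING_SIZE))
		goto drop;                /* no request queued, or ring full */
	netif->rx_req_cons_peek++;        /* slot is now spoken for */

	/* net_rx_action(), later: the real consume. */
	gop->ref = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons)->gref;
	netif->rx.req_cons++;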
    13.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Dec 01 11:10:40 2005 +0100
    13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Dec 01 11:27:27 2005 +0100
    13.3 @@ -61,6 +61,9 @@
    13.4  
    13.5  #define GRANT_INVALID_REF	0
    13.6  
    13.7 +#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
    13.8 +#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
    13.9 +
   13.10  #ifndef __GFP_NOWARN
   13.11  #define __GFP_NOWARN 0
   13.12  #endif
   13.13 @@ -76,22 +79,9 @@
   13.14  /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
   13.15  #define RX_HEADROOM 200
   13.16  
   13.17 -/*
   13.18 - * If the backend driver is pipelining transmit requests then we can be very
   13.19 - * aggressive in avoiding new-packet notifications -- only need to send a
   13.20 - * notification if there are no outstanding unreceived responses.
   13.21 - * If the backend may be buffering our transmit buffers for any reason then we
   13.22 - * are rather more conservative.
   13.23 - */
   13.24 -#ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
   13.25 -#define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
   13.26 -#else
   13.27 -#define TX_TEST_IDX req_cons  /* conservative: not seen all our requests? */
   13.28 -#endif
   13.29 -
   13.30 -static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
   13.31 -static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
   13.32 -static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
   13.33 +static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
   13.34 +static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
   13.35 +static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
   13.36  
   13.37  struct netfront_info
   13.38  {
   13.39 @@ -99,11 +89,10 @@ struct netfront_info
   13.40  	struct net_device *netdev;
   13.41  
   13.42  	struct net_device_stats stats;
   13.43 -	NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
   13.44  	unsigned int tx_full;
   13.45      
   13.46 -	netif_tx_interface_t *tx;
   13.47 -	netif_rx_interface_t *rx;
   13.48 +	netif_tx_front_ring_t tx;
   13.49 +	netif_rx_front_ring_t rx;
   13.50  
   13.51  	spinlock_t   tx_lock;
   13.52  	spinlock_t   rx_lock;
   13.53 @@ -124,7 +113,7 @@ struct netfront_info
   13.54  
   13.55  	/* Receive-ring batched refills. */
   13.56  #define RX_MIN_TARGET 8
   13.57 -#define RX_MAX_TARGET NETIF_RX_RING_SIZE
   13.58 +#define RX_MAX_TARGET NET_RX_RING_SIZE
   13.59  	int rx_min_target, rx_max_target, rx_target;
   13.60  	struct sk_buff_head rx_batch;
   13.61  
   13.62 @@ -132,13 +121,13 @@ struct netfront_info
   13.63  	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
   13.64  	 * array is an index into a chain of free entries.
   13.65  	 */
   13.66 -	struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
   13.67 -	struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
   13.68 +	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
   13.69 +	struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
   13.70  
   13.71  	grant_ref_t gref_tx_head;
   13.72 -	grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 
   13.73 +	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; 
   13.74  	grant_ref_t gref_rx_head;
   13.75 -	grant_ref_t grant_rx_ref[NETIF_TX_RING_SIZE + 1]; 
   13.76 +	grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1]; 
   13.77  
   13.78  	struct xenbus_device *xbdev;
   13.79  	int tx_ring_ref;
   13.80 @@ -337,37 +326,45 @@ again:
   13.81  
   13.82  static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
   13.83  {
   13.84 +	netif_tx_sring_t *txs;
   13.85 +	netif_rx_sring_t *rxs;
   13.86  	int err;
   13.87  	struct net_device *netdev = info->netdev;
   13.88  
   13.89  	info->tx_ring_ref = GRANT_INVALID_REF;
   13.90  	info->rx_ring_ref = GRANT_INVALID_REF;
   13.91 -	info->rx = NULL;
   13.92 -	info->tx = NULL;
   13.93 +	info->rx.sring = NULL;
   13.94 +	info->tx.sring = NULL;
   13.95  	info->irq = 0;
   13.96  
   13.97 -	info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
   13.98 -	if (!info->tx) {
   13.99 +	txs = (netif_tx_sring_t *)__get_free_page(GFP_KERNEL);
  13.100 +	if (!txs) {
  13.101  		err = -ENOMEM;
  13.102  		xenbus_dev_fatal(dev, err, "allocating tx ring page");
  13.103  		goto fail;
  13.104  	}
  13.105 -	info->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
  13.106 -	if (!info->rx) {
  13.107 +	rxs = (netif_rx_sring_t *)__get_free_page(GFP_KERNEL);
  13.108 +	if (!rxs) {
  13.109  		err = -ENOMEM;
  13.110  		xenbus_dev_fatal(dev, err, "allocating rx ring page");
  13.111  		goto fail;
  13.112  	}
  13.113 -	memset(info->tx, 0, PAGE_SIZE);
  13.114 -	memset(info->rx, 0, PAGE_SIZE);
  13.115 +	memset(txs, 0, PAGE_SIZE);
  13.116 +	memset(rxs, 0, PAGE_SIZE);
  13.117  	info->backend_state = BEST_DISCONNECTED;
  13.118  
  13.119 -	err = xenbus_grant_ring(dev, virt_to_mfn(info->tx));
  13.120 +	SHARED_RING_INIT(txs);
  13.121 +	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
  13.122 +
  13.123 +	SHARED_RING_INIT(rxs);
  13.124 +	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
  13.125 +
  13.126 +	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
  13.127  	if (err < 0)
  13.128  		goto fail;
  13.129  	info->tx_ring_ref = err;
  13.130  
  13.131 -	err = xenbus_grant_ring(dev, virt_to_mfn(info->rx));
  13.132 +	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
  13.133  	if (err < 0)
  13.134  		goto fail;
  13.135  	info->rx_ring_ref = err;
  13.136 @@ -454,7 +451,7 @@ static int network_open(struct net_devic
  13.137  	np->user_state = UST_OPEN;
  13.138  
  13.139  	network_alloc_rx_buffers(dev);
  13.140 -	np->rx->event = np->rx_resp_cons + 1;
  13.141 +	np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
  13.142  
  13.143  	netif_start_queue(dev);
  13.144  
  13.145 @@ -463,7 +460,7 @@ static int network_open(struct net_devic
  13.146  
  13.147  static void network_tx_buf_gc(struct net_device *dev)
  13.148  {
  13.149 -	NETIF_RING_IDX i, prod;
  13.150 +	RING_IDX i, prod;
  13.151  	unsigned short id;
  13.152  	struct netfront_info *np = netdev_priv(dev);
  13.153  	struct sk_buff *skb;
  13.154 @@ -472,11 +469,11 @@ static void network_tx_buf_gc(struct net
  13.155  		return;
  13.156  
  13.157  	do {
  13.158 -		prod = np->tx->resp_prod;
  13.159 +		prod = np->tx.sring->rsp_prod;
  13.160  		rmb(); /* Ensure we see responses up to 'rp'. */
  13.161  
  13.162 -		for (i = np->tx_resp_cons; i != prod; i++) {
  13.163 -			id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
  13.164 +		for (i = np->tx.rsp_cons; i != prod; i++) {
  13.165 +			id  = RING_GET_RESPONSE(&np->tx, i)->id;
  13.166  			skb = np->tx_skbs[id];
  13.167  			if (unlikely(gnttab_query_foreign_access(
  13.168  				np->grant_tx_ref[id]) != 0)) {
  13.169 @@ -494,7 +491,7 @@ static void network_tx_buf_gc(struct net
  13.170  			dev_kfree_skb_irq(skb);
  13.171  		}
  13.172          
  13.173 -		np->tx_resp_cons = prod;
  13.174 +		np->tx.rsp_cons = prod;
  13.175          
  13.176  		/*
  13.177  		 * Set a new event, then check for race with update of tx_cons.
  13.178 @@ -504,12 +501,14 @@ static void network_tx_buf_gc(struct net
  13.179  		 * data is outstanding: in such cases notification from Xen is
  13.180  		 * likely to be the only kick that we'll get.
  13.181  		 */
  13.182 -		np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
  13.183 +		np->tx.sring->rsp_event =
  13.184 +			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
  13.185  		mb();
  13.186 -	} while (prod != np->tx->resp_prod);
  13.187 +	} while (prod != np->tx.sring->rsp_prod);
  13.188  
  13.189   out: 
  13.190 -	if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
  13.191 +	if (np->tx_full &&
  13.192 +	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
  13.193  		np->tx_full = 0;
  13.194  		if (np->user_state == UST_OPEN)
  13.195  			netif_wake_queue(dev);
  13.196 @@ -523,7 +522,7 @@ static void network_alloc_rx_buffers(str
  13.197  	struct netfront_info *np = netdev_priv(dev);
  13.198  	struct sk_buff *skb;
  13.199  	int i, batch_target;
  13.200 -	NETIF_RING_IDX req_prod = np->rx->req_prod;
  13.201 +	RING_IDX req_prod = np->rx.req_prod_pvt;
  13.202  	struct xen_memory_reservation reservation;
  13.203  	grant_ref_t ref;
  13.204  
  13.205 @@ -536,7 +535,7 @@ static void network_alloc_rx_buffers(str
  13.206  	 * allocator, so should reduce the chance of failed allocation requests
  13.207  	 *  both for ourself and for other kernel subsystems.
  13.208  	 */
  13.209 -	batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
  13.210 +	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
  13.211  	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
  13.212  		skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
  13.213  		if (skb == NULL)
  13.214 @@ -558,13 +557,13 @@ static void network_alloc_rx_buffers(str
  13.215  
  13.216  		np->rx_skbs[id] = skb;
  13.217          
  13.218 -		np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
  13.219 +		RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
  13.220  		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
  13.221  		BUG_ON((signed short)ref < 0);
  13.222  		np->grant_rx_ref[id] = ref;
  13.223  		gnttab_grant_foreign_transfer_ref(ref,
  13.224  						  np->xbdev->otherend_id);
  13.225 -		np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
  13.226 +		RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
  13.227  		rx_pfn_array[i] = virt_to_mfn(skb->head);
  13.228  
  13.229  		/* Remove this page from map before passing back to Xen. */
  13.230 @@ -599,10 +598,11 @@ static void network_alloc_rx_buffers(str
  13.231  		panic("Unable to reduce memory reservation\n");
  13.232  
  13.233  	/* Above is a suitable barrier to ensure backend will see requests. */
  13.234 -	np->rx->req_prod = req_prod + i;
  13.235 +	np->rx.req_prod_pvt = req_prod + i;
  13.236 +	RING_PUSH_REQUESTS(&np->rx);
  13.237  
  13.238  	/* Adjust our fill target if we risked running out of buffers. */
  13.239 -	if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
  13.240 +	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
  13.241  	    ((np->rx_target *= 2) > np->rx_max_target))
  13.242  		np->rx_target = np->rx_max_target;
  13.243  }
  13.244 @@ -613,7 +613,7 @@ static int network_start_xmit(struct sk_
  13.245  	unsigned short id;
  13.246  	struct netfront_info *np = netdev_priv(dev);
  13.247  	netif_tx_request_t *tx;
  13.248 -	NETIF_RING_IDX i;
  13.249 +	RING_IDX i;
  13.250  	grant_ref_t ref;
  13.251  	unsigned long mfn;
  13.252  
  13.253 @@ -643,12 +643,12 @@ static int network_start_xmit(struct sk_
  13.254  		goto drop;
  13.255  	}
  13.256  
  13.257 -	i = np->tx->req_prod;
  13.258 +	i = np->tx.req_prod_pvt;
  13.259  
  13.260  	id = GET_ID_FROM_FREELIST(np->tx_skbs);
  13.261  	np->tx_skbs[id] = skb;
  13.262  
  13.263 -	tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
  13.264 +	tx = RING_GET_REQUEST(&np->tx, i);
  13.265  
  13.266  	tx->id   = id;
  13.267  	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
  13.268 @@ -662,11 +662,12 @@ static int network_start_xmit(struct sk_
  13.269  	tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
  13.270  
  13.271  	wmb(); /* Ensure that backend will see the request. */
  13.272 -	np->tx->req_prod = i + 1;
  13.273 +	np->tx.req_prod_pvt = i + 1;
  13.274 +	RING_PUSH_REQUESTS(&np->tx);
  13.275  
  13.276  	network_tx_buf_gc(dev);
  13.277  
  13.278 -	if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
  13.279 +	if (RING_FULL(&np->tx)) {
  13.280  		np->tx_full = 1;
  13.281  		netif_stop_queue(dev);
  13.282  	}
  13.283 @@ -678,8 +679,10 @@ static int network_start_xmit(struct sk_
  13.284  
  13.285  	/* Only notify Xen if we really have to. */
  13.286  	mb();
  13.287 -	if (np->tx->TX_TEST_IDX == i)
  13.288 +	if (np->tx.sring->server_is_sleeping) {
  13.289 +		np->tx.sring->server_is_sleeping = 0;
  13.290  		notify_remote_via_irq(np->irq);
  13.291 +	}
  13.292  
  13.293  	return 0;
  13.294  
  13.295 @@ -699,7 +702,7 @@ static irqreturn_t netif_int(int irq, vo
  13.296  	network_tx_buf_gc(dev);
  13.297  	spin_unlock_irqrestore(&np->tx_lock, flags);
  13.298  
  13.299 -	if ((np->rx_resp_cons != np->rx->resp_prod) &&
  13.300 +	if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx) &&
  13.301  	    (np->user_state == UST_OPEN))
  13.302  		netif_rx_schedule(dev);
  13.303  
  13.304 @@ -712,7 +715,7 @@ static int netif_poll(struct net_device 
  13.305  	struct netfront_info *np = netdev_priv(dev);
  13.306  	struct sk_buff *skb, *nskb;
  13.307  	netif_rx_response_t *rx;
  13.308 -	NETIF_RING_IDX i, rp;
  13.309 +	RING_IDX i, rp;
  13.310  	mmu_update_t *mmu = rx_mmu;
  13.311  	multicall_entry_t *mcl = rx_mcl;
  13.312  	int work_done, budget, more_to_do = 1;
  13.313 @@ -732,13 +735,13 @@ static int netif_poll(struct net_device 
  13.314  
  13.315  	if ((budget = *pbudget) > dev->quota)
  13.316  		budget = dev->quota;
  13.317 -	rp = np->rx->resp_prod;
  13.318 +	rp = np->rx.sring->rsp_prod;
  13.319  	rmb(); /* Ensure we see queued responses up to 'rp'. */
  13.320  
  13.321 -	for (i = np->rx_resp_cons, work_done = 0; 
  13.322 +	for (i = np->rx.rsp_cons, work_done = 0; 
  13.323  	     (i != rp) && (work_done < budget);
  13.324  	     i++, work_done++) {
  13.325 -		rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
  13.326 +		rx = RING_GET_RESPONSE(&np->rx, i);
  13.327  
  13.328  		/*
  13.329                   * This definitely indicates a bug, either in this driver or
  13.330 @@ -756,10 +759,11 @@ static int netif_poll(struct net_device 
  13.331  			if (net_ratelimit())
  13.332  				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
  13.333  					rx->id, rx->status);
  13.334 -			np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
  13.335 -				req.id = rx->id;
  13.336 +			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
  13.337 +				rx->id;
  13.338  			wmb();
  13.339 -			np->rx->req_prod++;
  13.340 +			np->rx.req_prod_pvt++;
  13.341 +			RING_PUSH_REQUESTS(&np->rx);
  13.342  			work_done--;
  13.343  			continue;
  13.344  		}
  13.345 @@ -861,11 +865,11 @@ static int netif_poll(struct net_device 
  13.346  		dev->last_rx = jiffies;
  13.347  	}
  13.348  
  13.349 -	np->rx_resp_cons = i;
  13.350 +	np->rx.rsp_cons = i;
  13.351  
  13.352  	/* If we get a callback with very few responses, reduce fill target. */
  13.353  	/* NB. Note exponential increase, linear decrease. */
  13.354 -	if (((np->rx->req_prod - np->rx->resp_prod) >
  13.355 +	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
  13.356  	     ((3*np->rx_target) / 4)) &&
  13.357  	    (--np->rx_target < np->rx_min_target))
  13.358  		np->rx_target = np->rx_min_target;
  13.359 @@ -878,11 +882,11 @@ static int netif_poll(struct net_device 
  13.360  	if (work_done < budget) {
  13.361  		local_irq_save(flags);
  13.362  
  13.363 -		np->rx->event = i + 1;
  13.364 +		np->rx.sring->rsp_event = i + 1;
  13.365      
  13.366  		/* Deal with hypervisor racing our resetting of rx_event. */
  13.367  		mb();
  13.368 -		if (np->rx->resp_prod == i) {
  13.369 +		if (np->rx.sring->rsp_prod == i) {
  13.370  			__netif_rx_complete(dev);
  13.371  			more_to_do = 0;
  13.372  		}
  13.373 @@ -925,8 +929,8 @@ static void network_connect(struct net_d
  13.374  	/* Recovery procedure: */
  13.375  
  13.376  	/* Step 1: Reinitialise variables. */
  13.377 -	np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
  13.378 -	np->rx->event = np->tx->event = 1;
  13.379 +	np->tx_full = 0;
  13.380 +	np->rx.sring->rsp_event = np->tx.sring->rsp_event = 1;
  13.381  
  13.382  	/*
  13.383  	 * Step 2: Rebuild the RX and TX ring contents.
  13.384 @@ -946,13 +950,14 @@ static void network_connect(struct net_d
  13.385  	 * to avoid this but maybe it doesn't matter so much given the
  13.386  	 * interface has been down.
  13.387  	 */
  13.388 -	for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
  13.389 +	for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
  13.390  		if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
  13.391  			continue;
  13.392  
  13.393  		skb = np->tx_skbs[i];
  13.394  
  13.395 -		tx = &np->tx->ring[requeue_idx++].req;
  13.396 +		tx = RING_GET_REQUEST(&np->tx, requeue_idx);
  13.397 +		requeue_idx++;
  13.398  
  13.399  		tx->id = i;
  13.400  		gnttab_grant_foreign_access_ref(
  13.401 @@ -968,21 +973,23 @@ static void network_connect(struct net_d
  13.402  		np->stats.tx_packets++;
  13.403  	}
  13.404  	wmb();
  13.405 -	np->tx->req_prod = requeue_idx;
  13.406 +	np->tx.req_prod_pvt = requeue_idx;
  13.407 +	RING_PUSH_REQUESTS(&np->tx);
  13.408  
  13.409  	/* Rebuild the RX buffer freelist and the RX ring itself. */
  13.410 -	for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
  13.411 +	for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) { 
  13.412  		if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
  13.413  			continue;
  13.414  		gnttab_grant_foreign_transfer_ref(
  13.415  			np->grant_rx_ref[i], np->xbdev->otherend_id);
  13.416 -		np->rx->ring[requeue_idx].req.gref =
  13.417 +		RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
  13.418  			np->grant_rx_ref[i];
  13.419 -		np->rx->ring[requeue_idx].req.id = i;
  13.420 +		RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
  13.421  		requeue_idx++; 
  13.422  	}
  13.423  	wmb();                
  13.424 -	np->rx->req_prod = requeue_idx;
  13.425 +	np->rx.req_prod_pvt = requeue_idx;
  13.426 +	RING_PUSH_REQUESTS(&np->rx);
  13.427  
  13.428  	/*
  13.429  	 * Step 3: All public and private state should now be sane.  Get
  13.430 @@ -1066,25 +1073,25 @@ static int create_netdev(int handle, str
  13.431  	np->rx_max_target = RX_MAX_TARGET;
  13.432  
  13.433  	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
  13.434 -	for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
  13.435 +	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
  13.436  		np->tx_skbs[i] = (void *)((unsigned long) i+1);
  13.437  		np->grant_tx_ref[i] = GRANT_INVALID_REF;
  13.438  	}
  13.439  
  13.440 -	for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
  13.441 +	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
  13.442  		np->rx_skbs[i] = (void *)((unsigned long) i+1);
  13.443  		np->grant_rx_ref[i] = GRANT_INVALID_REF;
  13.444  	}
  13.445  
  13.446  	/* A grant for every tx ring slot */
  13.447 -	if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
  13.448 +	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
  13.449  					  &np->gref_tx_head) < 0) {
  13.450  		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
  13.451  		err = -ENOMEM;
  13.452  		goto exit;
  13.453  	}
  13.454  	/* A grant for every rx ring slot */
  13.455 -	if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
  13.456 +	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
  13.457  					  &np->gref_rx_head) < 0) {
  13.458  		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
  13.459  		gnttab_free_grant_references(np->gref_tx_head);
  13.460 @@ -1212,12 +1219,12 @@ static void netif_disconnect_backend(str
  13.461  	spin_unlock(&info->rx_lock);
  13.462  	spin_unlock_irq(&info->tx_lock);
  13.463      
  13.464 -	end_access(info->tx_ring_ref, info->tx);
  13.465 -	end_access(info->rx_ring_ref, info->rx);
  13.466 +	end_access(info->tx_ring_ref, info->tx.sring);
  13.467 +	end_access(info->rx_ring_ref, info->rx.sring);
  13.468  	info->tx_ring_ref = GRANT_INVALID_REF;
  13.469  	info->rx_ring_ref = GRANT_INVALID_REF;
  13.470 -	info->tx = NULL;
  13.471 -	info->rx = NULL;
  13.472 +	info->tx.sring = NULL;
  13.473 +	info->rx.sring = NULL;
  13.474  
  13.475  	if (info->irq)
  13.476  		unbind_from_irqhandler(info->irq, info->netdev);
    14.1 --- a/xen/include/public/io/netif.h	Thu Dec 01 11:10:40 2005 +0100
    14.2 +++ b/xen/include/public/io/netif.h	Thu Dec 01 11:27:27 2005 +0100
    14.3 @@ -9,6 +9,8 @@
    14.4  #ifndef __XEN_PUBLIC_IO_NETIF_H__
    14.5  #define __XEN_PUBLIC_IO_NETIF_H__
    14.6  
    14.7 +#include "ring.h"
    14.8 +
    14.9  typedef struct netif_tx_request {
   14.10      grant_ref_t gref;      /* Reference to buffer page */
   14.11      uint16_t offset:15;    /* Offset within buffer page */
   14.12 @@ -35,57 +37,12 @@ typedef struct {
   14.13  } netif_rx_response_t;
   14.14  
   14.15  /*
   14.16 - * We use a special capitalised type name because it is _essential_ that all 
   14.17 - * arithmetic on indexes is done on an integer type of the correct size.
   14.18 + * Generate netif ring structures and types.
   14.19   */
   14.20 -typedef uint32_t NETIF_RING_IDX;
   14.21 -
   14.22 -/*
   14.23 - * Ring indexes are 'free running'. That is, they are not stored modulo the
   14.24 - * size of the ring buffer. The following macros convert a free-running counter
   14.25 - * into a value that can directly index a ring-buffer array.
   14.26 - */
   14.27 -#define MASK_NETIF_RX_IDX(_i) ((_i)&(NETIF_RX_RING_SIZE-1))
   14.28 -#define MASK_NETIF_TX_IDX(_i) ((_i)&(NETIF_TX_RING_SIZE-1))
   14.29 -
   14.30 -#define NETIF_TX_RING_SIZE 256
   14.31 -#define NETIF_RX_RING_SIZE 256
   14.32  
   14.33 -/* This structure must fit in a memory page. */
   14.34 -typedef struct netif_tx_interface {
   14.35 -    /*
   14.36 -     * Frontend places packets into ring at tx_req_prod.
   14.37 -     * Frontend receives event when tx_resp_prod passes tx_event.
   14.38 -     * 'req_cons' is a shadow of the backend's request consumer -- the frontend
   14.39 -     * may use it to determine if all queued packets have been seen by the
   14.40 -     * backend.
   14.41 -     */
   14.42 -    NETIF_RING_IDX req_prod;
   14.43 -    NETIF_RING_IDX req_cons;
   14.44 -    NETIF_RING_IDX resp_prod;
   14.45 -    NETIF_RING_IDX event;
   14.46 -    union {
   14.47 -        netif_tx_request_t  req;
   14.48 -        netif_tx_response_t resp;
   14.49 -    } ring[NETIF_TX_RING_SIZE];
   14.50 -} netif_tx_interface_t;
   14.51 +DEFINE_RING_TYPES(netif_tx, netif_tx_request_t, netif_tx_response_t);
   14.52 +DEFINE_RING_TYPES(netif_rx, netif_rx_request_t, netif_rx_response_t);
   14.53  
   14.54 -/* This structure must fit in a memory page. */
   14.55 -typedef struct netif_rx_interface {
   14.56 -    /*
   14.57 -     * Frontend places empty buffers into ring at rx_req_prod.
   14.58 -     * Frontend receives event when rx_resp_prod passes rx_event.
   14.59 -     */
   14.60 -    NETIF_RING_IDX req_prod;
   14.61 -    NETIF_RING_IDX resp_prod;
   14.62 -    NETIF_RING_IDX event;
   14.63 -    union {
   14.64 -        netif_rx_request_t  req;
   14.65 -        netif_rx_response_t resp;
   14.66 -    } ring[NETIF_RX_RING_SIZE];
   14.67 -} netif_rx_interface_t;
   14.68 -
   14.69 -/* Descriptor status values */
   14.70  #define NETIF_RSP_DROPPED         -2
   14.71  #define NETIF_RSP_ERROR           -1
   14.72  #define NETIF_RSP_OKAY             0
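
Note: DEFINE_RING_TYPES(netif_tx, ...) generates roughly the following types
(a sketch pieced together from the field names this patch uses -- it is not
the literal macro expansion; ring.h below is authoritative):

	union netif_tx_sring_entry {
	    netif_tx_request_t  req;
	    netif_tx_response_t rsp;
	};

	struct netif_tx_sring {                 /* the shared page */
	    RING_IDX req_prod;
	    RING_IDX rsp_prod;
	    RING_IDX rsp_event;                 /* notify client when rsp_prod == rsp_event */
	    uint8_t  server_is_sleeping;        /* notify server to kick off work */
	    union netif_tx_sring_entry ring[1]; /* variable-length */
	};

	typedef struct netif_tx_front_ring {    /* frontend-private state */
	    RING_IDX req_prod_pvt;              /* produced but not yet pushed */
	    RING_IDX rsp_cons;                  /* responses consumed so far */
	    unsigned int nr_ents;
	    struct netif_tx_sring *sring;
	} netif_tx_front_ring_t;

	typedef struct netif_tx_back_ring {     /* backend-private state */
	    RING_IDX rsp_prod_pvt;              /* produced but not yet pushed */
	    RING_IDX req_cons;                  /* requests consumed so far */
	    unsigned int nr_ents;
	    struct netif_tx_sring *sring;
	} netif_tx_back_ring_t;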
    15.1 --- a/xen/include/public/io/ring.h	Thu Dec 01 11:10:40 2005 +0100
    15.2 +++ b/xen/include/public/io/ring.h	Thu Dec 01 11:27:27 2005 +0100
    15.3 @@ -1,3 +1,6 @@
    15.4 +
    15.5 +
    15.6 +
    15.7  /*
    15.8   * Shared producer-consumer ring macros.
    15.9   * Tim Deegan and Andrew Warfield November 2004.
   15.10 @@ -22,7 +25,7 @@ typedef unsigned int RING_IDX;
   15.11   * power of two (so we can mask with (size-1) to loop around).
   15.12   */
   15.13  #define __RING_SIZE(_s, _sz) \
   15.14 -    (__RD32(((_sz) - 2*sizeof(RING_IDX)) / sizeof((_s)->ring[0])))
   15.15 +    (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
   15.16  
   15.17  /*
   15.18   *  Macros to make the correct C datatypes for a new kind of ring.
   15.19 @@ -65,6 +68,8 @@ union __name##_sring_entry {            
   15.20  struct __name##_sring {                                                 \
   15.21      RING_IDX req_prod;                                                  \
   15.22      RING_IDX rsp_prod;                                                  \
   15.23 +    RING_IDX rsp_event; /* notify client when rsp_prod == rsp_event */  \
   15.24 +    uint8_t  server_is_sleeping; /* notify server to kick off work  */  \
   15.25      union __name##_sring_entry ring[1]; /* variable-length */           \
   15.26  };                                                                      \
   15.27                                                                          \
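
Note: rsp_event and server_is_sleeping together replace the old per-ring
'event' fields. A condensed view of the transmit-notification handshake as
this patch wires it up (frontend side from netfront.c, backend side from
netback.c; locking elided):

	/* Frontend, after queueing a request: */
	np->tx.req_prod_pvt = i + 1;
	RING_PUSH_REQUESTS(&np->tx);               /* publish req_prod */
	mb();                                      /* publish before testing */
	if (np->tx.sring->server_is_sleeping) {    /* only kick an idle backend */
		np->tx.sring->server_is_sleeping = 0;
		notify_remote_via_irq(np->irq);
	}

	/* Backend, on finding no more work (the non-pipelined case): */
	netif->tx.sring->server_is_sleeping = 1;   /* advertise that we idle */
	mb();                                      /* close the race with the */
	if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) { /* frontend's push   */
		netif->tx.sring->server_is_sleeping = 0;
		add_to_net_schedule_list_tail(netif);
	}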