ia64/xen-unstable

changeset 10855:03c8002068d9

Merge.
author kfraser@localhost.localdomain
date Fri Jul 28 16:53:58 2006 +0100 (2006-07-28)
parents 0115a927ee99
children 1fb835267a50
files linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Jul 28 16:53:35 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Jul 28 16:53:58 2006 +0100
     1.3 @@ -99,17 +99,17 @@ struct netfront_info {
     1.4  	struct timer_list rx_refill_timer;
     1.5  
     1.6  	/*
     1.7 -	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
     1.8 -	 * array is an index into a chain of free entries.
     1.9 +	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
    1.10 +	 * is an index into a chain of free entries.
    1.11  	 */
    1.12  	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
    1.13 -	struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
    1.14 +	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
    1.15  
    1.16  #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
    1.17  	grant_ref_t gref_tx_head;
    1.18  	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
    1.19  	grant_ref_t gref_rx_head;
    1.20 -	grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
    1.21 +	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
    1.22  
    1.23  	struct xenbus_device *xbdev;
    1.24  	int tx_ring_ref;
    1.25 @@ -122,7 +122,7 @@ struct netfront_info {
    1.26  };
    1.27  
    1.28  /*
    1.29 - * Access macros for acquiring freeing slots in {tx,rx}_skbs[].
    1.30 + * Access macros for acquiring and freeing slots in tx_skbs[].
    1.31   */
    1.32  
    1.33  static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
    1.34 @@ -138,6 +138,29 @@ static inline unsigned short get_id_from
    1.35  	return id;
    1.36  }
    1.37  
    1.38 +static inline int xennet_rxidx(RING_IDX idx)
    1.39 +{
    1.40 +	return idx & (NET_RX_RING_SIZE - 1);
    1.41 +}
    1.42 +
    1.43 +static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
    1.44 +						RING_IDX ri)
    1.45 +{
    1.46 +	int i = xennet_rxidx(ri);
    1.47 +	struct sk_buff *skb = np->rx_skbs[i];
    1.48 +	np->rx_skbs[i] = NULL;
    1.49 +	return skb;
    1.50 +}
    1.51 +
    1.52 +static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
    1.53 +					    RING_IDX ri)
    1.54 +{
    1.55 +	int i = xennet_rxidx(ri);
    1.56 +	grant_ref_t ref = np->grant_rx_ref[i];
    1.57 +	np->grant_rx_ref[i] = GRANT_INVALID_REF;
    1.58 +	return ref;
    1.59 +}
    1.60 +
    1.61  #define DPRINTK(fmt, args...)				\
    1.62  	pr_debug("netfront (%s:%d) " fmt,		\
    1.63  		 __FUNCTION__, __LINE__, ##args)
    1.64 @@ -598,8 +621,9 @@ static void network_alloc_rx_buffers(str
    1.65  
    1.66  		skb->dev = dev;
    1.67  
    1.68 -		id = get_id_from_freelist(np->rx_skbs);
    1.69 +		id = xennet_rxidx(req_prod + i);
    1.70  
    1.71 +		BUG_ON(np->rx_skbs[id]);
    1.72  		np->rx_skbs[id] = skb;
    1.73  
    1.74  		RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
    1.75 @@ -840,6 +864,19 @@ static irqreturn_t netif_int(int irq, vo
    1.76  	return IRQ_HANDLED;
    1.77  }
    1.78  
    1.79 +static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
    1.80 +				grant_ref_t ref)
    1.81 +{
    1.82 +	int new = xennet_rxidx(np->rx.req_prod_pvt);
    1.83 +
    1.84 +	BUG_ON(np->rx_skbs[new]);
    1.85 +	np->rx_skbs[new] = skb;
    1.86 +	np->grant_rx_ref[new] = ref;
    1.87 +	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
    1.88 +	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
    1.89 +	np->rx.req_prod_pvt++;
    1.90 +	RING_PUSH_REQUESTS(&np->rx);
    1.91 +}
    1.92  
    1.93  static int netif_poll(struct net_device *dev, int *pbudget)
    1.94  {
    1.95 @@ -874,12 +911,15 @@ static int netif_poll(struct net_device 
    1.96  	     i++, work_done++) {
    1.97  		rx = RING_GET_RESPONSE(&np->rx, i);
    1.98  
    1.99 +		skb = xennet_get_rx_skb(np, i);
   1.100 +		ref = xennet_get_rx_ref(np, i);
   1.101 +
   1.102  		/*
   1.103  		 * This definitely indicates a bug, either in this driver or in
   1.104  		 * the backend driver. In future this should flag the bad
   1.105  		 * situation to the system controller to reboot the backend.
   1.106  		 */
   1.107 -		if ((ref = np->grant_rx_ref[rx->id]) == GRANT_INVALID_REF) {
   1.108 +		if (ref == GRANT_INVALID_REF) {
   1.109  			WPRINTK("Bad rx response id %d.\n", rx->id);
   1.110  			work_done--;
   1.111  			continue;
   1.112 @@ -890,21 +930,12 @@ static int netif_poll(struct net_device 
   1.113  			if (net_ratelimit())
   1.114  				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
   1.115  					rx->id, rx->status);
   1.116 -			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
   1.117 -				rx->id;
   1.118 -			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref =
   1.119 -				ref;
   1.120 -			np->rx.req_prod_pvt++;
   1.121 -			RING_PUSH_REQUESTS(&np->rx);
   1.122 +			xennet_move_rx_slot(np, skb, ref);
   1.123  			work_done--;
   1.124  			continue;
   1.125  		}
   1.126  
   1.127  		gnttab_release_grant_reference(&np->gref_rx_head, ref);
   1.128 -		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
   1.129 -
   1.130 -		skb = np->rx_skbs[rx->id];
   1.131 -		add_id_to_freelist(np->rx_skbs, rx->id);
   1.132  
   1.133  		/* NB. We handle skb overflow later. */
   1.134  		skb->data = skb->head + rx->offset;
   1.135 @@ -1158,15 +1189,23 @@ static void network_connect(struct net_d
   1.136  	}
   1.137  
   1.138  	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
   1.139 -	for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) {
   1.140 -		if ((unsigned long)np->rx_skbs[i] < PAGE_OFFSET)
   1.141 +	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
   1.142 +		if (!np->rx_skbs[i])
   1.143  			continue;
   1.144 +
   1.145  		gnttab_grant_foreign_transfer_ref(
   1.146  			np->grant_rx_ref[i], np->xbdev->otherend_id,
   1.147  			__pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
   1.148  		RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
   1.149  			np->grant_rx_ref[i];
   1.150 -		RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
   1.151 +		RING_GET_REQUEST(&np->rx, requeue_idx)->id = requeue_idx;
   1.152 +
   1.153 +		if (requeue_idx < i) {
   1.154 +			np->rx_skbs[requeue_idx] = np->rx_skbs[i];
   1.155 +			np->grant_rx_ref[requeue_idx] = np->grant_rx_ref[i];
   1.156 +			np->rx_skbs[i] = NULL;
   1.157 +			np->grant_rx_ref[i] = GRANT_INVALID_REF;
   1.158 +		}
   1.159  		requeue_idx++;
   1.160  	}
   1.161  
   1.162 @@ -1392,8 +1431,8 @@ static struct net_device * __devinit cre
   1.163  		np->grant_tx_ref[i] = GRANT_INVALID_REF;
   1.164  	}
   1.165  
   1.166 -	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
   1.167 -		np->rx_skbs[i] = (void *)((unsigned long) i+1);
   1.168 +	for (i = 0; i < NET_RX_RING_SIZE; i++) {
   1.169 +		np->rx_skbs[i] = NULL;
   1.170  		np->grant_rx_ref[i] = GRANT_INVALID_REF;
   1.171  	}
   1.172
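
For readers following the change, below is a minimal standalone sketch (not the driver itself) of the RX bookkeeping scheme this changeset switches to. tx_skbs[] keeps the old trick of chaining free entries through the array, while RX slots are now addressed directly by the ring index masked to the ring size, so the slot for a request or response at ring index ri is always rx_skbs[ri & (NET_RX_RING_SIZE - 1)]. The names rx_ring_model, post_rx_buffer and take_rx_buffer are invented for illustration; the real code operates on struct netfront_info and the shared-ring macros shown in the diff, and the constants below are assumptions for the sketch rather than values copied from the Xen headers.

/* Simplified model only -- the ring size and the GRANT_INVALID_REF
 * sentinel are assumptions for this sketch, not quoted from the headers. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NET_RX_RING_SIZE  256   /* power of two, like the real ring */
#define GRANT_INVALID_REF 0     /* sentinel meaning "no grant ref held" */

typedef unsigned int RING_IDX;
typedef int grant_ref_t;

struct rx_ring_model {
	void        *rx_skbs[NET_RX_RING_SIZE];      /* outstanding buffers */
	grant_ref_t  grant_rx_ref[NET_RX_RING_SIZE]; /* matching grant refs */
};

/* Mirror of xennet_rxidx(): map a free-running ring index onto a slot. */
static int rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

/* Posting a buffer: the slot is dictated by the producer index, so no
 * freelist is needed; the slot must be empty (the BUG_ON in the patch). */
static void post_rx_buffer(struct rx_ring_model *r, RING_IDX req_prod,
			   void *skb, grant_ref_t ref)
{
	int id = rxidx(req_prod);

	assert(r->rx_skbs[id] == NULL);
	r->rx_skbs[id] = skb;
	r->grant_rx_ref[id] = ref;
}

/* Consuming a response: fetch and clear the slot named by the response
 * index, mirroring xennet_get_rx_skb()/xennet_get_rx_ref(). */
static void *take_rx_buffer(struct rx_ring_model *r, RING_IDX rsp_cons,
			    grant_ref_t *ref_out)
{
	int id = rxidx(rsp_cons);
	void *skb = r->rx_skbs[id];

	*ref_out = r->grant_rx_ref[id];
	r->rx_skbs[id] = NULL;
	r->grant_rx_ref[id] = GRANT_INVALID_REF;
	return skb;
}

int main(void)
{
	struct rx_ring_model r = { { NULL }, { GRANT_INVALID_REF } };
	static char fake_skb[1];
	grant_ref_t ref;

	post_rx_buffer(&r, 260, fake_skb, 7);      /* ring index 260 wraps to slot 4 */
	void *skb = take_rx_buffer(&r, 260, &ref);
	printf("slot %d: skb=%p ref=%d\n", rxidx(260), skb, ref);
	assert(skb == fake_skb && ref == 7);
	return 0;
}

The xennet_move_rx_slot() path in the patch is the recovery case of the same idea: when a response cannot be used, the skb and grant ref are simply re-posted into the slot named by the current producer index rather than being pushed back onto a freelist.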