direct-io.hg

changeset 9378:bd930874222e

Clean up netfront. Eliminate some macros in favour of inline functions.
Fix allocation of the receive batching arrays: they were global, shared by
all interfaces, and should be allocated per interface.
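
For background on the first part of the change: the new add_id_to_freelist()
and get_id_from_freelist() helpers (which replace the ADD_ID_TO_FREELIST and
GET_ID_FROM_FREELIST macros) keep an index-chained freelist inside the
tx_skbs[]/rx_skbs[] pointer arrays themselves. Slot 0 holds the index of the
first free slot, and each free slot holds the index of the next one, stored as
a pointer-sized integer cast into the array. The standalone sketch below
illustrates the scheme with plain void pointers; it is illustrative only, and
its names are hypothetical rather than taken from the driver.

#include <stdio.h>

#define NR_SLOTS 8

/* Slot 0 is never handed out; it stores the index of the first free slot. */
static void *slots[NR_SLOTS];

static void add_to_freelist(void **list, unsigned short id)
{
	list[id] = list[0];                   /* chain the old head behind us */
	list[0]  = (void *)(unsigned long)id; /* slot 'id' becomes the head   */
}

static unsigned short get_from_freelist(void **list)
{
	unsigned short id = (unsigned short)(unsigned long)list[0];
	list[0] = list[id];                   /* unlink the head slot         */
	return id;
}

int main(void)
{
	unsigned int i, id;

	/* Initially, every slot from 1 to NR_SLOTS-1 is free. */
	for (i = 1; i < NR_SLOTS; i++)
		add_to_freelist(slots, i);

	id = get_from_freelist(slots);   /* claim a slot; it can now hold a */
	slots[id] = "an skb pointer";    /* real pointer, e.g. an sk_buff   */
	printf("got slot %u\n", id);

	add_to_freelist(slots, id);      /* release it: reusable again      */
	return 0;
}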

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Mar 22 16:48:48 2006 +0100 (2006-03-22)
parents 787b39f5fc83
children 5d3c2cb42ec4
files linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Mar 22 16:48:00 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Mar 22 16:48:48 2006 +0100
     1.3 @@ -68,18 +68,12 @@
     1.4  #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
     1.5  #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
     1.6  
     1.7 -#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
     1.8 -
     1.9 -#define init_skb_shinfo(_skb)                         \
    1.10 -    do {                                              \
    1.11 -        atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
    1.12 -        skb_shinfo(_skb)->nr_frags = 0;               \
    1.13 -        skb_shinfo(_skb)->frag_list = NULL;           \
    1.14 -    } while (0)
    1.15 -
    1.16 -static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
    1.17 -static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
    1.18 -static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
    1.19 +static inline void init_skb_shinfo(struct sk_buff *skb)
    1.20 +{
    1.21 +	atomic_set(&(skb_shinfo(skb)->dataref), 1);
    1.22 +	skb_shinfo(skb)->nr_frags = 0;
    1.23 +	skb_shinfo(skb)->frag_list = NULL;
    1.24 +}
    1.25  
    1.26  struct netfront_info
    1.27  {
    1.28 @@ -134,16 +128,28 @@ struct netfront_info
    1.29  	int tx_ring_ref;
    1.30  	int rx_ring_ref;
    1.31  	u8 mac[ETH_ALEN];
    1.32 +
    1.33 +	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
    1.34 +	multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
    1.35 +	mmu_update_t rx_mmu[NET_RX_RING_SIZE];
    1.36  };
    1.37  
    1.38 -/* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
    1.39 -#define ADD_ID_TO_FREELIST(_list, _id)			\
    1.40 -	(_list)[(_id)] = (_list)[0];			\
    1.41 -	(_list)[0]     = (void *)(unsigned long)(_id);
    1.42 -#define GET_ID_FROM_FREELIST(_list)				\
    1.43 -	({ unsigned long _id = (unsigned long)(_list)[0];	\
    1.44 -	   (_list)[0]  = (_list)[_id];				\
    1.45 -	   (unsigned short)_id; })
    1.46 +/*
     1.47 + * Access functions for acquiring/freeing slots in {tx,rx}_skbs[].
    1.48 + */
    1.49 +
    1.50 +static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
    1.51 +{
    1.52 +	list[id] = list[0];
    1.53 +	list[0]  = (void *)(unsigned long)id;
    1.54 +}
    1.55 +
    1.56 +static inline unsigned short get_id_from_freelist(struct sk_buff **list)
    1.57 +{
    1.58 +	unsigned int id = (unsigned int)(unsigned long)list[0];
    1.59 +	list[0] = list[id];
    1.60 +	return id;
    1.61 +}
    1.62  
    1.63  #ifdef DEBUG
    1.64  static char *be_state_name[] = {
    1.65 @@ -484,7 +490,7 @@ static void network_tx_buf_gc(struct net
    1.66  			gnttab_release_grant_reference(
    1.67  				&np->gref_tx_head, np->grant_tx_ref[id]);
    1.68  			np->grant_tx_ref[id] = GRANT_INVALID_REF;
    1.69 -			ADD_ID_TO_FREELIST(np->tx_skbs, id);
    1.70 +			add_id_to_freelist(np->tx_skbs, id);
    1.71  			dev_kfree_skb_irq(skb);
    1.72  		}
    1.73  
    1.74 @@ -545,9 +551,10 @@ static void network_alloc_rx_buffers(str
    1.75  		 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
    1.76  		 * tailroom then round down to SKB_DATA_ALIGN boundary.
    1.77  		 */
    1.78 -		skb = alloc_xen_skb(
    1.79 +		skb = __dev_alloc_skb(
    1.80  			((PAGE_SIZE - sizeof(struct skb_shared_info)) &
    1.81 -			 (-SKB_DATA_ALIGN(1))) - 16);
    1.82 +			 (-SKB_DATA_ALIGN(1))) - 16,
    1.83 +			GFP_ATOMIC|__GFP_NOWARN);
    1.84  		if (skb == NULL) {
    1.85  			/* Any skbuffs queued for refill? Force them out. */
    1.86  			if (i != 0)
    1.87 @@ -576,7 +583,7 @@ static void network_alloc_rx_buffers(str
    1.88  
    1.89  		skb->dev = dev;
    1.90  
    1.91 -		id = GET_ID_FROM_FREELIST(np->rx_skbs);
    1.92 +		id = get_id_from_freelist(np->rx_skbs);
    1.93  
    1.94  		np->rx_skbs[id] = skb;
    1.95  
    1.96 @@ -588,13 +595,13 @@ static void network_alloc_rx_buffers(str
    1.97  						  np->xbdev->otherend_id,
    1.98  						  __pa(skb->head) >> PAGE_SHIFT);
    1.99  		RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
   1.100 -		rx_pfn_array[i] = virt_to_mfn(skb->head);
   1.101 +		np->rx_pfn_array[i] = virt_to_mfn(skb->head);
   1.102  
   1.103  		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   1.104  			/* Remove this page before passing back to Xen. */
   1.105  			set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
   1.106  					    INVALID_P2M_ENTRY);
   1.107 -			MULTI_update_va_mapping(rx_mcl+i,
   1.108 +			MULTI_update_va_mapping(np->rx_mcl+i,
   1.109  						(unsigned long)skb->head,
   1.110  						__pte(0), 0);
   1.111  		}
   1.112 @@ -603,7 +610,7 @@ static void network_alloc_rx_buffers(str
    1.113  	/* Tell the balloon driver what is going on. */
   1.114  	balloon_update_driver_allowance(i);
   1.115  
   1.116 -	reservation.extent_start = rx_pfn_array;
   1.117 +	reservation.extent_start = np->rx_pfn_array;
   1.118  	reservation.nr_extents   = i;
   1.119  	reservation.extent_order = 0;
   1.120  	reservation.address_bits = 0;
   1.121 @@ -611,19 +618,19 @@ static void network_alloc_rx_buffers(str
   1.122  
   1.123  	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
   1.124  		/* After all PTEs have been zapped, flush the TLB. */
   1.125 -		rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
   1.126 +		np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
   1.127  			UVMF_TLB_FLUSH|UVMF_ALL;
   1.128  
   1.129  		/* Give away a batch of pages. */
   1.130 -		rx_mcl[i].op = __HYPERVISOR_memory_op;
   1.131 -		rx_mcl[i].args[0] = XENMEM_decrease_reservation;
   1.132 -		rx_mcl[i].args[1] = (unsigned long)&reservation;
   1.133 +		np->rx_mcl[i].op = __HYPERVISOR_memory_op;
   1.134 +		np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
   1.135 +		np->rx_mcl[i].args[1] = (unsigned long)&reservation;
   1.136  
   1.137  		/* Zap PTEs and give away pages in one big multicall. */
   1.138 -		(void)HYPERVISOR_multicall(rx_mcl, i+1);
   1.139 +		(void)HYPERVISOR_multicall(np->rx_mcl, i+1);
   1.140  
   1.141  		/* Check return status of HYPERVISOR_memory_op(). */
   1.142 -		if (unlikely(rx_mcl[i].result != i))
   1.143 +		if (unlikely(np->rx_mcl[i].result != i))
   1.144  			panic("Unable to reduce memory reservation\n");
   1.145  	} else
   1.146  		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
   1.147 @@ -656,7 +663,8 @@ static int network_start_xmit(struct sk_
   1.148  	if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   1.149  		     PAGE_SIZE)) {
   1.150  		struct sk_buff *nskb;
   1.151 -		if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   1.152 +		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC|__GFP_NOWARN);
   1.153 +		if (unlikely(nskb == NULL))
   1.154  			goto drop;
   1.155  		skb_put(nskb, skb->len);
   1.156  		memcpy(nskb->data, skb->data, skb->len);
   1.157 @@ -674,7 +682,7 @@ static int network_start_xmit(struct sk_
   1.158  
   1.159  	i = np->tx.req_prod_pvt;
   1.160  
   1.161 -	id = GET_ID_FROM_FREELIST(np->tx_skbs);
   1.162 +	id = get_id_from_freelist(np->tx_skbs);
   1.163  	np->tx_skbs[id] = skb;
   1.164  
   1.165  	tx = RING_GET_REQUEST(&np->tx, i);
   1.166 @@ -739,8 +747,8 @@ static int netif_poll(struct net_device 
   1.167  	struct sk_buff *skb, *nskb;
   1.168  	netif_rx_response_t *rx;
   1.169  	RING_IDX i, rp;
   1.170 -	mmu_update_t *mmu = rx_mmu;
   1.171 -	multicall_entry_t *mcl = rx_mcl;
   1.172 +	mmu_update_t *mmu = np->rx_mmu;
   1.173 +	multicall_entry_t *mcl = np->rx_mcl;
   1.174  	int work_done, budget, more_to_do = 1;
   1.175  	struct sk_buff_head rxq;
   1.176  	unsigned long flags;
   1.177 @@ -796,7 +804,7 @@ static int netif_poll(struct net_device 
   1.178  		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
   1.179  
   1.180  		skb = np->rx_skbs[rx->id];
   1.181 -		ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
   1.182 +		add_id_to_freelist(np->rx_skbs, rx->id);
   1.183  
   1.184  		/* NB. We handle skb overflow later. */
   1.185  		skb->data = skb->head + rx->offset;
   1.186 @@ -831,14 +839,14 @@ static int netif_poll(struct net_device 
   1.187  	balloon_update_driver_allowance(-work_done);
   1.188  
   1.189  	/* Do all the remapping work, and M2P updates, in one big hypercall. */
   1.190 -	if (likely((mcl - rx_mcl) != 0)) {
   1.191 +	if (likely((mcl - np->rx_mcl) != 0)) {
   1.192  		mcl->op = __HYPERVISOR_mmu_update;
   1.193 -		mcl->args[0] = (unsigned long)rx_mmu;
   1.194 -		mcl->args[1] = mmu - rx_mmu;
   1.195 +		mcl->args[0] = (unsigned long)np->rx_mmu;
   1.196 +		mcl->args[1] = mmu - np->rx_mmu;
   1.197  		mcl->args[2] = 0;
   1.198  		mcl->args[3] = DOMID_SELF;
   1.199  		mcl++;
   1.200 -		(void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   1.201 +		(void)HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
   1.202  	}
   1.203  
   1.204  	while ((skb = __skb_dequeue(&rxq)) != NULL) {
   1.205 @@ -871,7 +879,8 @@ static int netif_poll(struct net_device 
   1.206  					       16 - (skb->data - skb->head));
   1.207  			}
   1.208  
   1.209 -			nskb = alloc_xen_skb(skb->len + 2);
   1.210 +			nskb = __dev_alloc_skb(skb->len + 2,
   1.211 +					       GFP_ATOMIC|__GFP_NOWARN);
   1.212  			if (nskb != NULL) {
   1.213  				skb_reserve(nskb, 2);
   1.214  				skb_put(nskb, skb->len);
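
A side note on the receive-buffer sizing in network_alloc_rx_buffers() above:
the requested length,
((PAGE_SIZE - sizeof(struct skb_shared_info)) & (-SKB_DATA_ALIGN(1))) - 16,
rounds the per-page data area down to the SKB_DATA_ALIGN boundary and then
subtracts the 16 bytes of headroom that __dev_alloc_skb() reserves, so the
data area plus the shared info tailroom still fits within one page. The worked
example below uses assumed, illustrative values (typical x86 defaults of the
era, not checked against this tree).

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;  /* assumed PAGE_SIZE                       */
	unsigned long shinfo    = 128;   /* assumed sizeof(struct skb_shared_info)  */
	unsigned long align     = 32;    /* assumed SKB_DATA_ALIGN(1)               */

	/* Round the usable area down to the alignment boundary ...                 */
	unsigned long data = (page_size - shinfo) & -align;   /* 3968               */
	/* ... then leave the 16 bytes of headroom that __dev_alloc_skb() adds.     */
	unsigned long len  = data - 16;                        /* 3952               */

	printf("__dev_alloc_skb(%lu, GFP_ATOMIC|__GFP_NOWARN)\n", len);
	return 0;
}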