ia64/xen-unstable
changeset 11099:922931ed15ae
[NET] back: Tidy up and remove communications via references to global structures.
Signed-off-by: Steven Smith <ssmith@xensource.com>
Signed-off-by: Steven Smith <ssmith@xensource.com>
| author | kfraser@localhost.localdomain |
| --- | --- |
| date | Mon Aug 14 11:32:24 2006 +0100 (2006-08-14) |
| parents | 4852b556d912 |
| children | 905ff6e616cc |
| files | linux-2.6-xen-sparse/drivers/xen/netback/netback.c |
line diff
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Aug 14 11:18:37 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Mon Aug 14 11:32:24 2006 +0100
@@ -68,10 +68,6 @@ static struct timer_list net_timer;
 #define MAX_PENDING_REQS 256
 
 static struct sk_buff_head rx_queue;
-static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
-static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
-static unsigned char rx_notify[NR_IRQS];
 
 static unsigned long mmap_vstart;
 #define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
@@ -314,11 +310,23 @@ int xen_network_done(void)
 }
 #endif
 
-static u16 netbk_gop_frag(netif_t *netif, struct page *page, int count, int i)
+struct netrx_pending_operations {
+	unsigned trans_prod, trans_cons;
+	unsigned mmu_prod, mmu_cons;
+	unsigned mcl_prod, mcl_cons;
+	unsigned meta_prod, meta_cons;
+	mmu_update_t *mmu;
+	gnttab_transfer_t *trans;
+	multicall_entry_t *mcl;
+	struct netbk_rx_meta *meta;
+};
+
+static u16 netbk_gop_frag(netif_t *netif, struct page *page,
+			  int i, struct netrx_pending_operations *npo)
 {
-	multicall_entry_t *mcl = rx_mcl + count;
-	mmu_update_t *mmu = rx_mmu + count;
-	gnttab_transfer_t *gop = grant_rx_op + count;
+	mmu_update_t *mmu;
+	gnttab_transfer_t *gop;
+	multicall_entry_t *mcl;
 	netif_rx_request_t *req;
 	unsigned long old_mfn, new_mfn;
 
@@ -334,46 +342,53 @@ static u16 netbk_gop_frag(netif_t *netif
 		 */
 		set_phys_to_machine(page_to_pfn(page), new_mfn);
 
+		mcl = npo->mcl + npo->mcl_prod++;
 		MULTI_update_va_mapping(mcl, (unsigned long)page_address(page),
 					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
 
+		mmu = npo->mmu + npo->mmu_prod++;
 		mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
 			MMU_MACHPHYS_UPDATE;
 		mmu->val = page_to_pfn(page);
 	}
 
 	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
+	gop = npo->trans + npo->trans_prod++;
 	gop->mfn = old_mfn;
 	gop->domid = netif->domid;
 	gop->ref = req->gref;
 	return req->id;
 }
 
-static void netbk_gop_skb(struct sk_buff *skb, struct netbk_rx_meta *meta,
-			  int count)
+static void netbk_gop_skb(struct sk_buff *skb,
+			  struct netrx_pending_operations *npo)
 {
 	netif_t *netif = netdev_priv(skb->dev);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int i;
 	int extra;
+	struct netbk_rx_meta *head_meta, *meta;
 
-	meta[count].frag.page_offset = skb_shinfo(skb)->gso_type;
-	meta[count].frag.size = skb_shinfo(skb)->gso_size;
-	extra = !!meta[count].frag.size + 1;
+	head_meta = npo->meta + npo->meta_prod++;
+	head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
+	head_meta->frag.size = skb_shinfo(skb)->gso_size;
+	extra = !!head_meta->frag.size + 1;
 
 	for (i = 0; i < nr_frags; i++) {
-		meta[++count].frag = skb_shinfo(skb)->frags[i];
-		meta[count].id = netbk_gop_frag(netif, meta[count].frag.page,
-						count, i + extra);
+		meta = npo->meta + npo->meta_prod++;
+		meta->frag = skb_shinfo(skb)->frags[i];
+		meta->id = netbk_gop_frag(netif, meta->frag.page,
					  i + extra, npo);
 	}
 
 	/*
	 * This must occur at the end to ensure that we don't trash
	 * skb_shinfo until we're done.
 	 */
-	meta[count - nr_frags].id = netbk_gop_frag(netif,
-						   virt_to_page(skb->data),
-						   count - nr_frags, 0);
+	head_meta->id = netbk_gop_frag(netif,
				       virt_to_page(skb->data),
				       0,
				       npo);
 	netif->rx.req_cons += nr_frags + extra;
 }
 
@@ -385,22 +400,28 @@ static inline void netbk_free_pages(int
 		put_page(meta[i].frag.page);
 }
 
-static int netbk_check_gop(int nr_frags, domid_t domid, int count)
+/* This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
+   used to set up the operations on the top of
+   netrx_pending_operations, which have since been done.  Check that
+   they didn't give any errors and advance over them. */
+static int netbk_check_gop(int nr_frags, domid_t domid, int count,
+			   struct netrx_pending_operations *npo)
 {
-	multicall_entry_t *mcl = rx_mcl + count;
-	gnttab_transfer_t *gop = grant_rx_op + count;
+	multicall_entry_t *mcl;
+	gnttab_transfer_t *gop;
 	int status = NETIF_RSP_OKAY;
 	int i;
 
 	for (i = 0; i <= nr_frags; i++) {
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			mcl = npo->mcl + npo->mcl_cons++;
 			/* The update_va_mapping() must not fail. */
 			BUG_ON(mcl->result != 0);
-			mcl++;
 		}
 
+		gop = npo->trans + npo->trans_cons++;
 		/* Check the reassignment error code. */
-		if (gop->status != 0) { 
+		if (gop->status != 0) {
 			DPRINTK("Bad status %d from grant transfer to DOM%u\n",
 				gop->status, domid);
 			/*
@@ -408,9 +429,8 @@ static int netbk_check_gop(int nr_frags,
 			 * but that should be a fatal error anyway.
 			 */
 			BUG_ON(gop->status == GNTST_bad_page);
-			status = NETIF_RSP_ERROR; 
+			status = NETIF_RSP_ERROR;
 		}
-		gop++;
 	}
 
 	return status;
@@ -449,9 +469,19 @@ static void net_rx_action(unsigned long
	 * Putting hundreds of bytes on the stack is considered rude.
	 * Static works because a tasklet can only be on one CPU at any time.
 	 */
+	static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
+	static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
+	static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
+	static unsigned char rx_notify[NR_IRQS];
 	static u16 notify_list[NET_RX_RING_SIZE];
 	static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
 
+	struct netrx_pending_operations npo = {
+		mmu: rx_mmu,
+		trans: grant_rx_op,
+		mcl: rx_mcl,
+		meta: meta};
+
 	skb_queue_head_init(&rxq);
 
 	count = 0;
@@ -471,7 +501,7 @@ static void net_rx_action(unsigned long
 			break;
 		}
 
-		netbk_gop_skb(skb, meta, count);
+		netbk_gop_skb(skb, &npo);
 
 		count += nr_frags + 1;
 
@@ -486,8 +516,11 @@ static void net_rx_action(unsigned long
 		return;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-		mcl = rx_mcl + count;
+		BUG_ON(npo.mcl_prod == 0);
 
+		mcl = npo.mcl + npo.mcl_prod++;
+
+		BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
 		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
 
 		mcl->op = __HYPERVISOR_mmu_update;
@@ -495,13 +528,17 @@ static void net_rx_action(unsigned long
 		mcl->args[1] = count;
 		mcl->args[2] = 0;
 		mcl->args[3] = DOMID_SELF;
-
-		ret = HYPERVISOR_multicall(rx_mcl, count + 1);
-		BUG_ON(ret != 0);
 	}
 
-	ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, count);
+	mcl = npo.mcl + npo.mcl_prod++;
+	mcl->op = __HYPERVISOR_grant_table_op;
+	mcl->args[0] = GNTTABOP_transfer;
+	mcl->args[1] = (unsigned long)grant_rx_op;
+	mcl->args[2] = npo.trans_prod;
+
+	ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
 	BUG_ON(ret != 0);
+	BUG_ON(mcl->result != 0);
 
 	count = 0;
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -515,10 +552,11 @@ static void net_rx_action(unsigned long
 		netif->stats.tx_bytes += skb->len;
 		netif->stats.tx_packets++;
 
-		netbk_free_pages(nr_frags, meta + count + 1);
-		status = netbk_check_gop(nr_frags, netif->domid, count);
+		netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
+		status = netbk_check_gop(nr_frags, netif->domid, count,
+					 &npo);
 
-		id = meta[count].id;
+		id = meta[npo.meta_cons].id;
 		flags = nr_frags ? NETRXF_more_data : 0;
 
 		if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
@@ -532,7 +570,7 @@ static void net_rx_action(unsigned long
 
 		extra = NULL;
 
-		if (meta[count].frag.size) {
+		if (meta[npo.meta_cons].frag.size) {
 			struct netif_extra_info *gso =
 				(struct netif_extra_info *)
 				RING_GET_RESPONSE(&netif->rx,
@@ -543,7 +581,7 @@ static void net_rx_action(unsigned long
 			else
 				resp->flags |= NETRXF_extra_info;
 
-			gso->u.gso.size = meta[count].frag.size;
+			gso->u.gso.size = meta[npo.meta_cons].frag.size;
 			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 			gso->u.gso.pad = 0;
 			gso->u.gso.features = 0;
@@ -553,7 +591,8 @@ static void net_rx_action(unsigned long
 			extra = gso;
 		}
 
-		netbk_add_frag_responses(netif, status, meta + count + 1,
+		netbk_add_frag_responses(netif, status,
+					 meta + npo.meta_cons + 1,
 					 nr_frags);
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
@@ -569,7 +608,8 @@ static void net_rx_action(unsigned long
 
 		netif_put(netif);
 		dev_kfree_skb(skb);
-		count += nr_frags + 1;
+
+		npo.meta_cons += nr_frags + 1;
 	}
 
 	while (notify_nr != 0) {