ia64/xen-unstable
changeset 1384:ef31166760c9
bitkeeper revision 1.891.1.20 (40a49b4eh4S4Ig3jHhBe8oIy85TETw)
Add NAPI support to network frontend interface.
Improve batching in network frontend interface.
author | kaf24@scramble.cl.cam.ac.uk |
---|---|
date | Fri May 14 10:11:26 2004 +0000 (2004-05-14) |
parents | e9b14013a2e7 |
children | a693ae3c0f60 |
files | xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c |
line diff
1.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c Fri May 14 10:10:54 2004 +0000 1.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c Fri May 14 10:11:26 2004 +0000 1.3 @@ -37,6 +37,10 @@ static void network_tx_buf_gc(struct net 1.4 static void network_alloc_rx_buffers(struct net_device *dev); 1.5 static void cleanup_module(void); 1.6 1.7 +static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE]; 1.8 +static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1]; 1.9 +static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE]; 1.10 + 1.11 static struct list_head dev_list; 1.12 1.13 struct net_private 1.14 @@ -178,8 +182,7 @@ static void network_alloc_rx_buffers(str 1.15 struct sk_buff *skb; 1.16 NETIF_RING_IDX i = np->rx->req_prod; 1.17 dom_mem_op_t op; 1.18 - unsigned long pfn_array[NETIF_RX_RING_SIZE]; 1.19 - int ret, nr_pfns = 0; 1.20 + int nr_pfns = 0; 1.21 1.22 /* Make sure the batch is large enough to be worthwhile (1/2 ring). */ 1.23 if ( unlikely((i - np->rx_resp_cons) > (NETIF_RX_RING_SIZE/2)) || 1.24 @@ -201,9 +204,14 @@ static void network_alloc_rx_buffers(str 1.25 1.26 np->rx->ring[MASK_NET_RX_IDX(i)].req.id = id; 1.27 1.28 - pfn_array[nr_pfns++] = virt_to_machine(skb->head) >> PAGE_SHIFT; 1.29 - HYPERVISOR_update_va_mapping((unsigned long)skb->head >> PAGE_SHIFT, 1.30 - (pte_t) { 0 }, UVMF_INVLPG); 1.31 + rx_pfn_array[nr_pfns] = virt_to_machine(skb->head) >> PAGE_SHIFT; 1.32 + 1.33 + rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping; 1.34 + rx_mcl[nr_pfns].args[0] = (unsigned long)skb->head >> PAGE_SHIFT; 1.35 + rx_mcl[nr_pfns].args[1] = 0; 1.36 + rx_mcl[nr_pfns].args[2] = 0; 1.37 + 1.38 + nr_pfns++; 1.39 } 1.40 while ( (++i - np->rx_resp_cons) != NETIF_RX_RING_SIZE ); 1.41 1.42 @@ -213,14 +221,22 @@ static void network_alloc_rx_buffers(str 1.43 */ 1.44 flush_page_update_queue(); 1.45 1.46 + /* After all PTEs have been zapped we blow away stale TLB entries. 
*/ 1.47 + rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB; 1.48 + 1.49 + /* Give away a batch of pages. */ 1.50 op.op = MEMOP_RESERVATION_DECREASE; 1.51 op.u.decrease.size = nr_pfns; 1.52 - op.u.decrease.pages = pfn_array; 1.53 - if ( (ret = HYPERVISOR_dom_mem_op(&op)) != nr_pfns ) 1.54 - { 1.55 - printk(KERN_WARNING "Unable to reduce memory reservation (%d)\n", ret); 1.56 - BUG(); 1.57 - } 1.58 + op.u.decrease.pages = rx_pfn_array; 1.59 + rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op; 1.60 + rx_mcl[nr_pfns].args[0] = (unsigned long)&op; 1.61 + 1.62 + /* Zap PTEs and give away pages in one big multicall. */ 1.63 + (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1); 1.64 + 1.65 + /* Check return status of HYPERVISOR_dom_mem_op(). */ 1.66 + if ( rx_mcl[nr_pfns].args[5] != nr_pfns ) 1.67 + panic("Unable to reduce memory reservation\n"); 1.68 1.69 np->rx->req_prod = i; 1.70 } 1.71 @@ -295,17 +311,36 @@ static void netif_int(int irq, void *dev 1.72 struct net_device *dev = dev_id; 1.73 struct net_private *np = dev->priv; 1.74 unsigned long flags; 1.75 - struct sk_buff *skb; 1.76 - netif_rx_response_t *rx; 1.77 - NETIF_RING_IDX i; 1.78 - mmu_update_t mmu; 1.79 1.80 spin_lock_irqsave(&np->tx_lock, flags); 1.81 network_tx_buf_gc(dev); 1.82 spin_unlock_irqrestore(&np->tx_lock, flags); 1.83 1.84 - again: 1.85 - for ( i = np->rx_resp_cons; i != np->rx->resp_prod; i++ ) 1.86 + if ( np->rx_resp_cons != np->rx->resp_prod ) 1.87 + netif_rx_schedule(dev); 1.88 +} 1.89 + 1.90 + 1.91 +static int netif_poll(struct net_device *dev, int *pbudget) 1.92 +{ 1.93 + struct net_private *np = dev->priv; 1.94 + struct sk_buff *skb; 1.95 + netif_rx_response_t *rx; 1.96 + NETIF_RING_IDX i; 1.97 + mmu_update_t *mmu = rx_mmu; 1.98 + multicall_entry_t *mcl = rx_mcl; 1.99 + int work_done, budget, more_to_do = 1; 1.100 + struct sk_buff_head rxq; 1.101 + unsigned long flags; 1.102 + 1.103 + skb_queue_head_init(&rxq); 1.104 + 1.105 + if ( (budget = *pbudget) > dev->quota ) 1.106 + budget = dev->quota; 1.107 + 
1.108 + for ( i = np->rx_resp_cons, work_done = 0; 1.109 + (i != np->rx->resp_prod) && (work_done < budget); 1.110 + i++, work_done++ ) 1.111 { 1.112 rx = &np->rx->ring[MASK_NET_RX_IDX(i)].resp; 1.113 1.114 @@ -317,38 +352,53 @@ static void netif_int(int irq, void *dev 1.115 /* Gate this error. We get a (valid) slew of them on suspend. */ 1.116 if ( np->state == NETIF_STATE_ACTIVE ) 1.117 printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status); 1.118 - dev_kfree_skb_any(skb); 1.119 + dev_kfree_skb(skb); 1.120 continue; 1.121 } 1.122 1.123 + skb->data = skb->tail = skb->head + (rx->addr & ~PAGE_MASK); 1.124 + skb_put(skb, rx->status); 1.125 + 1.126 + np->stats.rx_packets++; 1.127 + np->stats.rx_bytes += rx->status; 1.128 + 1.129 /* Remap the page. */ 1.130 - mmu.ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE; 1.131 - mmu.val = __pa(skb->head) >> PAGE_SHIFT; 1.132 - if ( HYPERVISOR_mmu_update(&mmu, 1) != 0 ) 1.133 - BUG(); 1.134 - HYPERVISOR_update_va_mapping((unsigned long)skb->head >> PAGE_SHIFT, 1.135 - (pte_t) { (rx->addr & PAGE_MASK) | 1.136 - __PAGE_KERNEL }, 1.137 - 0); 1.138 + mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE; 1.139 + mmu->val = __pa(skb->head) >> PAGE_SHIFT; 1.140 + mmu++; 1.141 + mcl->op = __HYPERVISOR_update_va_mapping; 1.142 + mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT; 1.143 + mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL; 1.144 + mcl->args[2] = 0; 1.145 + mcl++; 1.146 + 1.147 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 1.148 rx->addr >> PAGE_SHIFT; 1.149 1.150 - /* 1.151 - * Set up shinfo -- from alloc_skb This was particularily nasty: the 1.152 - * shared info is hidden at the back of the data area (presumably so it 1.153 - * can be shared), but on page flip it gets very spunked. 1.154 - */ 1.155 + __skb_queue_tail(&rxq, skb); 1.156 + } 1.157 + 1.158 + /* Do all the remapping work, and M->P updates, in one big hypercall. 
*/ 1.159 + if ( likely((mcl - rx_mcl) != 0) ) 1.160 + { 1.161 + mcl->op = __HYPERVISOR_mmu_update; 1.162 + mcl->args[0] = (unsigned long)rx_mmu; 1.163 + mcl->args[1] = mmu - rx_mmu; 1.164 + mcl++; 1.165 + (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl); 1.166 + } 1.167 + 1.168 + while ( (skb = __skb_dequeue(&rxq)) != NULL ) 1.169 + { 1.170 + /* Set the shared-info area, which is hidden behind the real data. */ 1.171 atomic_set(&(skb_shinfo(skb)->dataref), 1); 1.172 skb_shinfo(skb)->nr_frags = 0; 1.173 skb_shinfo(skb)->frag_list = NULL; 1.174 1.175 - skb->data = skb->tail = skb->head + (rx->addr & ~PAGE_MASK); 1.176 - skb_put(skb, rx->status); 1.177 + /* Ethernet-specific work. Delayed to here as it peeks the header. */ 1.178 skb->protocol = eth_type_trans(skb, dev); 1.179 1.180 - np->stats.rx_packets++; 1.181 - 1.182 - np->stats.rx_bytes += rx->status; 1.183 + /* Pass it up. */ 1.184 netif_rx(skb); 1.185 dev->last_rx = jiffies; 1.186 } 1.187 @@ -356,12 +406,28 @@ static void netif_int(int irq, void *dev 1.188 np->rx_resp_cons = i; 1.189 1.190 network_alloc_rx_buffers(dev); 1.191 - np->rx->event = np->rx_resp_cons + 1; 1.192 + 1.193 + *pbudget -= work_done; 1.194 + dev->quota -= work_done; 1.195 + 1.196 + if ( work_done < budget ) 1.197 + { 1.198 + local_irq_save(flags); 1.199 + 1.200 + np->rx->event = i + 1; 1.201 1.202 - /* Deal with hypervisor racing our resetting of rx_event. */ 1.203 - mb(); 1.204 - if ( np->rx->resp_prod != i ) 1.205 - goto again; 1.206 + /* Deal with hypervisor racing our resetting of rx_event. 
*/ 1.207 + mb(); 1.208 + if ( np->rx->resp_prod == i ) 1.209 + { 1.210 + __netif_rx_complete(dev); 1.211 + more_to_do = 0; 1.212 + } 1.213 + 1.214 + local_irq_restore(flags); 1.215 + } 1.216 + 1.217 + return more_to_do; 1.218 } 1.219 1.220 1.221 @@ -524,6 +590,8 @@ static int __init init_module(void) 1.222 dev->hard_start_xmit = network_start_xmit; 1.223 dev->stop = network_close; 1.224 dev->get_stats = network_get_stats; 1.225 + dev->poll = netif_poll; 1.226 + dev->weight = 64; 1.227 1.228 if ( (err = register_netdev(dev)) != 0 ) 1.229 {