ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/netback/netback.c @ 9602:ea12fbe91f39

Remove unused code in netback's net_rx_action():
new_mfn and old_mfn are set in the latter half of the function
but are never used.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kaf24@firebug.cl.cam.ac.uk
date Thu Apr 06 09:34:30 2006 +0100 (2006-04-06)
parents 806d04252761
children c3bb51c443a7
line source
1 /******************************************************************************
2 * drivers/xen/netback/netback.c
3 *
4 * Back-end of the driver for virtual network devices. This portion of the
5 * driver exports a 'unified' network-device interface that can be accessed
6 * by any operating system that implements a compatible front end. A
7 * reference front-end implementation can be found in:
8 * drivers/xen/netfront/netfront.c
9 *
10 * Copyright (c) 2002-2005, K A Fraser
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation; or, when distributed
15 * separately from the Linux kernel or incorporated into other
16 * software packages, subject to the following license:
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a copy
19 * of this source file (the "Software"), to deal in the Software without
20 * restriction, including without limitation the rights to use, copy, modify,
21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22 * and to permit persons to whom the Software is furnished to do so, subject to
23 * the following conditions:
24 *
25 * The above copyright notice and this permission notice shall be included in
26 * all copies or substantial portions of the Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 * IN THE SOFTWARE.
35 */
37 #include "common.h"
38 #include <xen/balloon.h>
39 #include <xen/interface/memory.h>
41 /*#define NETBE_DEBUG_INTERRUPT*/
43 static void netif_idx_release(u16 pending_idx);
44 static void netif_page_release(struct page *page);
45 static void make_tx_response(netif_t *netif,
46 u16 id,
47 s8 st);
48 static int make_rx_response(netif_t *netif,
49 u16 id,
50 s8 st,
51 u16 offset,
52 u16 size,
53 u16 flags);
55 static void net_tx_action(unsigned long unused);
56 static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
58 static void net_rx_action(unsigned long unused);
59 static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
61 static struct timer_list net_timer;
63 #define MAX_PENDING_REQS 256
65 static struct sk_buff_head rx_queue;
66 static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
67 static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
68 static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
69 static unsigned char rx_notify[NR_IRQS];
71 static unsigned long mmap_vstart;
72 #define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
74 #define PKT_PROT_LEN 64
76 static struct {
77 netif_tx_request_t req;
78 netif_t *netif;
79 } pending_tx_info[MAX_PENDING_REQS];
80 static u16 pending_ring[MAX_PENDING_REQS];
81 typedef unsigned int PEND_RING_IDX;
82 #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
83 static PEND_RING_IDX pending_prod, pending_cons;
84 #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
86 /* Freed TX SKBs get batched on this ring before return to pending_ring. */
87 static u16 dealloc_ring[MAX_PENDING_REQS];
88 static PEND_RING_IDX dealloc_prod, dealloc_cons;
90 static struct sk_buff_head tx_queue;
92 static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
93 static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
94 static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
96 static struct list_head net_schedule_list;
97 static spinlock_t net_schedule_list_lock;
99 #define MAX_MFN_ALLOC 64
100 static unsigned long mfn_list[MAX_MFN_ALLOC];
101 static unsigned int alloc_index = 0;
102 static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
104 static unsigned long alloc_mfn(void)
105 {
106 unsigned long mfn = 0, flags;
107 struct xen_memory_reservation reservation = {
108 .extent_start = mfn_list,
109 .nr_extents = MAX_MFN_ALLOC,
110 .extent_order = 0,
111 .domid = DOMID_SELF
112 };
113 spin_lock_irqsave(&mfn_lock, flags);
114 if ( unlikely(alloc_index == 0) )
115 alloc_index = HYPERVISOR_memory_op(
116 XENMEM_increase_reservation, &reservation);
117 if ( alloc_index != 0 )
118 mfn = mfn_list[--alloc_index];
119 spin_unlock_irqrestore(&mfn_lock, flags);
120 return mfn;
121 }
123 static inline void maybe_schedule_tx_action(void)
124 {
125 smp_mb();
126 if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
127 !list_empty(&net_schedule_list))
128 tasklet_schedule(&net_tx_tasklet);
129 }
131 /*
132 * A gross way of confirming the origin of an skb data page. The slab
133 * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
134 */
135 static inline int is_xen_skb(struct sk_buff *skb)
136 {
137 extern kmem_cache_t *skbuff_cachep;
138 kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
139 return (cp == skbuff_cachep);
140 }
142 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
143 {
144 netif_t *netif = netdev_priv(dev);
146 BUG_ON(skb->dev != dev);
148 /* Drop the packet if the target domain has no receive buffers. */
149 if (!netif->active ||
150 (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
151 ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
152 NET_RX_RING_SIZE))
153 goto drop;
155 /*
156 * We do not copy the packet unless:
157 * 1. The data is shared; or
158 * 2. The data is not allocated from our special cache.
159 * NB. We also cannot cope with fragmented packets, but we won't get any
160 * because we do not advertise the NETIF_F_SG feature.
161 */
162 if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
163 int hlen = skb->data - skb->head;
164 int ret;
165 struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
166 if ( unlikely(nskb == NULL) )
167 goto drop;
168 skb_reserve(nskb, hlen);
169 __skb_put(nskb, skb->len);
170 ret = skb_copy_bits(skb, -hlen, nskb->data - hlen,
171 skb->len + hlen);
172 BUG_ON(ret);
173 nskb->dev = skb->dev;
174 nskb->proto_data_valid = skb->proto_data_valid;
175 dev_kfree_skb(skb);
176 skb = nskb;
177 }
179 netif->rx_req_cons_peek++;
180 netif_get(netif);
182 skb_queue_tail(&rx_queue, skb);
183 tasklet_schedule(&net_rx_tasklet);
185 return 0;
187 drop:
188 netif->stats.tx_dropped++;
189 dev_kfree_skb(skb);
190 return 0;
191 }
193 #if 0
194 static void xen_network_done_notify(void)
195 {
196 static struct net_device *eth0_dev = NULL;
197 if (unlikely(eth0_dev == NULL))
198 eth0_dev = __dev_get_by_name("eth0");
199 netif_rx_schedule(eth0_dev);
200 }
201 /*
202 * Add the following to a NAPI driver's poll() function (Tigon3 as an example; see the sketch below this block):
203 * if ( xen_network_done() )
204 * tg3_enable_ints(tp);
205 */
206 int xen_network_done(void)
207 {
208 return skb_queue_empty(&rx_queue);
209 }
210 #endif
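/*
 * Illustrative sketch, not part of this file: roughly where the
 * xen_network_done() test suggested above would sit in a 2.6-era NAPI
 * poll() handler.  my_poll(), my_rx_work() and my_enable_ints() are
 * hypothetical stand-ins for a real driver's routines (tg3_poll() and
 * tg3_enable_ints() in the Tigon3 example).
 */
static int my_poll(struct net_device *dev, int *budget)
{
	int quota = min(*budget, dev->quota);
	int work = my_rx_work(dev, quota);	/* drain up to 'quota' packets */

	*budget -= work;
	dev->quota -= work;

	if (work < quota) {
		/* RX ring drained: leave polling mode... */
		netif_rx_complete(dev);
		/*
		 * ...but only unmask interrupts once netback's rx_queue is
		 * empty; otherwise xen_network_done_notify() re-schedules
		 * this device when netback drains its backlog.
		 */
		if (xen_network_done())
			my_enable_ints(dev);
		return 0;
	}
	return 1;	/* more to do: stay on the poll list */
}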
212 static void net_rx_action(unsigned long unused)
213 {
214 netif_t *netif = NULL;
215 s8 status;
216 u16 size, id, irq, flags;
217 multicall_entry_t *mcl;
218 mmu_update_t *mmu;
219 gnttab_transfer_t *gop;
220 unsigned long vdata, old_mfn, new_mfn;
221 struct sk_buff_head rxq;
222 struct sk_buff *skb;
223 u16 notify_list[NET_RX_RING_SIZE];
224 int notify_nr = 0;
225 int ret;
227 skb_queue_head_init(&rxq);
229 mcl = rx_mcl;
230 mmu = rx_mmu;
231 gop = grant_rx_op;
233 while ((skb = skb_dequeue(&rx_queue)) != NULL) {
234 netif = netdev_priv(skb->dev);
235 vdata = (unsigned long)skb->data;
236 old_mfn = virt_to_mfn(vdata);
238 /* Memory squeeze? Back off for an arbitrary period. */
239 if ((new_mfn = alloc_mfn()) == 0) {
240 if ( net_ratelimit() )
241 WPRINTK("Memory squeeze in netback driver.\n");
242 mod_timer(&net_timer, jiffies + HZ);
243 skb_queue_head(&rx_queue, skb);
244 break;
245 }
246 /*
247 * Set the new P2M table entry before reassigning the old data
248 * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
249 */
250 set_phys_to_machine(__pa(skb->data) >> PAGE_SHIFT, new_mfn);
252 MULTI_update_va_mapping(mcl, vdata,
253 pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
254 mcl++;
256 gop->mfn = old_mfn;
257 gop->domid = netif->domid;
258 gop->ref = RING_GET_REQUEST(
259 &netif->rx, netif->rx.req_cons)->gref;
260 netif->rx.req_cons++;
261 gop++;
263 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
264 mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
265 MMU_MACHPHYS_UPDATE;
266 mmu->val = __pa(vdata) >> PAGE_SHIFT;
267 mmu++;
268 }
270 __skb_queue_tail(&rxq, skb);
272 /* Filled the batch queue? */
273 if ((gop - grant_rx_op) == ARRAY_SIZE(grant_rx_op))
274 break;
275 }
277 if (mcl == rx_mcl)
278 return;
280 mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* one global TLB flush, on the last va update */
282 if (mmu - rx_mmu) {
283 mcl->op = __HYPERVISOR_mmu_update;
284 mcl->args[0] = (unsigned long)rx_mmu;
285 mcl->args[1] = mmu - rx_mmu;
286 mcl->args[2] = 0;
287 mcl->args[3] = DOMID_SELF;
288 mcl++;
289 }
291 ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
292 BUG_ON(ret != 0);
294 ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
295 gop - grant_rx_op);
296 BUG_ON(ret != 0);
298 mcl = rx_mcl;
299 gop = grant_rx_op;
300 while ((skb = __skb_dequeue(&rxq)) != NULL) {
301 netif = netdev_priv(skb->dev);
302 size = skb->tail - skb->data;
304 atomic_set(&(skb_shinfo(skb)->dataref), 1);
305 skb_shinfo(skb)->nr_frags = 0;
306 skb_shinfo(skb)->frag_list = NULL;
308 netif->stats.tx_bytes += size;
309 netif->stats.tx_packets++;
311 /* The update_va_mapping() must not fail. */
312 BUG_ON(mcl->result != 0);
314 /* Check the reassignment error code. */
315 status = NETIF_RSP_OKAY;
316 if (gop->status != 0) {
317 DPRINTK("Bad status %d from grant transfer to DOM%u\n",
318 gop->status, netif->domid);
319 /*
320 * Page no longer belongs to us unless GNTST_bad_page,
321 * but that should be a fatal error anyway.
322 */
323 BUG_ON(gop->status == GNTST_bad_page);
324 status = NETIF_RSP_ERROR;
325 }
326 irq = netif->irq;
327 id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
328 flags = 0;
329 if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
330 flags |= NETRXF_csum_blank | NETRXF_data_validated;
331 else if (skb->proto_data_valid) /* remote but checksummed? */
332 flags |= NETRXF_data_validated;
333 if (make_rx_response(netif, id, status,
334 (unsigned long)skb->data & ~PAGE_MASK,
335 size, flags) &&
336 (rx_notify[irq] == 0)) {
337 rx_notify[irq] = 1;
338 notify_list[notify_nr++] = irq;
339 }
341 netif_put(netif);
342 dev_kfree_skb(skb);
343 mcl++;
344 gop++;
345 }
347 while (notify_nr != 0) {
348 irq = notify_list[--notify_nr];
349 rx_notify[irq] = 0;
350 notify_remote_via_irq(irq);
351 }
353 /* More work to do? */
354 if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
355 tasklet_schedule(&net_rx_tasklet);
356 #if 0
357 else
358 xen_network_done_notify();
359 #endif
360 }
362 static void net_alarm(unsigned long unused)
363 {
364 tasklet_schedule(&net_rx_tasklet);
365 }
367 struct net_device_stats *netif_be_get_stats(struct net_device *dev)
368 {
369 netif_t *netif = netdev_priv(dev);
370 return &netif->stats;
371 }
373 static int __on_net_schedule_list(netif_t *netif)
374 {
375 return netif->list.next != NULL;
376 }
378 static void remove_from_net_schedule_list(netif_t *netif)
379 {
380 spin_lock_irq(&net_schedule_list_lock);
381 if (likely(__on_net_schedule_list(netif))) {
382 list_del(&netif->list);
383 netif->list.next = NULL;
384 netif_put(netif);
385 }
386 spin_unlock_irq(&net_schedule_list_lock);
387 }
389 static void add_to_net_schedule_list_tail(netif_t *netif)
390 {
391 if (__on_net_schedule_list(netif))
392 return;
394 spin_lock_irq(&net_schedule_list_lock);
395 if (!__on_net_schedule_list(netif) && netif->active) {
396 list_add_tail(&netif->list, &net_schedule_list);
397 netif_get(netif);
398 }
399 spin_unlock_irq(&net_schedule_list_lock);
400 }
402 /*
403 * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
404 * If this driver is pipelining transmit requests then we can be very
405 * aggressive in avoiding new-packet notifications -- the frontend only needs
406 * to send a notification if there are no outstanding unreceived responses.
407 * If we may be buffering transmit requests for any reason then we must be
408 * rather more conservative and treat this as the final check for pending work.
409 */
410 void netif_schedule_work(netif_t *netif)
411 {
412 int more_to_do;
414 #ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
415 more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
416 #else
417 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
418 #endif
420 if (more_to_do) {
421 add_to_net_schedule_list_tail(netif);
422 maybe_schedule_tx_action();
423 }
424 }
426 void netif_deschedule_work(netif_t *netif)
427 {
428 remove_from_net_schedule_list(netif);
429 }
432 static void tx_credit_callback(unsigned long data)
433 {
434 netif_t *netif = (netif_t *)data;
435 netif->remaining_credit = netif->credit_bytes;
436 netif_schedule_work(netif);
437 }
439 inline static void net_tx_action_dealloc(void)
440 {
441 gnttab_unmap_grant_ref_t *gop;
442 u16 pending_idx;
443 PEND_RING_IDX dc, dp;
444 netif_t *netif;
445 int ret;
447 dc = dealloc_cons;
448 dp = dealloc_prod;
450 /*
451 * Free up any grants we have finished using
452 */
453 gop = tx_unmap_ops;
454 while (dc != dp) {
455 pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
456 gop->host_addr = MMAP_VADDR(pending_idx);
457 gop->dev_bus_addr = 0;
458 gop->handle = grant_tx_handle[pending_idx];
459 gop++;
460 }
461 ret = HYPERVISOR_grant_table_op(
462 GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
463 BUG_ON(ret);
465 while (dealloc_cons != dp) {
466 pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
468 netif = pending_tx_info[pending_idx].netif;
470 make_tx_response(netif, pending_tx_info[pending_idx].req.id,
471 NETIF_RSP_OKAY);
473 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
475 netif_put(netif);
476 }
477 }
479 /* Called after netfront has transmitted */
480 static void net_tx_action(unsigned long unused)
481 {
482 struct list_head *ent;
483 struct sk_buff *skb;
484 netif_t *netif;
485 netif_tx_request_t txreq;
486 u16 pending_idx;
487 RING_IDX i;
488 gnttab_map_grant_ref_t *mop;
489 unsigned int data_len;
490 int ret, work_to_do;
492 if (dealloc_cons != dealloc_prod)
493 net_tx_action_dealloc();
495 mop = tx_map_ops;
496 while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
497 !list_empty(&net_schedule_list)) {
498 /* Get a netif from the list with work to do. */
499 ent = net_schedule_list.next;
500 netif = list_entry(ent, netif_t, list);
501 netif_get(netif);
502 remove_from_net_schedule_list(netif);
504 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
505 if (!work_to_do) {
506 netif_put(netif);
507 continue;
508 }
510 i = netif->tx.req_cons;
511 rmb(); /* Ensure that we see the request before we copy it. */
512 memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
513 /* Credit-based scheduling. */
514 if (txreq.size > netif->remaining_credit) {
515 unsigned long now = jiffies;
516 unsigned long next_credit =
517 netif->credit_timeout.expires +
518 msecs_to_jiffies(netif->credit_usec / 1000);
520 /* Timer could already be pending in rare cases. */
521 if (timer_pending(&netif->credit_timeout))
522 break;
524 /* Passed the point where we can replenish credit? */
525 if (time_after_eq(now, next_credit)) {
526 netif->credit_timeout.expires = now;
527 netif->remaining_credit = netif->credit_bytes;
528 }
530 /* Still too big to send right now? Set a callback. */
531 if (txreq.size > netif->remaining_credit) {
532 netif->remaining_credit = 0;
533 netif->credit_timeout.data =
534 (unsigned long)netif;
535 netif->credit_timeout.function =
536 tx_credit_callback;
537 __mod_timer(&netif->credit_timeout,
538 next_credit);
539 break;
540 }
541 }
542 netif->remaining_credit -= txreq.size;
544 netif->tx.req_cons++;
546 netif_schedule_work(netif);
548 if (unlikely(txreq.size < ETH_HLEN) ||
549 unlikely(txreq.size > ETH_FRAME_LEN)) {
550 DPRINTK("Bad packet size: %d\n", txreq.size);
551 make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
552 netif_put(netif);
553 continue;
554 }
556 /* The payload must not cross a page boundary, as it cannot be fragmented. */
557 if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
558 DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
559 txreq.offset, txreq.size,
560 (txreq.offset &~PAGE_MASK) + txreq.size);
561 make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
562 netif_put(netif);
563 continue;
564 }
566 pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
568 data_len = (txreq.size > PKT_PROT_LEN) ?
569 PKT_PROT_LEN : txreq.size;
571 skb = alloc_skb(data_len+16, GFP_ATOMIC);
572 if (unlikely(skb == NULL)) {
573 DPRINTK("Can't allocate a skb in start_xmit.\n");
574 make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
575 netif_put(netif);
576 break;
577 }
579 /* Packets passed to netif_rx() must have some headroom. */
580 skb_reserve(skb, 16);
582 mop->host_addr = MMAP_VADDR(pending_idx);
583 mop->dom = netif->domid;
584 mop->ref = txreq.gref;
585 mop->flags = GNTMAP_host_map | GNTMAP_readonly;
586 mop++;
588 memcpy(&pending_tx_info[pending_idx].req,
589 &txreq, sizeof(txreq));
590 pending_tx_info[pending_idx].netif = netif;
591 *((u16 *)skb->data) = pending_idx; /* recovered after the batched grant map below */
593 __skb_queue_tail(&tx_queue, skb);
595 pending_cons++;
597 if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
598 break;
599 }
601 if (mop == tx_map_ops)
602 return;
604 ret = HYPERVISOR_grant_table_op(
605 GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
606 BUG_ON(ret);
608 mop = tx_map_ops;
609 while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
610 pending_idx = *((u16 *)skb->data);
611 netif = pending_tx_info[pending_idx].netif;
612 memcpy(&txreq, &pending_tx_info[pending_idx].req,
613 sizeof(txreq));
615 /* Check the remap error code. */
616 if (unlikely(mop->status)) {
617 printk(KERN_ALERT "#### netback grant fails\n");
618 make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
619 netif_put(netif);
620 kfree_skb(skb);
621 mop++;
622 pending_ring[MASK_PEND_IDX(pending_prod++)] =
623 pending_idx;
624 continue;
625 }
626 set_phys_to_machine(
627 __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT,
628 FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
629 grant_tx_handle[pending_idx] = mop->handle;
631 data_len = (txreq.size > PKT_PROT_LEN) ?
632 PKT_PROT_LEN : txreq.size;
634 __skb_put(skb, data_len);
635 memcpy(skb->data,
636 (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
637 data_len);
638 if (data_len < txreq.size) {
639 /* Append the packet payload as a fragment. */
640 skb_shinfo(skb)->frags[0].page =
641 virt_to_page(MMAP_VADDR(pending_idx));
642 skb_shinfo(skb)->frags[0].size =
643 txreq.size - data_len;
644 skb_shinfo(skb)->frags[0].page_offset =
645 txreq.offset + data_len;
646 skb_shinfo(skb)->nr_frags = 1;
647 } else {
648 /* Schedule a response immediately. */
649 netif_idx_release(pending_idx);
650 }
652 skb->data_len = txreq.size - data_len;
653 skb->len += skb->data_len;
655 skb->dev = netif->dev;
656 skb->protocol = eth_type_trans(skb, skb->dev);
658 /*
659 * Old frontends do not assert data_validated but we
660 * can infer it from csum_blank so test both flags.
661 */
662 if (txreq.flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
663 skb->ip_summed = CHECKSUM_UNNECESSARY;
664 skb->proto_data_valid = 1;
665 } else {
666 skb->ip_summed = CHECKSUM_NONE;
667 skb->proto_data_valid = 0;
668 }
669 skb->proto_csum_blank = !!(txreq.flags & NETTXF_csum_blank);
671 netif->stats.rx_bytes += txreq.size;
672 netif->stats.rx_packets++;
674 netif_rx(skb);
675 netif->dev->last_rx = jiffies;
677 mop++;
678 }
679 }
681 static void netif_idx_release(u16 pending_idx)
682 {
683 static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
684 unsigned long flags;
686 spin_lock_irqsave(&_lock, flags);
687 dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
688 spin_unlock_irqrestore(&_lock, flags);
690 tasklet_schedule(&net_tx_tasklet);
691 }
693 static void netif_page_release(struct page *page)
694 {
695 u16 pending_idx = page - virt_to_page(mmap_vstart);
697 /* Ready for next use. */
698 set_page_count(page, 1);
700 netif_idx_release(pending_idx);
701 }
703 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
704 {
705 netif_t *netif = dev_id;
706 add_to_net_schedule_list_tail(netif);
707 maybe_schedule_tx_action();
708 return IRQ_HANDLED;
709 }
711 static void make_tx_response(netif_t *netif,
712 u16 id,
713 s8 st)
714 {
715 RING_IDX i = netif->tx.rsp_prod_pvt;
716 netif_tx_response_t *resp;
717 int notify;
719 resp = RING_GET_RESPONSE(&netif->tx, i);
720 resp->id = id;
721 resp->status = st;
723 netif->tx.rsp_prod_pvt = ++i;
724 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
725 if (notify)
726 notify_remote_via_irq(netif->irq);
728 #ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
729 if (i == netif->tx.req_cons) {
730 int more_to_do;
731 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
732 if (more_to_do)
733 add_to_net_schedule_list_tail(netif);
734 }
735 #endif
736 }
738 static int make_rx_response(netif_t *netif,
739 u16 id,
740 s8 st,
741 u16 offset,
742 u16 size,
743 u16 flags)
744 {
745 RING_IDX i = netif->rx.rsp_prod_pvt;
746 netif_rx_response_t *resp;
747 int notify;
749 resp = RING_GET_RESPONSE(&netif->rx, i);
750 resp->offset = offset;
751 resp->flags = flags;
752 resp->id = id;
753 resp->status = (s16)size;
754 if (st < 0)
755 resp->status = (s16)st;
757 netif->rx.rsp_prod_pvt = ++i;
758 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, notify);
760 return notify;
761 }
763 #ifdef NETBE_DEBUG_INTERRUPT
764 static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
765 {
766 struct list_head *ent;
767 netif_t *netif;
768 int i = 0;
770 printk(KERN_ALERT "netif_schedule_list:\n");
771 spin_lock_irq(&net_schedule_list_lock);
773 list_for_each (ent, &net_schedule_list) {
774 netif = list_entry(ent, netif_t, list);
775 printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
776 "rx_resp_prod=%08x\n",
777 i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
778 printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
779 netif->tx.req_cons, netif->tx.rsp_prod_pvt);
780 printk(KERN_ALERT " shared(rx_req_prod=%08x "
781 "rx_resp_prod=%08x\n",
782 netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
783 printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
784 netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
785 printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
786 netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
787 i++;
788 }
790 spin_unlock_irq(&net_schedule_list_lock);
791 printk(KERN_ALERT " ** End of netif_schedule_list **\n");
793 return IRQ_HANDLED;
794 }
795 #endif
797 static int __init netback_init(void)
798 {
799 int i;
800 struct page *page;
802 /* We can increase reservation by this much in net_rx_action(). */
803 balloon_update_driver_allowance(NET_RX_RING_SIZE);
805 skb_queue_head_init(&rx_queue);
806 skb_queue_head_init(&tx_queue);
808 init_timer(&net_timer);
809 net_timer.data = 0;
810 net_timer.function = net_alarm;
812 page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
813 BUG_ON(page == NULL);
814 mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
816 for (i = 0; i < MAX_PENDING_REQS; i++) {
817 page = virt_to_page(MMAP_VADDR(i));
818 set_page_count(page, 1);
819 SetPageForeign(page, netif_page_release);
820 }
822 pending_cons = 0;
823 pending_prod = MAX_PENDING_REQS;
824 for (i = 0; i < MAX_PENDING_REQS; i++)
825 pending_ring[i] = i;
827 spin_lock_init(&net_schedule_list_lock);
828 INIT_LIST_HEAD(&net_schedule_list);
830 netif_xenbus_init();
832 #ifdef NETBE_DEBUG_INTERRUPT
833 (void)bind_virq_to_irqhandler(
834 VIRQ_DEBUG,
835 0,
836 netif_be_dbg,
837 SA_SHIRQ,
838 "net-be-dbg",
839 &netif_be_dbg);
840 #endif
842 __unsafe(THIS_MODULE);
844 return 0;
845 }
847 static void netback_cleanup(void)
848 {
849 BUG();
850 }
852 module_init(netback_init);
853 module_exit(netback_cleanup);
855 MODULE_LICENSE("Dual BSD/GPL");
857 /*
858 * Local variables:
859 * c-file-style: "linux"
860 * indent-tabs-mode: t
861 * c-indent-level: 8
862 * c-basic-offset: 8
863 * tab-width: 8
864 * End:
865 */