ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/netback/netback.c @ 10038:60f7b567bb2b

Simply do not declare module_exit() handlers for netback/blkback, rather
than declaring the modules unsafe.

Signed-off-by: Keir Fraser <keir@xensource.com>
Author:   kaf24@firebug.cl.cam.ac.uk
Date:     Wed May 10 16:47:00 2006 +0100 (2006-05-10)
Parents:  b61908e30015
Children: 91c77df11b43

/******************************************************************************
 * drivers/xen/netback/netback.c
 *
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/xen/netfront/netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"
#include <xen/balloon.h>
#include <xen/interface/memory.h>

/*#define NETBE_DEBUG_INTERRUPT*/

static void netif_idx_release(u16 pending_idx);
static void netif_page_release(struct page *page);
static void make_tx_response(netif_t *netif,
                             u16 id,
                             s8 st);
static int make_rx_response(netif_t *netif,
                            u16 id,
                            s8 st,
                            u16 offset,
                            u16 size,
                            u16 flags);

static void net_tx_action(unsigned long unused);
static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);

static void net_rx_action(unsigned long unused);
static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);

static struct timer_list net_timer;

#define MAX_PENDING_REQS 256

static struct sk_buff_head rx_queue;
static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
static unsigned char rx_notify[NR_IRQS];

static unsigned long mmap_vstart;
#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
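
/*
 * Each transmit request is mapped read-only from the guest; the first
 * PKT_PROT_LEN bytes are copied into the local skb so that protocol
 * headers can be inspected without touching the mapped frame.
 */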
#define PKT_PROT_LEN 64
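
/*
 * Bookkeeping for in-flight transmit requests. pending_ring[] holds the
 * indices of free slots in pending_tx_info[]: slots are consumed at
 * pending_cons (net_tx_action) and returned at pending_prod
 * (net_tx_action_dealloc), so NR_PENDING_REQS is the number of requests
 * currently in flight. MAX_PENDING_REQS must be a power of two for
 * MASK_PEND_IDX() to wrap correctly.
 */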
static struct {
	netif_tx_request_t req;
	netif_t *netif;
} pending_tx_info[MAX_PENDING_REQS];
static u16 pending_ring[MAX_PENDING_REQS];
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)

/* Freed TX SKBs get batched on this ring before return to pending_ring. */
static u16 dealloc_ring[MAX_PENDING_REQS];
static PEND_RING_IDX dealloc_prod, dealloc_cons;

static struct sk_buff_head tx_queue;

static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];

static struct list_head net_schedule_list;
static spinlock_t net_schedule_list_lock;

#define MAX_MFN_ALLOC 64
static unsigned long mfn_list[MAX_MFN_ALLOC];
static unsigned int alloc_index = 0;
static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
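
/*
 * Grab a machine frame for the receive path. The free list in mfn_list[]
 * is refilled in batches of MAX_MFN_ALLOC via XENMEM_increase_reservation
 * when it runs dry; returns 0 if the hypervisor cannot extend the
 * reservation (memory squeeze).
 */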
static unsigned long alloc_mfn(void)
{
	unsigned long mfn = 0, flags;
	struct xen_memory_reservation reservation = {
		.nr_extents   = MAX_MFN_ALLOC,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	set_xen_guest_handle(reservation.extent_start, mfn_list);
	spin_lock_irqsave(&mfn_lock, flags);
	if ( unlikely(alloc_index == 0) )
		alloc_index = HYPERVISOR_memory_op(
			XENMEM_increase_reservation, &reservation);
	if ( alloc_index != 0 )
		mfn = mfn_list[--alloc_index];
	spin_unlock_irqrestore(&mfn_lock, flags);
	return mfn;
}

static inline void maybe_schedule_tx_action(void)
{
	smp_mb();
	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
	    !list_empty(&net_schedule_list))
		tasklet_schedule(&net_tx_tasklet);
}

/*
 * A gross way of confirming the origin of an skb data page. The slab
 * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
 */
static inline int is_xen_skb(struct sk_buff *skb)
{
	extern kmem_cache_t *skbuff_cachep;
	kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
	return (cp == skbuff_cachep);
}
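
/*
 * Transmit path as seen from domain 0: packets handed to this device by
 * the local network stack are queued on rx_queue and delivered to the
 * frontend's receive ring by net_rx_action(). The backend's "tx"
 * statistics therefore count traffic sent towards the guest.
 */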
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netif_t *netif = netdev_priv(dev);

	BUG_ON(skb->dev != dev);

	/* Drop the packet if the target domain has no receive buffers. */
	if (!netif->active ||
	    (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
	    ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
	     NET_RX_RING_SIZE))
		goto drop;

	/*
	 * We do not copy the packet unless:
	 *  1. The data is shared; or
	 *  2. The data is not allocated from our special cache.
	 * NB. We also couldn't cope with fragmented packets, but we won't get
	 *     any because we do not advertise the NETIF_F_SG feature.
	 */
	if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
		int hlen = skb->data - skb->head;
		int ret;
		struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
		if ( unlikely(nskb == NULL) )
			goto drop;
		skb_reserve(nskb, hlen);
		__skb_put(nskb, skb->len);
		ret = skb_copy_bits(skb, -hlen, nskb->data - hlen,
				    skb->len + hlen);
		BUG_ON(ret);
		nskb->dev = skb->dev;
		nskb->proto_data_valid = skb->proto_data_valid;
		nskb->proto_csum_blank = skb->proto_csum_blank;
		dev_kfree_skb(skb);
		skb = nskb;
	}

	netif->rx_req_cons_peek++;
	netif_get(netif);

	skb_queue_tail(&rx_queue, skb);
	tasklet_schedule(&net_rx_tasklet);

	return 0;

 drop:
	netif->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

#if 0
static void xen_network_done_notify(void)
{
	static struct net_device *eth0_dev = NULL;
	if (unlikely(eth0_dev == NULL))
		eth0_dev = __dev_get_by_name("eth0");
	netif_rx_schedule(eth0_dev);
}
/*
 * Add the following to the poll() function in a NAPI driver (Tigon3 is an
 * example):
 *  if ( xen_network_done() )
 *      tg3_enable_ints(tp);
 */
int xen_network_done(void)
{
	return skb_queue_empty(&rx_queue);
}
#endif
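
/*
 * Deliver queued packets to the frontend. For each skb a fresh machine
 * frame is taken from alloc_mfn() and mapped in place of the skb's data
 * page (skipped when running auto-translated), and the old frame is handed
 * to the guest with a GNTTABOP_transfer against the grant reference in its
 * receive request. Responses are then pushed, and event-channel
 * notifications are batched so each IRQ is raised at most once per run.
 */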
static void net_rx_action(unsigned long unused)
{
	netif_t *netif = NULL;
	s8 status;
	u16 size, id, irq, flags;
	multicall_entry_t *mcl;
	mmu_update_t *mmu;
	gnttab_transfer_t *gop;
	unsigned long vdata, old_mfn, new_mfn;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	u16 notify_list[NET_RX_RING_SIZE];
	int notify_nr = 0;
	int ret;

	skb_queue_head_init(&rxq);

	mcl = rx_mcl;
	mmu = rx_mmu;
	gop = grant_rx_op;

	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
		netif = netdev_priv(skb->dev);
		vdata = (unsigned long)skb->data;
		old_mfn = virt_to_mfn(vdata);

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Memory squeeze? Back off for an arbitrary while. */
			if ((new_mfn = alloc_mfn()) == 0) {
				if ( net_ratelimit() )
					WPRINTK("Memory squeeze in netback "
						"driver.\n");
				mod_timer(&net_timer, jiffies + HZ);
				skb_queue_head(&rx_queue, skb);
				break;
			}
			/*
			 * Set the new P2M table entry before reassigning
			 * the old data page. Heed the comment in
			 * pgtable-2level.h:pte_page(). :-)
			 */
			set_phys_to_machine(
				__pa(skb->data) >> PAGE_SHIFT,
				new_mfn);

			MULTI_update_va_mapping(mcl, vdata,
						pfn_pte_ma(new_mfn,
							   PAGE_KERNEL), 0);
			mcl++;

			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
				MMU_MACHPHYS_UPDATE;
			mmu->val = __pa(vdata) >> PAGE_SHIFT;
			mmu++;
		}

		gop->mfn = old_mfn;
		gop->domid = netif->domid;
		gop->ref = RING_GET_REQUEST(
			&netif->rx, netif->rx.req_cons)->gref;
		netif->rx.req_cons++;
		gop++;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		if ((gop - grant_rx_op) == ARRAY_SIZE(grant_rx_op))
			break;
	}

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		if (mcl == rx_mcl)
			return;

		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;

		if (mmu - rx_mmu) {
			mcl->op = __HYPERVISOR_mmu_update;
			mcl->args[0] = (unsigned long)rx_mmu;
			mcl->args[1] = mmu - rx_mmu;
			mcl->args[2] = 0;
			mcl->args[3] = DOMID_SELF;
			mcl++;
		}

		ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
		BUG_ON(ret != 0);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
					gop - grant_rx_op);
	BUG_ON(ret != 0);

	mcl = rx_mcl;
	gop = grant_rx_op;
	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		netif = netdev_priv(skb->dev);
		size = skb->tail - skb->data;

		atomic_set(&(skb_shinfo(skb)->dataref), 1);
		skb_shinfo(skb)->nr_frags = 0;
		skb_shinfo(skb)->frag_list = NULL;

		netif->stats.tx_bytes += size;
		netif->stats.tx_packets++;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* The update_va_mapping() must not fail. */
			BUG_ON(mcl->result != 0);
			mcl++;
		}

		/* Check the reassignment error code. */
		status = NETIF_RSP_OKAY;
		if (gop->status != 0) {
			DPRINTK("Bad status %d from grant transfer to DOM%u\n",
				gop->status, netif->domid);
			/*
			 * Page no longer belongs to us unless GNTST_bad_page,
			 * but that should be a fatal error anyway.
			 */
			BUG_ON(gop->status == GNTST_bad_page);
			status = NETIF_RSP_ERROR;
		}
		irq = netif->irq;
		id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
		flags = 0;
		if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
			flags |= NETRXF_csum_blank | NETRXF_data_validated;
		else if (skb->proto_data_valid) /* remote but checksummed? */
			flags |= NETRXF_data_validated;
		if (make_rx_response(netif, id, status,
				     (unsigned long)skb->data & ~PAGE_MASK,
				     size, flags) &&
		    (rx_notify[irq] == 0)) {
			rx_notify[irq] = 1;
			notify_list[notify_nr++] = irq;
		}

		netif_put(netif);
		dev_kfree_skb(skb);
		gop++;
	}

	while (notify_nr != 0) {
		irq = notify_list[--notify_nr];
		rx_notify[irq] = 0;
		notify_remote_via_irq(irq);
	}

	/* More work to do? */
	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
		tasklet_schedule(&net_rx_tasklet);
#if 0
	else
		xen_network_done_notify();
#endif
}

static void net_alarm(unsigned long unused)
{
	tasklet_schedule(&net_rx_tasklet);
}

struct net_device_stats *netif_be_get_stats(struct net_device *dev)
{
	netif_t *netif = netdev_priv(dev);
	return &netif->stats;
}
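
/*
 * Interfaces with unconsumed transmit requests wait on net_schedule_list
 * until net_tx_action() services them. A NULL list.next pointer marks an
 * interface as not currently queued, which is what
 * __on_net_schedule_list() tests.
 */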
static int __on_net_schedule_list(netif_t *netif)
{
	return netif->list.next != NULL;
}

static void remove_from_net_schedule_list(netif_t *netif)
{
	spin_lock_irq(&net_schedule_list_lock);
	if (likely(__on_net_schedule_list(netif))) {
		list_del(&netif->list);
		netif->list.next = NULL;
		netif_put(netif);
	}
	spin_unlock_irq(&net_schedule_list_lock);
}

static void add_to_net_schedule_list_tail(netif_t *netif)
{
	if (__on_net_schedule_list(netif))
		return;

	spin_lock_irq(&net_schedule_list_lock);
	if (!__on_net_schedule_list(netif) && netif->active) {
		list_add_tail(&netif->list, &net_schedule_list);
		netif_get(netif);
	}
	spin_unlock_irq(&net_schedule_list_lock);
}

/*
 * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
 * If this driver is pipelining transmit requests then we can be very
 * aggressive in avoiding new-packet notifications -- the frontend only needs
 * to send a notification if there are no outstanding unreceived responses.
 * If we may be buffering transmit requests for any reason then we must be
 * rather more conservative and treat this as the final check for pending
 * work.
 */
void netif_schedule_work(netif_t *netif)
{
	int more_to_do;

#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
	more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
#else
	RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
#endif

	if (more_to_do) {
		add_to_net_schedule_list_tail(netif);
		maybe_schedule_tx_action();
	}
}

void netif_deschedule_work(netif_t *netif)
{
	remove_from_net_schedule_list(netif);
}


static void tx_credit_callback(unsigned long data)
{
	netif_t *netif = (netif_t *)data;
	netif->remaining_credit = netif->credit_bytes;
	netif_schedule_work(netif);
}
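
/*
 * netif_idx_release() only queues finished slots on dealloc_ring; the
 * grant unmap and the TX response back to the frontend are deferred to
 * here, in tasklet context, so they can be batched into a single
 * GNTTABOP_unmap_grant_ref hypercall.
 */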
static inline void net_tx_action_dealloc(void)
{
	gnttab_unmap_grant_ref_t *gop;
	u16 pending_idx;
	PEND_RING_IDX dc, dp;
	netif_t *netif;
	int ret;

	dc = dealloc_cons;
	dp = dealloc_prod;

	/*
	 * Free up any grants we have finished using
	 */
	gop = tx_unmap_ops;
	while (dc != dp) {
		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
		gnttab_set_unmap_op(gop, MMAP_VADDR(pending_idx),
				    GNTMAP_host_map,
				    grant_tx_handle[pending_idx]);
		gop++;
	}
	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
	BUG_ON(ret);

	while (dealloc_cons != dp) {
		pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];

		netif = pending_tx_info[pending_idx].netif;

		make_tx_response(netif, pending_tx_info[pending_idx].req.id,
				 NETIF_RSP_OKAY);

		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;

		netif_put(netif);
	}
}

/* Called after netfront has transmitted */
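/*
 * Two passes: the first walks the schedule list, checks scheduling credit,
 * builds one grant-map operation per usable request and queues a partially
 * initialised skb on tx_queue; after a single GNTTABOP_map_grant_ref batch,
 * the second pass copies up to PKT_PROT_LEN bytes into the skb head,
 * attaches any remainder as a page fragment and calls netif_rx().
 */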
static void net_tx_action(unsigned long unused)
{
	struct list_head *ent;
	struct sk_buff *skb;
	netif_t *netif;
	netif_tx_request_t txreq;
	u16 pending_idx;
	RING_IDX i;
	gnttab_map_grant_ref_t *mop;
	unsigned int data_len;
	int ret, work_to_do;

	if (dealloc_cons != dealloc_prod)
		net_tx_action_dealloc();

	mop = tx_map_ops;
	while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
	       !list_empty(&net_schedule_list)) {
		/* Get a netif from the list with work to do. */
		ent = net_schedule_list.next;
		netif = list_entry(ent, netif_t, list);
		netif_get(netif);
		remove_from_net_schedule_list(netif);

		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
		if (!work_to_do) {
			netif_put(netif);
			continue;
		}

		i = netif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
		/* Credit-based scheduling. */
		if (txreq.size > netif->remaining_credit) {
			unsigned long now = jiffies;
			unsigned long next_credit =
				netif->credit_timeout.expires +
				msecs_to_jiffies(netif->credit_usec / 1000);

			/* Timer could already be pending in rare cases. */
			if (timer_pending(&netif->credit_timeout))
				break;

			/* Passed the point where we can replenish credit? */
			if (time_after_eq(now, next_credit)) {
				netif->credit_timeout.expires = now;
				netif->remaining_credit = netif->credit_bytes;
			}

			/* Still too big to send right now? Set a callback. */
			if (txreq.size > netif->remaining_credit) {
				netif->remaining_credit = 0;
				netif->credit_timeout.data =
					(unsigned long)netif;
				netif->credit_timeout.function =
					tx_credit_callback;
				__mod_timer(&netif->credit_timeout,
					    next_credit);
				break;
			}
		}
		netif->remaining_credit -= txreq.size;

		netif->tx.req_cons++;

		netif_schedule_work(netif);

		if (unlikely(txreq.size < ETH_HLEN) ||
		    unlikely(txreq.size > ETH_FRAME_LEN)) {
			DPRINTK("Bad packet size: %d\n", txreq.size);
			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
			netif_put(netif);
			continue;
		}

		/* Must not cross a page boundary, as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
			DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
				txreq.offset, txreq.size,
				(txreq.offset &~PAGE_MASK) + txreq.size);
			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
			netif_put(netif);
			continue;
		}

		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];

		data_len = (txreq.size > PKT_PROT_LEN) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len+16, GFP_ATOMIC);
		if (unlikely(skb == NULL)) {
			DPRINTK("Can't allocate a skb in start_xmit.\n");
			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
			netif_put(netif);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, 16);

		gnttab_set_map_op(mop, MMAP_VADDR(pending_idx),
				  GNTMAP_host_map | GNTMAP_readonly,
				  txreq.gref, netif->domid);
		mop++;

		memcpy(&pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		pending_tx_info[pending_idx].netif = netif;
		*((u16 *)skb->data) = pending_idx;

		__skb_queue_tail(&tx_queue, skb);

		pending_cons++;

		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
			break;
	}

	if (mop == tx_map_ops)
		return;

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
	BUG_ON(ret);

	mop = tx_map_ops;
	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
		pending_idx = *((u16 *)skb->data);
		netif = pending_tx_info[pending_idx].netif;
		memcpy(&txreq, &pending_tx_info[pending_idx].req,
		       sizeof(txreq));

		/* Check the remap error code. */
		if (unlikely(mop->status)) {
			printk(KERN_ALERT "#### netback grant fails\n");
			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
			netif_put(netif);
			kfree_skb(skb);
			mop++;
			pending_ring[MASK_PEND_IDX(pending_prod++)] =
				pending_idx;
			continue;
		}
		set_phys_to_machine(
			__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT,
			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
		grant_tx_handle[pending_idx] = mop->handle;

		data_len = (txreq.size > PKT_PROT_LEN) ?
			PKT_PROT_LEN : txreq.size;

		__skb_put(skb, data_len);
		memcpy(skb->data,
		       (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
		       data_len);
		if (data_len < txreq.size) {
			/* Append the packet payload as a fragment. */
			skb_shinfo(skb)->frags[0].page =
				virt_to_page(MMAP_VADDR(pending_idx));
			skb_shinfo(skb)->frags[0].size =
				txreq.size - data_len;
			skb_shinfo(skb)->frags[0].page_offset =
				txreq.offset + data_len;
			skb_shinfo(skb)->nr_frags = 1;
		} else {
			/* Schedule a response immediately. */
			netif_idx_release(pending_idx);
		}

		skb->data_len = txreq.size - data_len;
		skb->len += skb->data_len;
		skb->truesize += skb->data_len;

		skb->dev = netif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);

		/*
		 * Old frontends do not assert data_validated but we
		 * can infer it from csum_blank so test both flags.
		 */
		if (txreq.flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->proto_data_valid = 1;
		} else {
			skb->ip_summed = CHECKSUM_NONE;
			skb->proto_data_valid = 0;
		}
		skb->proto_csum_blank = !!(txreq.flags & NETTXF_csum_blank);

		netif->stats.rx_bytes += txreq.size;
		netif->stats.rx_packets++;

		netif_rx(skb);
		netif->dev->last_rx = jiffies;

		mop++;
	}
}

static void netif_idx_release(u16 pending_idx)
{
	static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
	unsigned long flags;

	spin_lock_irqsave(&_lock, flags);
	dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
	spin_unlock_irqrestore(&_lock, flags);

	tasklet_schedule(&net_tx_tasklet);
}
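
/*
 * Destructor for the foreign pages set up in netback_init(): invoked once
 * the network stack is finished with a page we attached as a fragment, at
 * which point the slot can be recycled via the dealloc ring.
 */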
static void netif_page_release(struct page *page)
{
	u16 pending_idx = page - virt_to_page(mmap_vstart);

	/* Ready for next use. */
	set_page_count(page, 1);

	netif_idx_release(pending_idx);
}

irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
	netif_t *netif = dev_id;
	add_to_net_schedule_list_tail(netif);
	maybe_schedule_tx_action();
	return IRQ_HANDLED;
}

static void make_tx_response(netif_t *netif,
                             u16 id,
                             s8 st)
{
	RING_IDX i = netif->tx.rsp_prod_pvt;
	netif_tx_response_t *resp;
	int notify;

	resp = RING_GET_RESPONSE(&netif->tx, i);
	resp->id     = id;
	resp->status = st;

	netif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
	if (notify)
		notify_remote_via_irq(netif->irq);

#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
	if (i == netif->tx.req_cons) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
		if (more_to_do)
			add_to_net_schedule_list_tail(netif);
	}
#endif
}
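
/*
 * Returns whether the frontend needs an event-channel notification; the
 * caller (net_rx_action) collects these per IRQ so each interface is
 * notified at most once per batch.
 */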
static int make_rx_response(netif_t *netif,
                            u16 id,
                            s8 st,
                            u16 offset,
                            u16 size,
                            u16 flags)
{
	RING_IDX i = netif->rx.rsp_prod_pvt;
	netif_rx_response_t *resp;
	int notify;

	resp = RING_GET_RESPONSE(&netif->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	netif->rx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, notify);

	return notify;
}

#ifdef NETBE_DEBUG_INTERRUPT
static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
{
	struct list_head *ent;
	netif_t *netif;
	int i = 0;

	printk(KERN_ALERT "netif_schedule_list:\n");
	spin_lock_irq(&net_schedule_list_lock);

	list_for_each (ent, &net_schedule_list) {
		netif = list_entry(ent, netif_t, list);
		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
		       "rx_resp_prod=%08x\n",
		       i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
		printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
		       netif->tx.req_cons, netif->tx.rsp_prod_pvt);
		printk(KERN_ALERT " shared(rx_req_prod=%08x "
		       "rx_resp_prod=%08x\n",
		       netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
		printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
		       netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
		printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
		       netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
		i++;
	}

	spin_unlock_irq(&net_schedule_list_lock);
	printk(KERN_ALERT " ** End of netif_schedule_list **\n");

	return IRQ_HANDLED;
}
#endif
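
/*
 * Module initialisation: account the receive-path headroom with the
 * balloon driver, carve out MAX_PENDING_REQS empty pages to back
 * MMAP_VADDR(), mark them foreign with netif_page_release() as their
 * destructor, prime the pending ring and register with xenbus.
 */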
static int __init netback_init(void)
{
	int i;
	struct page *page;

	/* We can increase reservation by this much in net_rx_action(). */
	balloon_update_driver_allowance(NET_RX_RING_SIZE);

	skb_queue_head_init(&rx_queue);
	skb_queue_head_init(&tx_queue);

	init_timer(&net_timer);
	net_timer.data = 0;
	net_timer.function = net_alarm;

	page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
	BUG_ON(page == NULL);
	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		page = virt_to_page(MMAP_VADDR(i));
		set_page_count(page, 1);
		SetPageForeign(page, netif_page_release);
	}

	pending_cons = 0;
	pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; i++)
		pending_ring[i] = i;

	spin_lock_init(&net_schedule_list_lock);
	INIT_LIST_HEAD(&net_schedule_list);

	netif_xenbus_init();

#ifdef NETBE_DEBUG_INTERRUPT
	(void)bind_virq_to_irqhandler(
		VIRQ_DEBUG,
		0,
		netif_be_dbg,
		SA_SHIRQ,
		"net-be-dbg",
		&netif_be_dbg);
#endif

	return 0;
}

module_init(netback_init);

MODULE_LICENSE("Dual BSD/GPL");
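
/*
 * No module_exit() handler is declared: once initialised, netback cannot
 * be unloaded (see the changeset description above).
 */
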
/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */