ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/netback/netback.c @ 14291:7c8dcc5efd12

netback: Disable debug interrupt (accidentally left enabled in
previous checkin).
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Mar 07 16:16:09 2007 +0000
parents 42b29f084c31
children 68282f4b3e0f
line source
1 /******************************************************************************
2 * drivers/xen/netback/netback.c
3 *
4 * Back-end of the driver for virtual network devices. This portion of the
5 * driver exports a 'unified' network-device interface that can be accessed
6 * by any operating system that implements a compatible front end. A
7 * reference front-end implementation can be found in:
8 * drivers/xen/netfront/netfront.c
9 *
10 * Copyright (c) 2002-2005, K A Fraser
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation; or, when distributed
15 * separately from the Linux kernel or incorporated into other
16 * software packages, subject to the following license:
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a copy
19 * of this source file (the "Software"), to deal in the Software without
20 * restriction, including without limitation the rights to use, copy, modify,
21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22 * and to permit persons to whom the Software is furnished to do so, subject to
23 * the following conditions:
24 *
25 * The above copyright notice and this permission notice shall be included in
26 * all copies or substantial portions of the Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 * IN THE SOFTWARE.
35 */
37 #include "common.h"
38 #include <xen/balloon.h>
39 #include <xen/interface/memory.h>
41 /*#define NETBE_DEBUG_INTERRUPT*/
43 /* extra field used in struct page */
44 #define netif_page_index(pg) (*(long *)&(pg)->mapping)
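/*
 * These pages belong exclusively to netback, so the otherwise unused
 * page->mapping field is borrowed to record which pending-request slot a
 * page backs; netif_page_release() and the grant-copy path use it to find
 * the matching pending_tx_info entry.
 */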
46 struct netbk_rx_meta {
47 skb_frag_t frag;
48 int id;
49 int copy:1;
50 };
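/*
 * One netbk_rx_meta is filled in per receive-ring slot by netbk_gop_skb():
 * 'id' is the frontend request id to answer, 'copy' records whether the slot
 * was satisfied by grant-copy rather than page flip, and 'frag' carries the
 * fragment (the head slot temporarily stashes gso_type/gso_size in it).
 */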
52 static void netif_idx_release(u16 pending_idx);
53 static void netif_page_release(struct page *page);
54 static void make_tx_response(netif_t *netif,
55 netif_tx_request_t *txp,
56 s8 st);
57 static netif_rx_response_t *make_rx_response(netif_t *netif,
58 u16 id,
59 s8 st,
60 u16 offset,
61 u16 size,
62 u16 flags);
64 static void net_tx_action(unsigned long unused);
65 static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
67 static void net_rx_action(unsigned long unused);
68 static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
70 static struct timer_list net_timer;
72 #define MAX_PENDING_REQS 256
74 static struct sk_buff_head rx_queue;
76 static struct page **mmap_pages;
77 static inline unsigned long idx_to_kaddr(unsigned int idx)
78 {
79 return (unsigned long)pfn_to_kaddr(page_to_pfn(mmap_pages[idx]));
80 }
82 #define PKT_PROT_LEN 64
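/*
 * Up to PKT_PROT_LEN bytes of each transmit packet are copied into the local
 * skb head so protocol headers can be inspected cheaply; any remainder stays
 * in the granted page and is attached as a fragment.
 */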
84 static struct pending_tx_info {
85 netif_tx_request_t req;
86 netif_t *netif;
87 } pending_tx_info[MAX_PENDING_REQS];
88 static u16 pending_ring[MAX_PENDING_REQS];
89 typedef unsigned int PEND_RING_IDX;
90 #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
91 static PEND_RING_IDX pending_prod, pending_cons;
92 #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
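/*
 * pending_ring[] holds the indices of free pending_tx_info slots: indices are
 * consumed at pending_cons when a transmit request claims a slot and returned
 * at pending_prod when it completes, so NR_PENDING_REQS is the number of
 * requests currently in flight.
 */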
94 /* Freed TX SKBs get batched on this ring before return to pending_ring. */
95 static u16 dealloc_ring[MAX_PENDING_REQS];
96 static PEND_RING_IDX dealloc_prod, dealloc_cons;
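/*
 * netif_idx_release() pushes finished indices here (possibly from interrupt
 * context); net_tx_action_dealloc() later unmaps the grants, sends the TX
 * responses and recycles the slots onto pending_ring.
 */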
98 static struct sk_buff_head tx_queue;
100 static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
101 static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
102 static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
104 static struct list_head net_schedule_list;
105 static spinlock_t net_schedule_list_lock;
107 #define MAX_MFN_ALLOC 64
108 static unsigned long mfn_list[MAX_MFN_ALLOC];
109 static unsigned int alloc_index = 0;
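/*
 * Pool of machine frames for the page-flipping receive path: when a local
 * page is transferred to the guest, alloc_mfn() supplies a replacement MFN
 * for the local pseudo-physical slot, and check_mfn() tops the pool up via
 * XENMEM_increase_reservation.
 */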
111 static inline unsigned long alloc_mfn(void)
112 {
113 return mfn_list[--alloc_index];
114 }
116 static int check_mfn(int nr)
117 {
118 struct xen_memory_reservation reservation = {
119 .extent_order = 0,
120 .domid = DOMID_SELF
121 };
123 if (likely(alloc_index >= nr))
124 return 0;
126 set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
127 reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
128 alloc_index += HYPERVISOR_memory_op(XENMEM_increase_reservation,
129 &reservation);
131 return alloc_index >= nr ? 0 : -ENOMEM;
132 }
134 static inline void maybe_schedule_tx_action(void)
135 {
136 smp_mb();
137 if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
138 !list_empty(&net_schedule_list))
139 tasklet_schedule(&net_tx_tasklet);
140 }
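/*
 * Build a private copy of an skb: the head goes into a freshly allocated
 * linear area and the rest of the data into whole, newly allocated pages, so
 * that every byte ends up in pages this driver owns outright and can safely
 * flip to the guest.
 */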
142 static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
143 {
144 struct skb_shared_info *ninfo;
145 struct sk_buff *nskb;
146 unsigned long offset;
147 int ret;
148 int len;
149 int headlen;
151 BUG_ON(skb_shinfo(skb)->frag_list != NULL);
153 nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
154 if (unlikely(!nskb))
155 goto err;
157 skb_reserve(nskb, 16 + NET_IP_ALIGN);
158 headlen = nskb->end - nskb->data;
159 if (headlen > skb_headlen(skb))
160 headlen = skb_headlen(skb);
161 ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
162 BUG_ON(ret);
164 ninfo = skb_shinfo(nskb);
165 ninfo->gso_size = skb_shinfo(skb)->gso_size;
166 ninfo->gso_type = skb_shinfo(skb)->gso_type;
168 offset = headlen;
169 len = skb->len - headlen;
171 nskb->len = skb->len;
172 nskb->data_len = len;
173 nskb->truesize += len;
175 while (len) {
176 struct page *page;
177 int copy;
178 int zero;
180 if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
181 dump_stack();
182 goto err_free;
183 }
185 copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
186 zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
188 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
189 if (unlikely(!page))
190 goto err_free;
192 ret = skb_copy_bits(skb, offset, page_address(page), copy);
193 BUG_ON(ret);
195 ninfo->frags[ninfo->nr_frags].page = page;
196 ninfo->frags[ninfo->nr_frags].page_offset = 0;
197 ninfo->frags[ninfo->nr_frags].size = copy;
198 ninfo->nr_frags++;
200 offset += copy;
201 len -= copy;
202 }
204 offset = nskb->data - skb->data;
206 nskb->h.raw = skb->h.raw + offset;
207 nskb->nh.raw = skb->nh.raw + offset;
208 nskb->mac.raw = skb->mac.raw + offset;
210 return nskb;
212 err_free:
213 kfree_skb(nskb);
214 err:
215 return NULL;
216 }
218 static inline int netbk_max_required_rx_slots(netif_t *netif)
219 {
220 if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
221 return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
222 return 1; /* all in one */
223 }
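/*
 * The shared ring is treated as full when the frontend has not posted enough
 * rx requests beyond those already peeked at, or when accepting a worst-case
 * packet would run more than NET_RX_RING_SIZE slots ahead of the responses
 * already produced.
 */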
225 static inline int netbk_queue_full(netif_t *netif)
226 {
227 RING_IDX peek = netif->rx_req_cons_peek;
228 RING_IDX needed = netbk_max_required_rx_slots(netif);
230 return ((netif->rx.sring->req_prod - peek) < needed) ||
231 ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
232 }
234 static void tx_queue_callback(unsigned long data)
235 {
236 netif_t *netif = (netif_t *)data;
237 if (netif_schedulable(netif))
238 netif_wake_queue(netif->dev);
239 }
241 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
242 {
243 netif_t *netif = netdev_priv(dev);
245 BUG_ON(skb->dev != dev);
247 /* Drop the packet if the target domain has no receive buffers. */
248 if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
249 goto drop;
251 /*
252 * Copy the packet here if it's destined for a flipping interface
253 * but isn't flippable (e.g. extra references to data).
254 */
255 if (!netif->copying_receiver) {
256 struct sk_buff *nskb = netbk_copy_skb(skb);
257 if ( unlikely(nskb == NULL) )
258 goto drop;
259 /* Copy only the header fields we use in this driver. */
260 nskb->dev = skb->dev;
261 nskb->ip_summed = skb->ip_summed;
262 nskb->proto_data_valid = skb->proto_data_valid;
263 dev_kfree_skb(skb);
264 skb = nskb;
265 }
267 netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
268 !!skb_shinfo(skb)->gso_size;
269 netif_get(netif);
271 if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
272 netif->rx.sring->req_event = netif->rx_req_cons_peek +
273 netbk_max_required_rx_slots(netif);
274 mb(); /* request notification /then/ check & stop the queue */
275 if (netbk_queue_full(netif)) {
276 netif_stop_queue(dev);
277 /*
278 * Schedule 500ms timeout to restart the queue, thus
279 * ensuring that an inactive queue will be drained.
280 * Packets will be dropped immediately until more
281 * receive buffers become available (see
282 * netbk_queue_full() check above).
283 */
284 netif->tx_queue_timeout.data = (unsigned long)netif;
285 netif->tx_queue_timeout.function = tx_queue_callback;
286 __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
287 }
288 }
290 skb_queue_tail(&rx_queue, skb);
291 tasklet_schedule(&net_rx_tasklet);
293 return 0;
295 drop:
296 netif->stats.tx_dropped++;
297 dev_kfree_skb(skb);
298 return 0;
299 }
301 #if 0
302 static void xen_network_done_notify(void)
303 {
304 static struct net_device *eth0_dev = NULL;
305 if (unlikely(eth0_dev == NULL))
306 eth0_dev = __dev_get_by_name("eth0");
307 netif_rx_schedule(eth0_dev);
308 }
309 /*
310 * Add following to poll() function in NAPI driver (Tigon3 is example):
311 * if ( xen_network_done() )
312 * tg3_enable_ints(tp);
313 */
314 int xen_network_done(void)
315 {
316 return skb_queue_empty(&rx_queue);
317 }
318 #endif
320 struct netrx_pending_operations {
321 unsigned trans_prod, trans_cons;
322 unsigned mmu_prod, mmu_cons;
323 unsigned mcl_prod, mcl_cons;
324 unsigned copy_prod, copy_cons;
325 unsigned meta_prod, meta_cons;
326 mmu_update_t *mmu;
327 gnttab_transfer_t *trans;
328 gnttab_copy_t *copy;
329 multicall_entry_t *mcl;
330 struct netbk_rx_meta *meta;
331 };
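/*
 * Scratch state for one net_rx_action() pass: producer/consumer counters into
 * the arrays supplied by net_rx_action(), batching the MMU updates, grant
 * transfers, grant copies and multicall entries so a whole run of the rx
 * queue needs only one HYPERVISOR_multicall().
 */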
333 /* Set up the grant operations for this fragment. If it's a flipping
334 interface, we also set up the page transfer and MMU updates from here. */
335 static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
336 int i, struct netrx_pending_operations *npo,
337 struct page *page, unsigned long size,
338 unsigned long offset)
339 {
340 mmu_update_t *mmu;
341 gnttab_transfer_t *gop;
342 gnttab_copy_t *copy_gop;
343 multicall_entry_t *mcl;
344 netif_rx_request_t *req;
345 unsigned long old_mfn, new_mfn;
347 old_mfn = virt_to_mfn(page_address(page));
349 req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
350 if (netif->copying_receiver) {
351 /* The fragment needs to be copied rather than
352 flipped. */
353 meta->copy = 1;
354 copy_gop = npo->copy + npo->copy_prod++;
355 copy_gop->flags = GNTCOPY_dest_gref;
356 if (PageForeign(page)) {
357 struct pending_tx_info *src_pend =
358 &pending_tx_info[netif_page_index(page)];
359 copy_gop->source.domid = src_pend->netif->domid;
360 copy_gop->source.u.ref = src_pend->req.gref;
361 copy_gop->flags |= GNTCOPY_source_gref;
362 } else {
363 copy_gop->source.domid = DOMID_SELF;
364 copy_gop->source.u.gmfn = old_mfn;
365 }
366 copy_gop->source.offset = offset;
367 copy_gop->dest.domid = netif->domid;
368 copy_gop->dest.offset = 0;
369 copy_gop->dest.u.ref = req->gref;
370 copy_gop->len = size;
371 } else {
372 meta->copy = 0;
373 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
374 new_mfn = alloc_mfn();
376 /*
377 * Set the new P2M table entry before
378 * reassigning the old data page. Heed the
379 * comment in pgtable-2level.h:pte_page(). :-)
380 */
381 set_phys_to_machine(page_to_pfn(page), new_mfn);
383 mcl = npo->mcl + npo->mcl_prod++;
384 MULTI_update_va_mapping(mcl,
385 (unsigned long)page_address(page),
386 pfn_pte_ma(new_mfn, PAGE_KERNEL),
387 0);
389 mmu = npo->mmu + npo->mmu_prod++;
390 mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
391 MMU_MACHPHYS_UPDATE;
392 mmu->val = page_to_pfn(page);
393 }
395 gop = npo->trans + npo->trans_prod++;
396 gop->mfn = old_mfn;
397 gop->domid = netif->domid;
398 gop->ref = req->gref;
399 }
400 return req->id;
401 }
403 static void netbk_gop_skb(struct sk_buff *skb,
404 struct netrx_pending_operations *npo)
405 {
406 netif_t *netif = netdev_priv(skb->dev);
407 int nr_frags = skb_shinfo(skb)->nr_frags;
408 int i;
409 int extra;
410 struct netbk_rx_meta *head_meta, *meta;
412 head_meta = npo->meta + npo->meta_prod++;
413 head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
414 head_meta->frag.size = skb_shinfo(skb)->gso_size;
415 extra = !!head_meta->frag.size + 1;
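/*
 * The head slot's frag is reused above to stash gso_type/gso_size until the
 * responses are generated; 'extra' counts the header slot plus the optional
 * extra-info slot, so fragment slots are indexed after them.
 */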
417 for (i = 0; i < nr_frags; i++) {
418 meta = npo->meta + npo->meta_prod++;
419 meta->frag = skb_shinfo(skb)->frags[i];
420 meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
421 meta->frag.page,
422 meta->frag.size,
423 meta->frag.page_offset);
424 }
426 /*
427 * This must occur at the end to ensure that we don't trash
428 * skb_shinfo until we're done.
429 */
430 head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
431 virt_to_page(skb->data),
432 skb_headlen(skb),
433 offset_in_page(skb->data));
435 netif->rx.req_cons += nr_frags + extra;
436 }
438 static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
439 {
440 int i;
442 for (i = 0; i < nr_frags; i++)
443 put_page(meta[i].frag.page);
444 }
446 /* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
447 used to set up the operations on the top of
448 netrx_pending_operations, which have since been done. Check that
449 they didn't give any errors and advance over them. */
450 static int netbk_check_gop(int nr_frags, domid_t domid,
451 struct netrx_pending_operations *npo)
452 {
453 multicall_entry_t *mcl;
454 gnttab_transfer_t *gop;
455 gnttab_copy_t *copy_op;
456 int status = NETIF_RSP_OKAY;
457 int i;
459 for (i = 0; i <= nr_frags; i++) {
460 if (npo->meta[npo->meta_cons + i].copy) {
461 copy_op = npo->copy + npo->copy_cons++;
462 if (copy_op->status != GNTST_okay) {
463 DPRINTK("Bad status %d from copy to DOM%d.\n",
464 copy_op->status, domid);
465 status = NETIF_RSP_ERROR;
466 }
467 } else {
468 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
469 mcl = npo->mcl + npo->mcl_cons++;
470 /* The update_va_mapping() must not fail. */
471 BUG_ON(mcl->result != 0);
472 }
474 gop = npo->trans + npo->trans_cons++;
475 /* Check the reassignment error code. */
476 if (gop->status != 0) {
477 DPRINTK("Bad status %d from grant transfer to DOM%u\n",
478 gop->status, domid);
479 /*
480 * Page no longer belongs to us unless
481 * GNTST_bad_page, but that should be
482 * a fatal error anyway.
483 */
484 BUG_ON(gop->status == GNTST_bad_page);
485 status = NETIF_RSP_ERROR;
486 }
487 }
488 }
490 return status;
491 }
493 static void netbk_add_frag_responses(netif_t *netif, int status,
494 struct netbk_rx_meta *meta, int nr_frags)
495 {
496 int i;
497 unsigned long offset;
499 for (i = 0; i < nr_frags; i++) {
500 int id = meta[i].id;
501 int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
503 if (meta[i].copy)
504 offset = 0;
505 else
506 offset = meta[i].frag.page_offset;
507 make_rx_response(netif, id, status, offset,
508 meta[i].frag.size, flags);
509 }
510 }
512 static void net_rx_action(unsigned long unused)
513 {
514 netif_t *netif = NULL;
515 s8 status;
516 u16 id, irq, flags;
517 netif_rx_response_t *resp;
518 multicall_entry_t *mcl;
519 struct sk_buff_head rxq;
520 struct sk_buff *skb;
521 int notify_nr = 0;
522 int ret;
523 int nr_frags;
524 int count;
525 unsigned long offset;
527 /*
528 * Putting hundreds of bytes on the stack is considered rude.
529 * Static works because a tasklet can only be on one CPU at any time.
530 */
531 static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
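/* The +3 leaves room for the mmu_update, grant-transfer and grant-copy
   multicall entries appended after the per-page update_va_mapping entries. */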
532 static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
533 static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
534 static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
535 static unsigned char rx_notify[NR_IRQS];
536 static u16 notify_list[NET_RX_RING_SIZE];
537 static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
539 struct netrx_pending_operations npo = {
540 mmu: rx_mmu,
541 trans: grant_trans_op,
542 copy: grant_copy_op,
543 mcl: rx_mcl,
544 meta: meta};
546 skb_queue_head_init(&rxq);
548 count = 0;
550 while ((skb = skb_dequeue(&rx_queue)) != NULL) {
551 nr_frags = skb_shinfo(skb)->nr_frags;
552 *(int *)skb->cb = nr_frags;
554 if (!xen_feature(XENFEAT_auto_translated_physmap) &&
555 check_mfn(nr_frags + 1)) {
556 /* Memory squeeze? Back off for an arbitrary while. */
557 if ( net_ratelimit() )
558 WPRINTK("Memory squeeze in netback "
559 "driver.\n");
560 mod_timer(&net_timer, jiffies + HZ);
561 skb_queue_head(&rx_queue, skb);
562 break;
563 }
565 netbk_gop_skb(skb, &npo);
567 count += nr_frags + 1;
569 __skb_queue_tail(&rxq, skb);
571 /* Filled the batch queue? */
572 if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
573 break;
574 }
576 if (npo.mcl_prod &&
577 !xen_feature(XENFEAT_auto_translated_physmap)) {
578 mcl = npo.mcl + npo.mcl_prod++;
580 BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
581 mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
583 mcl->op = __HYPERVISOR_mmu_update;
584 mcl->args[0] = (unsigned long)rx_mmu;
585 mcl->args[1] = npo.mmu_prod;
586 mcl->args[2] = 0;
587 mcl->args[3] = DOMID_SELF;
588 }
590 if (npo.trans_prod) {
591 mcl = npo.mcl + npo.mcl_prod++;
592 mcl->op = __HYPERVISOR_grant_table_op;
593 mcl->args[0] = GNTTABOP_transfer;
594 mcl->args[1] = (unsigned long)grant_trans_op;
595 mcl->args[2] = npo.trans_prod;
596 }
598 if (npo.copy_prod) {
599 mcl = npo.mcl + npo.mcl_prod++;
600 mcl->op = __HYPERVISOR_grant_table_op;
601 mcl->args[0] = GNTTABOP_copy;
602 mcl->args[1] = (unsigned long)grant_copy_op;
603 mcl->args[2] = npo.copy_prod;
604 }
606 /* Nothing to do? */
607 if (!npo.mcl_prod)
608 return;
610 BUG_ON(npo.copy_prod > NET_RX_RING_SIZE);
611 BUG_ON(npo.mmu_prod > NET_RX_RING_SIZE);
612 BUG_ON(npo.trans_prod > NET_RX_RING_SIZE);
613 BUG_ON(npo.mcl_prod > NET_RX_RING_SIZE+3);
614 BUG_ON(npo.meta_prod > NET_RX_RING_SIZE);
616 ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
617 BUG_ON(ret != 0);
619 while ((skb = __skb_dequeue(&rxq)) != NULL) {
620 nr_frags = *(int *)skb->cb;
622 netif = netdev_priv(skb->dev);
623 /* We can't rely on skb_release_data to release the
624 pages used by fragments for us, since it tries to
625 touch the pages in the fraglist. If we're in
626 flipping mode, that doesn't work. In copying mode,
627 we still have access to all of the pages, and so
628 it's safe to let release_data deal with it. */
629 /* (Freeing the fragments is safe since we copy
630 non-linear skbs destined for flipping interfaces) */
631 if (!netif->copying_receiver) {
632 atomic_set(&(skb_shinfo(skb)->dataref), 1);
633 skb_shinfo(skb)->frag_list = NULL;
634 skb_shinfo(skb)->nr_frags = 0;
635 netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
636 }
638 netif->stats.tx_bytes += skb->len;
639 netif->stats.tx_packets++;
641 status = netbk_check_gop(nr_frags, netif->domid, &npo);
643 id = meta[npo.meta_cons].id;
644 flags = nr_frags ? NETRXF_more_data : 0;
646 if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
647 flags |= NETRXF_csum_blank | NETRXF_data_validated;
648 else if (skb->proto_data_valid) /* remote but checksummed? */
649 flags |= NETRXF_data_validated;
651 if (meta[npo.meta_cons].copy)
652 offset = 0;
653 else
654 offset = offset_in_page(skb->data);
655 resp = make_rx_response(netif, id, status, offset,
656 skb_headlen(skb), flags);
658 if (meta[npo.meta_cons].frag.size) {
659 struct netif_extra_info *gso =
660 (struct netif_extra_info *)
661 RING_GET_RESPONSE(&netif->rx,
662 netif->rx.rsp_prod_pvt++);
664 resp->flags |= NETRXF_extra_info;
666 gso->u.gso.size = meta[npo.meta_cons].frag.size;
667 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
668 gso->u.gso.pad = 0;
669 gso->u.gso.features = 0;
671 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
672 gso->flags = 0;
673 }
675 netbk_add_frag_responses(netif, status,
676 meta + npo.meta_cons + 1,
677 nr_frags);
679 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
680 irq = netif->irq;
681 if (ret && !rx_notify[irq]) {
682 rx_notify[irq] = 1;
683 notify_list[notify_nr++] = irq;
684 }
686 if (netif_queue_stopped(netif->dev) &&
687 netif_schedulable(netif) &&
688 !netbk_queue_full(netif))
689 netif_wake_queue(netif->dev);
691 netif_put(netif);
692 dev_kfree_skb(skb);
693 npo.meta_cons += nr_frags + 1;
694 }
696 while (notify_nr != 0) {
697 irq = notify_list[--notify_nr];
698 rx_notify[irq] = 0;
699 notify_remote_via_irq(irq);
700 }
702 /* More work to do? */
703 if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
704 tasklet_schedule(&net_rx_tasklet);
705 #if 0
706 else
707 xen_network_done_notify();
708 #endif
709 }
711 static void net_alarm(unsigned long unused)
712 {
713 tasklet_schedule(&net_rx_tasklet);
714 }
716 struct net_device_stats *netif_be_get_stats(struct net_device *dev)
717 {
718 netif_t *netif = netdev_priv(dev);
719 return &netif->stats;
720 }
722 static int __on_net_schedule_list(netif_t *netif)
723 {
724 return netif->list.next != NULL;
725 }
727 static void remove_from_net_schedule_list(netif_t *netif)
728 {
729 spin_lock_irq(&net_schedule_list_lock);
730 if (likely(__on_net_schedule_list(netif))) {
731 list_del(&netif->list);
732 netif->list.next = NULL;
733 netif_put(netif);
734 }
735 spin_unlock_irq(&net_schedule_list_lock);
736 }
738 static void add_to_net_schedule_list_tail(netif_t *netif)
739 {
740 if (__on_net_schedule_list(netif))
741 return;
743 spin_lock_irq(&net_schedule_list_lock);
744 if (!__on_net_schedule_list(netif) &&
745 likely(netif_schedulable(netif))) {
746 list_add_tail(&netif->list, &net_schedule_list);
747 netif_get(netif);
748 }
749 spin_unlock_irq(&net_schedule_list_lock);
750 }
752 /*
753 * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
754 * If this driver is pipelining transmit requests then we can be very
755 * aggressive in avoiding new-packet notifications -- the frontend only needs
756 * to send a notification if there are no outstanding unreceived responses.
757 * If we may be buffering transmit requests for any reason then we must be
758 * rather more conservative and treat this as the final check for pending work.
759 */
760 void netif_schedule_work(netif_t *netif)
761 {
762 int more_to_do;
764 #ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
765 more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
766 #else
767 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
768 #endif
770 if (more_to_do) {
771 add_to_net_schedule_list_tail(netif);
772 maybe_schedule_tx_action();
773 }
774 }
776 void netif_deschedule_work(netif_t *netif)
777 {
778 remove_from_net_schedule_list(netif);
779 }
782 static void tx_add_credit(netif_t *netif)
783 {
784 unsigned long max_burst, max_credit;
786 /*
787 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
788 * Otherwise the interface can seize up due to insufficient credit.
789 */
790 max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
791 max_burst = min(max_burst, 131072UL);
792 max_burst = max(max_burst, netif->credit_bytes);
794 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
795 max_credit = netif->remaining_credit + netif->credit_bytes;
796 if (max_credit < netif->remaining_credit)
797 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
799 netif->remaining_credit = min(max_credit, max_burst);
800 }
802 static void tx_credit_callback(unsigned long data)
803 {
804 netif_t *netif = (netif_t *)data;
805 tx_add_credit(netif);
806 netif_schedule_work(netif);
807 }
809 inline static void net_tx_action_dealloc(void)
810 {
811 gnttab_unmap_grant_ref_t *gop;
812 u16 pending_idx;
813 PEND_RING_IDX dc, dp;
814 netif_t *netif;
815 int ret;
817 dc = dealloc_cons;
818 dp = dealloc_prod;
820 /* Ensure we see all indexes enqueued by netif_idx_release(). */
821 smp_rmb();
823 /*
824 * Free up any grants we have finished using
825 */
826 gop = tx_unmap_ops;
827 while (dc != dp) {
828 pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
829 gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
830 GNTMAP_host_map,
831 grant_tx_handle[pending_idx]);
832 gop++;
833 }
834 ret = HYPERVISOR_grant_table_op(
835 GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
836 BUG_ON(ret);
838 while (dealloc_cons != dp) {
839 pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
841 netif = pending_tx_info[pending_idx].netif;
843 make_tx_response(netif, &pending_tx_info[pending_idx].req,
844 NETIF_RSP_OKAY);
846 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
848 netif_put(netif);
849 }
850 }
852 static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
853 {
854 RING_IDX cons = netif->tx.req_cons;
856 do {
857 make_tx_response(netif, txp, NETIF_RSP_ERROR);
858 if (cons >= end)
859 break;
860 txp = RING_GET_REQUEST(&netif->tx, cons++);
861 } while (1);
862 netif->tx.req_cons = cons;
863 netif_schedule_work(netif);
864 netif_put(netif);
865 }
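/*
 * Walk the chain of NETTXF_more_data requests that follows 'first', copying
 * them into txp[]. Returns the number of fragment requests consumed, or minus
 * that count on error so the caller can skip past the offending requests.
 */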
867 static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
868 netif_tx_request_t *txp, int work_to_do)
869 {
870 RING_IDX cons = netif->tx.req_cons;
871 int frags = 0;
873 if (!(first->flags & NETTXF_more_data))
874 return 0;
876 do {
877 if (frags >= work_to_do) {
878 DPRINTK("Need more frags\n");
879 return -frags;
880 }
882 if (unlikely(frags >= MAX_SKB_FRAGS)) {
883 DPRINTK("Too many frags\n");
884 return -frags;
885 }
887 memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
888 sizeof(*txp));
889 if (txp->size > first->size) {
890 DPRINTK("Frags galore\n");
891 return -frags;
892 }
894 first->size -= txp->size;
895 frags++;
897 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
898 DPRINTK("txp->offset: %x, size: %u\n",
899 txp->offset, txp->size);
900 return -frags;
901 }
902 } while ((txp++)->flags & NETTXF_more_data);
904 return frags;
905 }
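/*
 * Queue a grant-map operation for each remaining fragment. The fragment's
 * pending_idx is stashed in frags[i].page for now; netbk_fill_frags() turns
 * it back into a real page pointer once the grants have been mapped.
 */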
907 static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
908 struct sk_buff *skb,
909 netif_tx_request_t *txp,
910 gnttab_map_grant_ref_t *mop)
911 {
912 struct skb_shared_info *shinfo = skb_shinfo(skb);
913 skb_frag_t *frags = shinfo->frags;
914 unsigned long pending_idx = *((u16 *)skb->data);
915 int i, start;
917 /* Skip first skb fragment if it is on same page as header fragment. */
918 start = ((unsigned long)shinfo->frags[0].page == pending_idx);
920 for (i = start; i < shinfo->nr_frags; i++, txp++) {
921 pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
923 gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
924 GNTMAP_host_map | GNTMAP_readonly,
925 txp->gref, netif->domid);
927 memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
928 netif_get(netif);
929 pending_tx_info[pending_idx].netif = netif;
930 frags[i].page = (void *)pending_idx;
931 }
933 return mop;
934 }
936 static int netbk_tx_check_mop(struct sk_buff *skb,
937 gnttab_map_grant_ref_t **mopp)
938 {
939 gnttab_map_grant_ref_t *mop = *mopp;
940 int pending_idx = *((u16 *)skb->data);
941 netif_t *netif = pending_tx_info[pending_idx].netif;
942 netif_tx_request_t *txp;
943 struct skb_shared_info *shinfo = skb_shinfo(skb);
944 int nr_frags = shinfo->nr_frags;
945 int i, err, start;
947 /* Check status of header. */
948 err = mop->status;
949 if (unlikely(err)) {
950 txp = &pending_tx_info[pending_idx].req;
951 make_tx_response(netif, txp, NETIF_RSP_ERROR);
952 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
953 netif_put(netif);
954 } else {
955 set_phys_to_machine(
956 __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
957 FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
958 grant_tx_handle[pending_idx] = mop->handle;
959 }
961 /* Skip first skb fragment if it is on same page as header fragment. */
962 start = ((unsigned long)shinfo->frags[0].page == pending_idx);
964 for (i = start; i < nr_frags; i++) {
965 int j, newerr;
967 pending_idx = (unsigned long)shinfo->frags[i].page;
969 /* Check error status: if okay then remember grant handle. */
970 newerr = (++mop)->status;
971 if (likely(!newerr)) {
972 set_phys_to_machine(
973 __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
974 FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
975 grant_tx_handle[pending_idx] = mop->handle;
976 /* Had a previous error? Invalidate this fragment. */
977 if (unlikely(err))
978 netif_idx_release(pending_idx);
979 continue;
980 }
982 /* Error on this fragment: respond to client with an error. */
983 txp = &pending_tx_info[pending_idx].req;
984 make_tx_response(netif, txp, NETIF_RSP_ERROR);
985 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
986 netif_put(netif);
988 /* Not the first error? Preceding frags already invalidated. */
989 if (err)
990 continue;
992 /* First error: invalidate header and preceding fragments. */
993 pending_idx = *((u16 *)skb->data);
994 netif_idx_release(pending_idx);
995 for (j = start; j < i; j++) {
996 pending_idx = (unsigned long)shinfo->frags[i].page;
997 netif_idx_release(pending_idx);
998 }
1000 /* Remember the error: invalidate all subsequent fragments. */
1001 err = newerr;
1002 }
1004 *mopp = mop + 1;
1005 return err;
1006 }
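/*
 * Replace the pending_idx values stashed in frags[].page with the pages that
 * now back the mapped grants, and account the fragment sizes to the skb.
 */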
1008 static void netbk_fill_frags(struct sk_buff *skb)
1009 {
1010 struct skb_shared_info *shinfo = skb_shinfo(skb);
1011 int nr_frags = shinfo->nr_frags;
1012 int i;
1014 for (i = 0; i < nr_frags; i++) {
1015 skb_frag_t *frag = shinfo->frags + i;
1016 netif_tx_request_t *txp;
1017 unsigned long pending_idx;
1019 pending_idx = (unsigned long)frag->page;
1020 txp = &pending_tx_info[pending_idx].req;
1021 frag->page = virt_to_page(idx_to_kaddr(pending_idx));
1022 frag->size = txp->size;
1023 frag->page_offset = txp->offset;
1025 skb->len += txp->size;
1026 skb->data_len += txp->size;
1027 skb->truesize += txp->size;
1028 }
1029 }
1031 int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
1032 int work_to_do)
1033 {
1034 struct netif_extra_info extra;
1035 RING_IDX cons = netif->tx.req_cons;
1037 do {
1038 if (unlikely(work_to_do-- <= 0)) {
1039 DPRINTK("Missing extra info\n");
1040 return -EBADR;
1041 }
1043 memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
1044 sizeof(extra));
1045 if (unlikely(!extra.type ||
1046 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1047 netif->tx.req_cons = ++cons;
1048 DPRINTK("Invalid extra type: %d\n", extra.type);
1049 return -EINVAL;
1050 }
1052 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1053 netif->tx.req_cons = ++cons;
1054 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1056 return work_to_do;
1057 }
1059 static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
1060 {
1061 if (!gso->u.gso.size) {
1062 DPRINTK("GSO size must not be zero.\n");
1063 return -EINVAL;
1064 }
1066 /* Currently only TCPv4 S.O. is supported. */
1067 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1068 DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
1069 return -EINVAL;
1070 }
1072 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1073 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1075 /* Header must be checked, and gso_segs computed. */
1076 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1077 skb_shinfo(skb)->gso_segs = 0;
1079 return 0;
1080 }
1082 /* Called after netfront has transmitted */
1083 static void net_tx_action(unsigned long unused)
1084 {
1085 struct list_head *ent;
1086 struct sk_buff *skb;
1087 netif_t *netif;
1088 netif_tx_request_t txreq;
1089 netif_tx_request_t txfrags[MAX_SKB_FRAGS];
1090 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
1091 u16 pending_idx;
1092 RING_IDX i;
1093 gnttab_map_grant_ref_t *mop;
1094 unsigned int data_len;
1095 int ret, work_to_do;
1097 if (dealloc_cons != dealloc_prod)
1098 net_tx_action_dealloc();
1100 mop = tx_map_ops;
1101 while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
1102 !list_empty(&net_schedule_list)) {
1103 /* Get a netif from the list with work to do. */
1104 ent = net_schedule_list.next;
1105 netif = list_entry(ent, netif_t, list);
1106 netif_get(netif);
1107 remove_from_net_schedule_list(netif);
1109 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
1110 if (!work_to_do) {
1111 netif_put(netif);
1112 continue;
1113 }
1115 i = netif->tx.req_cons;
1116 rmb(); /* Ensure that we see the request before we copy it. */
1117 memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
1119 /* Credit-based scheduling. */
1120 if (txreq.size > netif->remaining_credit) {
1121 unsigned long now = jiffies;
1122 unsigned long next_credit =
1123 netif->credit_timeout.expires +
1124 msecs_to_jiffies(netif->credit_usec / 1000);
1126 /* Timer could already be pending in rare cases. */
1127 if (timer_pending(&netif->credit_timeout)) {
1128 netif_put(netif);
1129 continue;
1130 }
1132 /* Passed the point where we can replenish credit? */
1133 if (time_after_eq(now, next_credit)) {
1134 netif->credit_timeout.expires = now;
1135 tx_add_credit(netif);
1136 }
1138 /* Still too big to send right now? Set a callback. */
1139 if (txreq.size > netif->remaining_credit) {
1140 netif->credit_timeout.data =
1141 (unsigned long)netif;
1142 netif->credit_timeout.function =
1143 tx_credit_callback;
1144 __mod_timer(&netif->credit_timeout,
1145 next_credit);
1146 netif_put(netif);
1147 continue;
1148 }
1149 }
1150 netif->remaining_credit -= txreq.size;
1152 work_to_do--;
1153 netif->tx.req_cons = ++i;
1155 memset(extras, 0, sizeof(extras));
1156 if (txreq.flags & NETTXF_extra_info) {
1157 work_to_do = netbk_get_extras(netif, extras,
1158 work_to_do);
1159 i = netif->tx.req_cons;
1160 if (unlikely(work_to_do < 0)) {
1161 netbk_tx_err(netif, &txreq, i);
1162 continue;
1163 }
1164 }
1166 ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
1167 if (unlikely(ret < 0)) {
1168 netbk_tx_err(netif, &txreq, i - ret);
1169 continue;
1170 }
1171 i += ret;
1173 if (unlikely(txreq.size < ETH_HLEN)) {
1174 DPRINTK("Bad packet size: %d\n", txreq.size);
1175 netbk_tx_err(netif, &txreq, i);
1176 continue;
1177 }
1179 /* No crossing a page as the payload mustn't fragment. */
1180 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1181 DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
1182 txreq.offset, txreq.size,
1183 (txreq.offset &~PAGE_MASK) + txreq.size);
1184 netbk_tx_err(netif, &txreq, i);
1185 continue;
1186 }
1188 pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
1190 data_len = (txreq.size > PKT_PROT_LEN &&
1191 ret < MAX_SKB_FRAGS) ?
1192 PKT_PROT_LEN : txreq.size;
1194 skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
1195 GFP_ATOMIC | __GFP_NOWARN);
1196 if (unlikely(skb == NULL)) {
1197 DPRINTK("Can't allocate a skb in start_xmit.\n");
1198 netbk_tx_err(netif, &txreq, i);
1199 break;
1200 }
1202 /* Packets passed to netif_rx() must have some headroom. */
1203 skb_reserve(skb, 16 + NET_IP_ALIGN);
1205 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1206 struct netif_extra_info *gso;
1207 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1209 if (netbk_set_skb_gso(skb, gso)) {
1210 kfree_skb(skb);
1211 netbk_tx_err(netif, &txreq, i);
1212 continue;
1213 }
1214 }
1216 gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
1217 GNTMAP_host_map | GNTMAP_readonly,
1218 txreq.gref, netif->domid);
1219 mop++;
1221 memcpy(&pending_tx_info[pending_idx].req,
1222 &txreq, sizeof(txreq));
1223 pending_tx_info[pending_idx].netif = netif;
1224 *((u16 *)skb->data) = pending_idx;
1226 __skb_put(skb, data_len);
1228 skb_shinfo(skb)->nr_frags = ret;
1229 if (data_len < txreq.size) {
1230 skb_shinfo(skb)->nr_frags++;
1231 skb_shinfo(skb)->frags[0].page =
1232 (void *)(unsigned long)pending_idx;
1233 } else {
1234 /* Discriminate from any valid pending_idx value. */
1235 skb_shinfo(skb)->frags[0].page = (void *)~0UL;
1236 }
1238 __skb_queue_tail(&tx_queue, skb);
1240 pending_cons++;
1242 mop = netbk_get_requests(netif, skb, txfrags, mop);
1244 netif->tx.req_cons = i;
1245 netif_schedule_work(netif);
1247 if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
1248 break;
1249 }
1251 if (mop == tx_map_ops)
1252 return;
1254 ret = HYPERVISOR_grant_table_op(
1255 GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
1256 BUG_ON(ret);
1258 mop = tx_map_ops;
1259 while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
1260 netif_tx_request_t *txp;
1262 pending_idx = *((u16 *)skb->data);
1263 netif = pending_tx_info[pending_idx].netif;
1264 txp = &pending_tx_info[pending_idx].req;
1266 /* Check the remap error code. */
1267 if (unlikely(netbk_tx_check_mop(skb, &mop))) {
1268 printk(KERN_ALERT "#### netback grant fails\n");
1269 skb_shinfo(skb)->nr_frags = 0;
1270 kfree_skb(skb);
1271 continue;
1272 }
1274 data_len = skb->len;
1275 memcpy(skb->data,
1276 (void *)(idx_to_kaddr(pending_idx)|txp->offset),
1277 data_len);
1278 if (data_len < txp->size) {
1279 /* Append the packet payload as a fragment. */
1280 txp->offset += data_len;
1281 txp->size -= data_len;
1282 } else {
1283 /* Schedule a response immediately. */
1284 netif_idx_release(pending_idx);
1285 }
1287 /*
1288 * Old frontends do not assert data_validated but we
1289 * can infer it from csum_blank so test both flags.
1290 */
1291 if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
1292 skb->ip_summed = CHECKSUM_UNNECESSARY;
1293 skb->proto_data_valid = 1;
1294 } else {
1295 skb->ip_summed = CHECKSUM_NONE;
1296 skb->proto_data_valid = 0;
1297 }
1298 skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
1300 netbk_fill_frags(skb);
1302 skb->dev = netif->dev;
1303 skb->protocol = eth_type_trans(skb, skb->dev);
1305 netif->stats.rx_bytes += skb->len;
1306 netif->stats.rx_packets++;
1308 netif_rx(skb);
1309 netif->dev->last_rx = jiffies;
1310 }
1311 }
1313 static void netif_idx_release(u16 pending_idx)
1314 {
1315 static DEFINE_SPINLOCK(_lock);
1316 unsigned long flags;
1318 spin_lock_irqsave(&_lock, flags);
1319 dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
1320 /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
1321 smp_wmb();
1322 dealloc_prod++;
1323 spin_unlock_irqrestore(&_lock, flags);
1325 tasklet_schedule(&net_tx_tasklet);
1326 }
1328 static void netif_page_release(struct page *page)
1329 {
1330 /* Ready for next use. */
1331 init_page_count(page);
1333 netif_idx_release(netif_page_index(page));
1334 }
1336 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
1337 {
1338 netif_t *netif = dev_id;
1340 add_to_net_schedule_list_tail(netif);
1341 maybe_schedule_tx_action();
1343 if (netif_schedulable(netif) && !netbk_queue_full(netif))
1344 netif_wake_queue(netif->dev);
1346 return IRQ_HANDLED;
1347 }
1349 static void make_tx_response(netif_t *netif,
1350 netif_tx_request_t *txp,
1351 s8 st)
1352 {
1353 RING_IDX i = netif->tx.rsp_prod_pvt;
1354 netif_tx_response_t *resp;
1355 int notify;
1357 resp = RING_GET_RESPONSE(&netif->tx, i);
1358 resp->id = txp->id;
1359 resp->status = st;
1361 if (txp->flags & NETTXF_extra_info)
1362 RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
1364 netif->tx.rsp_prod_pvt = ++i;
1365 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
1366 if (notify)
1367 notify_remote_via_irq(netif->irq);
1369 #ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
1370 if (i == netif->tx.req_cons) {
1371 int more_to_do;
1372 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
1373 if (more_to_do)
1374 add_to_net_schedule_list_tail(netif);
1375 }
1376 #endif
1377 }
1379 static netif_rx_response_t *make_rx_response(netif_t *netif,
1380 u16 id,
1381 s8 st,
1382 u16 offset,
1383 u16 size,
1384 u16 flags)
1385 {
1386 RING_IDX i = netif->rx.rsp_prod_pvt;
1387 netif_rx_response_t *resp;
1389 resp = RING_GET_RESPONSE(&netif->rx, i);
1390 resp->offset = offset;
1391 resp->flags = flags;
1392 resp->id = id;
1393 resp->status = (s16)size;
1394 if (st < 0)
1395 resp->status = (s16)st;
1397 netif->rx.rsp_prod_pvt = ++i;
1399 return resp;
1400 }
1402 #ifdef NETBE_DEBUG_INTERRUPT
1403 static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
1404 {
1405 struct list_head *ent;
1406 netif_t *netif;
1407 int i = 0;
1409 printk(KERN_ALERT "netif_schedule_list:\n");
1410 spin_lock_irq(&net_schedule_list_lock);
1412 list_for_each (ent, &net_schedule_list) {
1413 netif = list_entry(ent, netif_t, list);
1414 printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
1415 "rx_resp_prod=%08x\n",
1416 i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
1417 printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
1418 netif->tx.req_cons, netif->tx.rsp_prod_pvt);
1419 printk(KERN_ALERT " shared(rx_req_prod=%08x "
1420 "rx_resp_prod=%08x\n",
1421 netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
1422 printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
1423 netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
1424 printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
1425 netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
1426 i++;
1427 }
1429 spin_unlock_irq(&net_schedule_list_lock);
1430 printk(KERN_ALERT " ** End of netif_schedule_list **\n");
1432 return IRQ_HANDLED;
1433 }
1434 #endif
1436 static int __init netback_init(void)
1437 {
1438 int i;
1439 struct page *page;
1441 if (!is_running_on_xen())
1442 return -ENODEV;
1444 /* We can increase reservation by this much in net_rx_action(). */
1445 balloon_update_driver_allowance(NET_RX_RING_SIZE);
1447 skb_queue_head_init(&rx_queue);
1448 skb_queue_head_init(&tx_queue);
1450 init_timer(&net_timer);
1451 net_timer.data = 0;
1452 net_timer.function = net_alarm;
1454 mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
1455 if (mmap_pages == NULL) {
1456 printk("%s: out of memory\n", __FUNCTION__);
1457 return -ENOMEM;
1458 }
1460 for (i = 0; i < MAX_PENDING_REQS; i++) {
1461 page = mmap_pages[i];
1462 SetPageForeign(page, netif_page_release);
1463 netif_page_index(page) = i;
1464 }
1466 pending_cons = 0;
1467 pending_prod = MAX_PENDING_REQS;
1468 for (i = 0; i < MAX_PENDING_REQS; i++)
1469 pending_ring[i] = i;
1471 spin_lock_init(&net_schedule_list_lock);
1472 INIT_LIST_HEAD(&net_schedule_list);
1474 netif_xenbus_init();
1476 #ifdef NETBE_DEBUG_INTERRUPT
1477 (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
1478 0,
1479 netif_be_dbg,
1480 SA_SHIRQ,
1481 "net-be-dbg",
1482 &netif_be_dbg);
1483 #endif
1485 return 0;
1486 }
1488 module_init(netback_init);
1490 MODULE_LICENSE("Dual BSD/GPL");