ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/netback/netback.c @ 14426:9ff349bde23a

netback: Quieten loud error path.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Mar 15 14:51:32 2007 +0000 (2007-03-15)
parents 68282f4b3e0f
children 6139337e2690
line source
1 /******************************************************************************
2 * drivers/xen/netback/netback.c
3 *
4 * Back-end of the driver for virtual network devices. This portion of the
5 * driver exports a 'unified' network-device interface that can be accessed
6 * by any operating system that implements a compatible front end. A
7 * reference front-end implementation can be found in:
8 * drivers/xen/netfront/netfront.c
9 *
10 * Copyright (c) 2002-2005, K A Fraser
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation; or, when distributed
15 * separately from the Linux kernel or incorporated into other
16 * software packages, subject to the following license:
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a copy
19 * of this source file (the "Software"), to deal in the Software without
20 * restriction, including without limitation the rights to use, copy, modify,
21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22 * and to permit persons to whom the Software is furnished to do so, subject to
23 * the following conditions:
24 *
25 * The above copyright notice and this permission notice shall be included in
26 * all copies or substantial portions of the Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 * IN THE SOFTWARE.
35 */
37 #include "common.h"
38 #include <xen/balloon.h>
39 #include <xen/interface/memory.h>
41 /*define NETBE_DEBUG_INTERRUPT*/
43 /* extra field used in struct page */
44 #define netif_page_index(pg) (*(long *)&(pg)->mapping)
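/*
 * For pages backing foreign (frontend-granted) data, the otherwise unused
 * page->mapping field is reused to record the page's index into
 * mmap_pages[]/pending_tx_info[]; netback_init() sets it and
 * netbk_gop_frag() reads it back for copying receivers.
 */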
46 struct netbk_rx_meta {
47 skb_frag_t frag;
48 int id;
49 int copy:1;
50 };
52 static void netif_idx_release(u16 pending_idx);
53 static void netif_page_release(struct page *page);
54 static void make_tx_response(netif_t *netif,
55 netif_tx_request_t *txp,
56 s8 st);
57 static netif_rx_response_t *make_rx_response(netif_t *netif,
58 u16 id,
59 s8 st,
60 u16 offset,
61 u16 size,
62 u16 flags);
64 static void net_tx_action(unsigned long unused);
65 static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
67 static void net_rx_action(unsigned long unused);
68 static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
70 static struct timer_list net_timer;
72 #define MAX_PENDING_REQS 256
74 static struct sk_buff_head rx_queue;
76 static struct page **mmap_pages;
77 static inline unsigned long idx_to_kaddr(unsigned int idx)
78 {
79 return (unsigned long)pfn_to_kaddr(page_to_pfn(mmap_pages[idx]));
80 }
82 #define PKT_PROT_LEN 64
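/*
 * Roughly: up to PKT_PROT_LEN bytes of each transmit request are copied
 * into the skb's linear area so protocol headers can be inspected without
 * touching the grant-mapped pages; any remainder is attached as a
 * fragment (see net_tx_action()).
 */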
84 static struct pending_tx_info {
85 netif_tx_request_t req;
86 netif_t *netif;
87 } pending_tx_info[MAX_PENDING_REQS];
88 static u16 pending_ring[MAX_PENDING_REQS];
89 typedef unsigned int PEND_RING_IDX;
90 #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
91 static PEND_RING_IDX pending_prod, pending_cons;
92 #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
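/*
 * pending_ring[] holds indices of free pending_tx_info[] slots: slots are
 * taken at pending_cons and returned at pending_prod, so NR_PENDING_REQS
 * is the number currently in flight. MASK_PEND_IDX assumes
 * MAX_PENDING_REQS is a power of two.
 */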
94 /* Freed TX SKBs get batched on this ring before return to pending_ring. */
95 static u16 dealloc_ring[MAX_PENDING_REQS];
96 static PEND_RING_IDX dealloc_prod, dealloc_cons;
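/*
 * netif_idx_release(), possibly in interrupt context, enqueues finished
 * entries at dealloc_prod; net_tx_action_dealloc() drains them at
 * dealloc_cons. The smp_wmb()/smp_rmb() pair in those two functions
 * orders the ring write against the producer increment.
 */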
98 static struct sk_buff_head tx_queue;
100 static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
101 static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
102 static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
104 static struct list_head net_schedule_list;
105 static spinlock_t net_schedule_list_lock;
107 #define MAX_MFN_ALLOC 64
108 static unsigned long mfn_list[MAX_MFN_ALLOC];
109 static unsigned int alloc_index = 0;
111 static inline unsigned long alloc_mfn(void)
112 {
113 BUG_ON(alloc_index == 0);
114 return mfn_list[--alloc_index];
115 }
117 static int check_mfn(int nr)
118 {
119 struct xen_memory_reservation reservation = {
120 .extent_order = 0,
121 .domid = DOMID_SELF
122 };
124 if (likely(alloc_index >= nr))
125 return 0;
127 set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
128 reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
129 alloc_index += HYPERVISOR_memory_op(XENMEM_increase_reservation,
130 &reservation);
132 return alloc_index >= nr ? 0 : -ENOMEM;
133 }
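/*
 * check_mfn() refills mfn_list[] via XENMEM_increase_reservation; the
 * hypercall returns how many extents were actually populated, which is
 * added to alloc_index. On shortfall (-ENOMEM) net_rx_action() backs off
 * and retries from a timer.
 */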
135 static inline void maybe_schedule_tx_action(void)
136 {
137 smp_mb();
138 if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
139 !list_empty(&net_schedule_list))
140 tasklet_schedule(&net_tx_tasklet);
141 }
143 static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
144 {
145 struct skb_shared_info *ninfo;
146 struct sk_buff *nskb;
147 unsigned long offset;
148 int ret;
149 int len;
150 int headlen;
152 BUG_ON(skb_shinfo(skb)->frag_list != NULL);
154 nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
155 if (unlikely(!nskb))
156 goto err;
158 skb_reserve(nskb, 16 + NET_IP_ALIGN);
159 headlen = nskb->end - nskb->data;
160 if (headlen > skb_headlen(skb))
161 headlen = skb_headlen(skb);
162 ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
163 BUG_ON(ret);
165 ninfo = skb_shinfo(nskb);
166 ninfo->gso_size = skb_shinfo(skb)->gso_size;
167 ninfo->gso_type = skb_shinfo(skb)->gso_type;
169 offset = headlen;
170 len = skb->len - headlen;
172 nskb->len = skb->len;
173 nskb->data_len = len;
174 nskb->truesize += len;
176 while (len) {
177 struct page *page;
178 int copy;
179 int zero;
181 if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
182 dump_stack();
183 goto err_free;
184 }
186 copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
187 zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
189 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
190 if (unlikely(!page))
191 goto err_free;
193 ret = skb_copy_bits(skb, offset, page_address(page), copy);
194 BUG_ON(ret);
196 ninfo->frags[ninfo->nr_frags].page = page;
197 ninfo->frags[ninfo->nr_frags].page_offset = 0;
198 ninfo->frags[ninfo->nr_frags].size = copy;
199 ninfo->nr_frags++;
201 offset += copy;
202 len -= copy;
203 }
205 offset = nskb->data - skb->data;
207 nskb->h.raw = skb->h.raw + offset;
208 nskb->nh.raw = skb->nh.raw + offset;
209 nskb->mac.raw = skb->mac.raw + offset;
211 return nskb;
213 err_free:
214 kfree_skb(nskb);
215 err:
216 return NULL;
217 }
219 static inline int netbk_max_required_rx_slots(netif_t *netif)
220 {
221 if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
222 return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
223 return 1; /* all in one */
224 }
226 static inline int netbk_queue_full(netif_t *netif)
227 {
228 RING_IDX peek = netif->rx_req_cons_peek;
229 RING_IDX needed = netbk_max_required_rx_slots(netif);
231 return ((netif->rx.sring->req_prod - peek) < needed) ||
232 ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
233 }
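/*
 * The queue counts as full if either the frontend has not posted enough
 * rx requests for a worst-case packet, or the responses would overrun the
 * ring relative to rsp_prod_pvt.
 */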
235 static void tx_queue_callback(unsigned long data)
236 {
237 netif_t *netif = (netif_t *)data;
238 if (netif_schedulable(netif))
239 netif_wake_queue(netif->dev);
240 }
242 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
243 {
244 netif_t *netif = netdev_priv(dev);
246 BUG_ON(skb->dev != dev);
248 /* Drop the packet if the target domain has no receive buffers. */
249 if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
250 goto drop;
252 /*
253 * Copy the packet here if it's destined for a flipping interface
254 * but isn't flippable (e.g. extra references to data).
255 */
256 if (!netif->copying_receiver) {
257 struct sk_buff *nskb = netbk_copy_skb(skb);
258 if ( unlikely(nskb == NULL) )
259 goto drop;
260 /* Copy only the header fields we use in this driver. */
261 nskb->dev = skb->dev;
262 nskb->ip_summed = skb->ip_summed;
263 nskb->proto_data_valid = skb->proto_data_valid;
264 dev_kfree_skb(skb);
265 skb = nskb;
266 }
268 netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
269 !!skb_shinfo(skb)->gso_size;
270 netif_get(netif);
272 if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
273 netif->rx.sring->req_event = netif->rx_req_cons_peek +
274 netbk_max_required_rx_slots(netif);
275 mb(); /* request notification /then/ check & stop the queue */
276 if (netbk_queue_full(netif)) {
277 netif_stop_queue(dev);
278 /*
279 * Schedule 500ms timeout to restart the queue, thus
280 * ensuring that an inactive queue will be drained.
281 * Packets will immediately be dropped until more
282 * receive buffers become available (see
283 * netbk_queue_full() check above).
284 */
285 netif->tx_queue_timeout.data = (unsigned long)netif;
286 netif->tx_queue_timeout.function = tx_queue_callback;
287 __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
288 }
289 }
291 skb_queue_tail(&rx_queue, skb);
292 tasklet_schedule(&net_rx_tasklet);
294 return 0;
296 drop:
297 netif->stats.tx_dropped++;
298 dev_kfree_skb(skb);
299 return 0;
300 }
302 #if 0
303 static void xen_network_done_notify(void)
304 {
305 static struct net_device *eth0_dev = NULL;
306 if (unlikely(eth0_dev == NULL))
307 eth0_dev = __dev_get_by_name("eth0");
308 netif_rx_schedule(eth0_dev);
309 }
310 /*
311 * Add following to poll() function in NAPI driver (Tigon3 is example):
312 * if ( xen_network_done() )
313 * tg3_enable_ints(tp);
314 */
315 int xen_network_done(void)
316 {
317 return skb_queue_empty(&rx_queue);
318 }
319 #endif
321 struct netrx_pending_operations {
322 unsigned trans_prod, trans_cons;
323 unsigned mmu_prod, mmu_cons;
324 unsigned mcl_prod, mcl_cons;
325 unsigned copy_prod, copy_cons;
326 unsigned meta_prod, meta_cons;
327 mmu_update_t *mmu;
328 gnttab_transfer_t *trans;
329 gnttab_copy_t *copy;
330 multicall_entry_t *mcl;
331 struct netbk_rx_meta *meta;
332 };
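/*
 * Each array above is a simple batch: netbk_gop_skb() appends operations
 * at the *_prod indices while packets are prepared, and netbk_check_gop()
 * consumes the results via the *_cons indices once the hypercalls in
 * net_rx_action() have run.
 */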
334 /* Set up the grant operations for this fragment. If it's a flipping
335 interface, we also set up the unmap request from here. */
336 static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
337 int i, struct netrx_pending_operations *npo,
338 struct page *page, unsigned long size,
339 unsigned long offset)
340 {
341 mmu_update_t *mmu;
342 gnttab_transfer_t *gop;
343 gnttab_copy_t *copy_gop;
344 multicall_entry_t *mcl;
345 netif_rx_request_t *req;
346 unsigned long old_mfn, new_mfn;
348 old_mfn = virt_to_mfn(page_address(page));
350 req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
351 if (netif->copying_receiver) {
352 /* The fragment needs to be copied rather than
353 flipped. */
354 meta->copy = 1;
355 copy_gop = npo->copy + npo->copy_prod++;
356 copy_gop->flags = GNTCOPY_dest_gref;
357 if (PageForeign(page)) {
358 struct pending_tx_info *src_pend =
359 &pending_tx_info[netif_page_index(page)];
360 copy_gop->source.domid = src_pend->netif->domid;
361 copy_gop->source.u.ref = src_pend->req.gref;
362 copy_gop->flags |= GNTCOPY_source_gref;
363 } else {
364 copy_gop->source.domid = DOMID_SELF;
365 copy_gop->source.u.gmfn = old_mfn;
366 }
367 copy_gop->source.offset = offset;
368 copy_gop->dest.domid = netif->domid;
369 copy_gop->dest.offset = 0;
370 copy_gop->dest.u.ref = req->gref;
371 copy_gop->len = size;
372 } else {
373 meta->copy = 0;
374 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
375 new_mfn = alloc_mfn();
377 /*
378 * Set the new P2M table entry before
379 * reassigning the old data page. Heed the
380 * comment in pgtable-2level.h:pte_page(). :-)
381 */
382 set_phys_to_machine(page_to_pfn(page), new_mfn);
384 mcl = npo->mcl + npo->mcl_prod++;
385 MULTI_update_va_mapping(mcl,
386 (unsigned long)page_address(page),
387 pfn_pte_ma(new_mfn, PAGE_KERNEL),
388 0);
390 mmu = npo->mmu + npo->mmu_prod++;
391 mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
392 MMU_MACHPHYS_UPDATE;
393 mmu->val = page_to_pfn(page);
394 }
396 gop = npo->trans + npo->trans_prod++;
397 gop->mfn = old_mfn;
398 gop->domid = netif->domid;
399 gop->ref = req->gref;
400 }
401 return req->id;
402 }
404 static void netbk_gop_skb(struct sk_buff *skb,
405 struct netrx_pending_operations *npo)
406 {
407 netif_t *netif = netdev_priv(skb->dev);
408 int nr_frags = skb_shinfo(skb)->nr_frags;
409 int i;
410 int extra;
411 struct netbk_rx_meta *head_meta, *meta;
413 head_meta = npo->meta + npo->meta_prod++;
414 head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
415 head_meta->frag.size = skb_shinfo(skb)->gso_size;
416 extra = !!head_meta->frag.size + 1;
418 for (i = 0; i < nr_frags; i++) {
419 meta = npo->meta + npo->meta_prod++;
420 meta->frag = skb_shinfo(skb)->frags[i];
421 meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
422 meta->frag.page,
423 meta->frag.size,
424 meta->frag.page_offset);
425 }
427 /*
428 * This must occur at the end to ensure that we don't trash
429 * skb_shinfo until we're done.
430 */
431 head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
432 virt_to_page(skb->data),
433 skb_headlen(skb),
434 offset_in_page(skb->data));
436 netif->rx.req_cons += nr_frags + extra;
437 }
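/*
 * Ordering note: the GSO type/size are stashed in the head meta slot, the
 * fragment slots are filled in, and only then is the header fragment
 * handed to netbk_gop_frag(), presumably because a flipping interface
 * gives the header page away and skb_shinfo() must not be touched after
 * that point.
 */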
439 static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
440 {
441 int i;
443 for (i = 0; i < nr_frags; i++)
444 put_page(meta[i].frag.page);
445 }
447 /* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
448 used to set up the operations on the top of
449 netrx_pending_operations, which have since been done. Check that
450 they didn't give any errors and advance over them. */
451 static int netbk_check_gop(int nr_frags, domid_t domid,
452 struct netrx_pending_operations *npo)
453 {
454 multicall_entry_t *mcl;
455 gnttab_transfer_t *gop;
456 gnttab_copy_t *copy_op;
457 int status = NETIF_RSP_OKAY;
458 int i;
460 for (i = 0; i <= nr_frags; i++) {
461 if (npo->meta[npo->meta_cons + i].copy) {
462 copy_op = npo->copy + npo->copy_cons++;
463 if (copy_op->status != GNTST_okay) {
464 DPRINTK("Bad status %d from copy to DOM%d.\n",
465 copy_op->status, domid);
466 status = NETIF_RSP_ERROR;
467 }
468 } else {
469 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
470 mcl = npo->mcl + npo->mcl_cons++;
471 /* The update_va_mapping() must not fail. */
472 BUG_ON(mcl->result != 0);
473 }
475 gop = npo->trans + npo->trans_cons++;
476 /* Check the reassignment error code. */
477 if (gop->status != 0) {
478 DPRINTK("Bad status %d from grant transfer to DOM%u\n",
479 gop->status, domid);
480 /*
481 * Page no longer belongs to us unless
482 * GNTST_bad_page, but that should be
483 * a fatal error anyway.
484 */
485 BUG_ON(gop->status == GNTST_bad_page);
486 status = NETIF_RSP_ERROR;
487 }
488 }
489 }
491 return status;
492 }
494 static void netbk_add_frag_responses(netif_t *netif, int status,
495 struct netbk_rx_meta *meta, int nr_frags)
496 {
497 int i;
498 unsigned long offset;
500 for (i = 0; i < nr_frags; i++) {
501 int id = meta[i].id;
502 int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
504 if (meta[i].copy)
505 offset = 0;
506 else
507 offset = meta[i].frag.page_offset;
508 make_rx_response(netif, id, status, offset,
509 meta[i].frag.size, flags);
510 }
511 }
513 static void net_rx_action(unsigned long unused)
514 {
515 netif_t *netif = NULL;
516 s8 status;
517 u16 id, irq, flags;
518 netif_rx_response_t *resp;
519 multicall_entry_t *mcl;
520 struct sk_buff_head rxq;
521 struct sk_buff *skb;
522 int notify_nr = 0;
523 int ret;
524 int nr_frags;
525 int count;
526 unsigned long offset;
528 /*
529 * Putting hundreds of bytes on the stack is considered rude.
530 * Static works because a tasklet can only be on one CPU at any time.
531 */
532 static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
533 static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
534 static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
535 static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
536 static unsigned char rx_notify[NR_IRQS];
537 static u16 notify_list[NET_RX_RING_SIZE];
538 static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
540 struct netrx_pending_operations npo = {
541 mmu: rx_mmu,
542 trans: grant_trans_op,
543 copy: grant_copy_op,
544 mcl: rx_mcl,
545 meta: meta};
547 skb_queue_head_init(&rxq);
549 count = 0;
551 while ((skb = skb_dequeue(&rx_queue)) != NULL) {
552 nr_frags = skb_shinfo(skb)->nr_frags;
553 *(int *)skb->cb = nr_frags;
555 if (!xen_feature(XENFEAT_auto_translated_physmap) &&
556 !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
557 check_mfn(nr_frags + 1)) {
558 /* Memory squeeze? Back off for an arbitrary while. */
559 if ( net_ratelimit() )
560 WPRINTK("Memory squeeze in netback "
561 "driver.\n");
562 mod_timer(&net_timer, jiffies + HZ);
563 skb_queue_head(&rx_queue, skb);
564 break;
565 }
567 netbk_gop_skb(skb, &npo);
569 count += nr_frags + 1;
571 __skb_queue_tail(&rxq, skb);
573 /* Filled the batch queue? */
574 if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
575 break;
576 }
578 if (npo.mcl_prod &&
579 !xen_feature(XENFEAT_auto_translated_physmap)) {
580 mcl = npo.mcl + npo.mcl_prod++;
582 BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
583 mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
585 mcl->op = __HYPERVISOR_mmu_update;
586 mcl->args[0] = (unsigned long)rx_mmu;
587 mcl->args[1] = npo.mmu_prod;
588 mcl->args[2] = 0;
589 mcl->args[3] = DOMID_SELF;
590 }
592 if (npo.trans_prod) {
593 mcl = npo.mcl + npo.mcl_prod++;
594 mcl->op = __HYPERVISOR_grant_table_op;
595 mcl->args[0] = GNTTABOP_transfer;
596 mcl->args[1] = (unsigned long)grant_trans_op;
597 mcl->args[2] = npo.trans_prod;
598 }
600 if (npo.copy_prod) {
601 mcl = npo.mcl + npo.mcl_prod++;
602 mcl->op = __HYPERVISOR_grant_table_op;
603 mcl->args[0] = GNTTABOP_copy;
604 mcl->args[1] = (unsigned long)grant_copy_op;
605 mcl->args[2] = npo.copy_prod;
606 }
608 /* Nothing to do? */
609 if (!npo.mcl_prod)
610 return;
612 BUG_ON(npo.copy_prod > NET_RX_RING_SIZE);
613 BUG_ON(npo.mmu_prod > NET_RX_RING_SIZE);
614 BUG_ON(npo.trans_prod > NET_RX_RING_SIZE);
615 BUG_ON(npo.mcl_prod > NET_RX_RING_SIZE+3);
616 BUG_ON(npo.meta_prod > NET_RX_RING_SIZE);
618 ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
619 BUG_ON(ret != 0);
621 while ((skb = __skb_dequeue(&rxq)) != NULL) {
622 nr_frags = *(int *)skb->cb;
624 netif = netdev_priv(skb->dev);
625 /* We can't rely on skb_release_data to release the
626 pages used by fragments for us, since it tries to
627 touch the pages in the fraglist. If we're in
628 flipping mode, that doesn't work. In copying mode,
629 we still have access to all of the pages, and so
630 it's safe to let release_data deal with it. */
631 /* (Freeing the fragments is safe since we copy
632 non-linear skbs destined for flipping interfaces) */
633 if (!netif->copying_receiver) {
634 atomic_set(&(skb_shinfo(skb)->dataref), 1);
635 skb_shinfo(skb)->frag_list = NULL;
636 skb_shinfo(skb)->nr_frags = 0;
637 netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
638 }
640 netif->stats.tx_bytes += skb->len;
641 netif->stats.tx_packets++;
643 status = netbk_check_gop(nr_frags, netif->domid, &npo);
645 id = meta[npo.meta_cons].id;
646 flags = nr_frags ? NETRXF_more_data : 0;
648 if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
649 flags |= NETRXF_csum_blank | NETRXF_data_validated;
650 else if (skb->proto_data_valid) /* remote but checksummed? */
651 flags |= NETRXF_data_validated;
653 if (meta[npo.meta_cons].copy)
654 offset = 0;
655 else
656 offset = offset_in_page(skb->data);
657 resp = make_rx_response(netif, id, status, offset,
658 skb_headlen(skb), flags);
660 if (meta[npo.meta_cons].frag.size) {
661 struct netif_extra_info *gso =
662 (struct netif_extra_info *)
663 RING_GET_RESPONSE(&netif->rx,
664 netif->rx.rsp_prod_pvt++);
666 resp->flags |= NETRXF_extra_info;
668 gso->u.gso.size = meta[npo.meta_cons].frag.size;
669 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
670 gso->u.gso.pad = 0;
671 gso->u.gso.features = 0;
673 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
674 gso->flags = 0;
675 }
677 netbk_add_frag_responses(netif, status,
678 meta + npo.meta_cons + 1,
679 nr_frags);
681 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
682 irq = netif->irq;
683 if (ret && !rx_notify[irq]) {
684 rx_notify[irq] = 1;
685 notify_list[notify_nr++] = irq;
686 }
688 if (netif_queue_stopped(netif->dev) &&
689 netif_schedulable(netif) &&
690 !netbk_queue_full(netif))
691 netif_wake_queue(netif->dev);
693 netif_put(netif);
694 dev_kfree_skb(skb);
695 npo.meta_cons += nr_frags + 1;
696 }
698 while (notify_nr != 0) {
699 irq = notify_list[--notify_nr];
700 rx_notify[irq] = 0;
701 notify_remote_via_irq(irq);
702 }
704 /* More work to do? */
705 if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
706 tasklet_schedule(&net_rx_tasklet);
707 #if 0
708 else
709 xen_network_done_notify();
710 #endif
711 }
713 static void net_alarm(unsigned long unused)
714 {
715 tasklet_schedule(&net_rx_tasklet);
716 }
718 struct net_device_stats *netif_be_get_stats(struct net_device *dev)
719 {
720 netif_t *netif = netdev_priv(dev);
721 return &netif->stats;
722 }
724 static int __on_net_schedule_list(netif_t *netif)
725 {
726 return netif->list.next != NULL;
727 }
729 static void remove_from_net_schedule_list(netif_t *netif)
730 {
731 spin_lock_irq(&net_schedule_list_lock);
732 if (likely(__on_net_schedule_list(netif))) {
733 list_del(&netif->list);
734 netif->list.next = NULL;
735 netif_put(netif);
736 }
737 spin_unlock_irq(&net_schedule_list_lock);
738 }
740 static void add_to_net_schedule_list_tail(netif_t *netif)
741 {
742 if (__on_net_schedule_list(netif))
743 return;
745 spin_lock_irq(&net_schedule_list_lock);
746 if (!__on_net_schedule_list(netif) &&
747 likely(netif_schedulable(netif))) {
748 list_add_tail(&netif->list, &net_schedule_list);
749 netif_get(netif);
750 }
751 spin_unlock_irq(&net_schedule_list_lock);
752 }
754 /*
755 * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
756 * If this driver is pipelining transmit requests then we can be very
757 * aggressive in avoiding new-packet notifications -- frontend only needs to
758 * send a notification if there are no outstanding unreceived responses.
759 * If we may be buffering transmit requests for any reason then we must be
760 * rather more conservative and treat this as the final check for pending work.
761 */
762 void netif_schedule_work(netif_t *netif)
763 {
764 int more_to_do;
766 #ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
767 more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
768 #else
769 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
770 #endif
772 if (more_to_do) {
773 add_to_net_schedule_list_tail(netif);
774 maybe_schedule_tx_action();
775 }
776 }
778 void netif_deschedule_work(netif_t *netif)
779 {
780 remove_from_net_schedule_list(netif);
781 }
784 static void tx_add_credit(netif_t *netif)
785 {
786 unsigned long max_burst, max_credit;
788 /*
789 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
790 * Otherwise the interface can seize up due to insufficient credit.
791 */
792 max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
793 max_burst = min(max_burst, 131072UL);
794 max_burst = max(max_burst, netif->credit_bytes);
796 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
797 max_credit = netif->remaining_credit + netif->credit_bytes;
798 if (max_credit < netif->remaining_credit)
799 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
801 netif->remaining_credit = min(max_credit, max_burst);
802 }
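/*
 * Illustrative figures: with credit_bytes = 10000 and a 60000-byte
 * request at the head of the ring, max_burst is 60000, so
 * remaining_credit can accumulate across several replenish periods until
 * the large request fits, rather than being capped at 10000 and stalling
 * the interface.
 */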
804 static void tx_credit_callback(unsigned long data)
805 {
806 netif_t *netif = (netif_t *)data;
807 tx_add_credit(netif);
808 netif_schedule_work(netif);
809 }
811 inline static void net_tx_action_dealloc(void)
812 {
813 gnttab_unmap_grant_ref_t *gop;
814 u16 pending_idx;
815 PEND_RING_IDX dc, dp;
816 netif_t *netif;
817 int ret;
819 dc = dealloc_cons;
820 dp = dealloc_prod;
822 /* Ensure we see all indexes enqueued by netif_idx_release(). */
823 smp_rmb();
825 /*
826 * Free up any grants we have finished using
827 */
828 gop = tx_unmap_ops;
829 while (dc != dp) {
830 pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
831 gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
832 GNTMAP_host_map,
833 grant_tx_handle[pending_idx]);
834 gop++;
835 }
836 ret = HYPERVISOR_grant_table_op(
837 GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
838 BUG_ON(ret);
840 while (dealloc_cons != dp) {
841 pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
843 netif = pending_tx_info[pending_idx].netif;
845 make_tx_response(netif, &pending_tx_info[pending_idx].req,
846 NETIF_RSP_OKAY);
848 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
850 netif_put(netif);
851 }
852 }
854 static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
855 {
856 RING_IDX cons = netif->tx.req_cons;
858 do {
859 make_tx_response(netif, txp, NETIF_RSP_ERROR);
860 if (cons >= end)
861 break;
862 txp = RING_GET_REQUEST(&netif->tx, cons++);
863 } while (1);
864 netif->tx.req_cons = cons;
865 netif_schedule_work(netif);
866 netif_put(netif);
867 }
869 static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
870 netif_tx_request_t *txp, int work_to_do)
871 {
872 RING_IDX cons = netif->tx.req_cons;
873 int frags = 0;
875 if (!(first->flags & NETTXF_more_data))
876 return 0;
878 do {
879 if (frags >= work_to_do) {
880 DPRINTK("Need more frags\n");
881 return -frags;
882 }
884 if (unlikely(frags >= MAX_SKB_FRAGS)) {
885 DPRINTK("Too many frags\n");
886 return -frags;
887 }
889 memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
890 sizeof(*txp));
891 if (txp->size > first->size) {
892 DPRINTK("Frags galore\n");
893 return -frags;
894 }
896 first->size -= txp->size;
897 frags++;
899 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
900 DPRINTK("txp->offset: %x, size: %u\n",
901 txp->offset, txp->size);
902 return -frags;
903 }
904 } while ((txp++)->flags & NETTXF_more_data);
906 return frags;
907 }
909 static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
910 struct sk_buff *skb,
911 netif_tx_request_t *txp,
912 gnttab_map_grant_ref_t *mop)
913 {
914 struct skb_shared_info *shinfo = skb_shinfo(skb);
915 skb_frag_t *frags = shinfo->frags;
916 unsigned long pending_idx = *((u16 *)skb->data);
917 int i, start;
919 /* Skip first skb fragment if it is on same page as header fragment. */
920 start = ((unsigned long)shinfo->frags[0].page == pending_idx);
922 for (i = start; i < shinfo->nr_frags; i++, txp++) {
923 pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
925 gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
926 GNTMAP_host_map | GNTMAP_readonly,
927 txp->gref, netif->domid);
929 memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
930 netif_get(netif);
931 pending_tx_info[pending_idx].netif = netif;
932 frags[i].page = (void *)pending_idx;
933 }
935 return mop;
936 }
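/*
 * While the grant maps are outstanding, frags[i].page temporarily holds
 * the pending-ring index cast to a pointer rather than a real struct
 * page; netbk_fill_frags() converts it back once netbk_tx_check_mop() has
 * accepted the mappings.
 */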
938 static int netbk_tx_check_mop(struct sk_buff *skb,
939 gnttab_map_grant_ref_t **mopp)
940 {
941 gnttab_map_grant_ref_t *mop = *mopp;
942 int pending_idx = *((u16 *)skb->data);
943 netif_t *netif = pending_tx_info[pending_idx].netif;
944 netif_tx_request_t *txp;
945 struct skb_shared_info *shinfo = skb_shinfo(skb);
946 int nr_frags = shinfo->nr_frags;
947 int i, err, start;
949 /* Check status of header. */
950 err = mop->status;
951 if (unlikely(err)) {
952 txp = &pending_tx_info[pending_idx].req;
953 make_tx_response(netif, txp, NETIF_RSP_ERROR);
954 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
955 netif_put(netif);
956 } else {
957 set_phys_to_machine(
958 __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
959 FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
960 grant_tx_handle[pending_idx] = mop->handle;
961 }
963 /* Skip first skb fragment if it is on same page as header fragment. */
964 start = ((unsigned long)shinfo->frags[0].page == pending_idx);
966 for (i = start; i < nr_frags; i++) {
967 int j, newerr;
969 pending_idx = (unsigned long)shinfo->frags[i].page;
971 /* Check error status: if okay then remember grant handle. */
972 newerr = (++mop)->status;
973 if (likely(!newerr)) {
974 set_phys_to_machine(
975 __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
976 FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
977 grant_tx_handle[pending_idx] = mop->handle;
978 /* Had a previous error? Invalidate this fragment. */
979 if (unlikely(err))
980 netif_idx_release(pending_idx);
981 continue;
982 }
984 /* Error on this fragment: respond to client with an error. */
985 txp = &pending_tx_info[pending_idx].req;
986 make_tx_response(netif, txp, NETIF_RSP_ERROR);
987 pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
988 netif_put(netif);
990 /* Not the first error? Preceding frags already invalidated. */
991 if (err)
992 continue;
994 /* First error: invalidate header and preceding fragments. */
995 pending_idx = *((u16 *)skb->data);
996 netif_idx_release(pending_idx);
997 for (j = start; j < i; j++) {
998 pending_idx = (unsigned long)shinfo->frags[j].page;
999 netif_idx_release(pending_idx);
1000 }
1002 /* Remember the error: invalidate all subsequent fragments. */
1003 err = newerr;
1004 }
1006 *mopp = mop + 1;
1007 return err;
1008 }
1010 static void netbk_fill_frags(struct sk_buff *skb)
1011 {
1012 struct skb_shared_info *shinfo = skb_shinfo(skb);
1013 int nr_frags = shinfo->nr_frags;
1014 int i;
1016 for (i = 0; i < nr_frags; i++) {
1017 skb_frag_t *frag = shinfo->frags + i;
1018 netif_tx_request_t *txp;
1019 unsigned long pending_idx;
1021 pending_idx = (unsigned long)frag->page;
1022 txp = &pending_tx_info[pending_idx].req;
1023 frag->page = virt_to_page(idx_to_kaddr(pending_idx));
1024 frag->size = txp->size;
1025 frag->page_offset = txp->offset;
1027 skb->len += txp->size;
1028 skb->data_len += txp->size;
1029 skb->truesize += txp->size;
1030 }
1031 }
1033 int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
1034 int work_to_do)
1035 {
1036 struct netif_extra_info extra;
1037 RING_IDX cons = netif->tx.req_cons;
1039 do {
1040 if (unlikely(work_to_do-- <= 0)) {
1041 DPRINTK("Missing extra info\n");
1042 return -EBADR;
1043 }
1045 memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
1046 sizeof(extra));
1047 if (unlikely(!extra.type ||
1048 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1049 netif->tx.req_cons = ++cons;
1050 DPRINTK("Invalid extra type: %d\n", extra.type);
1051 return -EINVAL;
1052 }
1054 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1055 netif->tx.req_cons = ++cons;
1056 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1058 return work_to_do;
1059 }
1061 static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
1062 {
1063 if (!gso->u.gso.size) {
1064 DPRINTK("GSO size must not be zero.\n");
1065 return -EINVAL;
1066 }
1068 /* Currently only TCPv4 S.O. is supported. */
1069 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1070 DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
1071 return -EINVAL;
1072 }
1074 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1075 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1077 /* Header must be checked, and gso_segs computed. */
1078 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1079 skb_shinfo(skb)->gso_segs = 0;
1081 return 0;
1082 }
1084 /* Called after netfront has transmitted */
1085 static void net_tx_action(unsigned long unused)
1086 {
1087 struct list_head *ent;
1088 struct sk_buff *skb;
1089 netif_t *netif;
1090 netif_tx_request_t txreq;
1091 netif_tx_request_t txfrags[MAX_SKB_FRAGS];
1092 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
1093 u16 pending_idx;
1094 RING_IDX i;
1095 gnttab_map_grant_ref_t *mop;
1096 unsigned int data_len;
1097 int ret, work_to_do;
1099 if (dealloc_cons != dealloc_prod)
1100 net_tx_action_dealloc();
1102 mop = tx_map_ops;
1103 while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
1104 !list_empty(&net_schedule_list)) {
1105 /* Get a netif from the list with work to do. */
1106 ent = net_schedule_list.next;
1107 netif = list_entry(ent, netif_t, list);
1108 netif_get(netif);
1109 remove_from_net_schedule_list(netif);
1111 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
1112 if (!work_to_do) {
1113 netif_put(netif);
1114 continue;
1115 }
1117 i = netif->tx.req_cons;
1118 rmb(); /* Ensure that we see the request before we copy it. */
1119 memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
1121 /* Credit-based scheduling. */
1122 if (txreq.size > netif->remaining_credit) {
1123 unsigned long now = jiffies;
1124 unsigned long next_credit =
1125 netif->credit_timeout.expires +
1126 msecs_to_jiffies(netif->credit_usec / 1000);
1128 /* Timer could already be pending in rare cases. */
1129 if (timer_pending(&netif->credit_timeout)) {
1130 netif_put(netif);
1131 continue;
1132 }
1134 /* Passed the point where we can replenish credit? */
1135 if (time_after_eq(now, next_credit)) {
1136 netif->credit_timeout.expires = now;
1137 tx_add_credit(netif);
1138 }
1140 /* Still too big to send right now? Set a callback. */
1141 if (txreq.size > netif->remaining_credit) {
1142 netif->credit_timeout.data =
1143 (unsigned long)netif;
1144 netif->credit_timeout.function =
1145 tx_credit_callback;
1146 __mod_timer(&netif->credit_timeout,
1147 next_credit);
1148 netif_put(netif);
1149 continue;
1150 }
1151 }
1152 netif->remaining_credit -= txreq.size;
1154 work_to_do--;
1155 netif->tx.req_cons = ++i;
1157 memset(extras, 0, sizeof(extras));
1158 if (txreq.flags & NETTXF_extra_info) {
1159 work_to_do = netbk_get_extras(netif, extras,
1160 work_to_do);
1161 i = netif->tx.req_cons;
1162 if (unlikely(work_to_do < 0)) {
1163 netbk_tx_err(netif, &txreq, i);
1164 continue;
1165 }
1166 }
1168 ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
1169 if (unlikely(ret < 0)) {
1170 netbk_tx_err(netif, &txreq, i - ret);
1171 continue;
1172 }
1173 i += ret;
1175 if (unlikely(txreq.size < ETH_HLEN)) {
1176 DPRINTK("Bad packet size: %d\n", txreq.size);
1177 netbk_tx_err(netif, &txreq, i);
1178 continue;
1179 }
1181 /* The payload must not cross a page boundary, since it cannot be fragmented. */
1182 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1183 DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
1184 txreq.offset, txreq.size,
1185 (txreq.offset &~PAGE_MASK) + txreq.size);
1186 netbk_tx_err(netif, &txreq, i);
1187 continue;
1188 }
1190 pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
1192 data_len = (txreq.size > PKT_PROT_LEN &&
1193 ret < MAX_SKB_FRAGS) ?
1194 PKT_PROT_LEN : txreq.size;
1196 skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
1197 GFP_ATOMIC | __GFP_NOWARN);
1198 if (unlikely(skb == NULL)) {
1199 DPRINTK("Can't allocate a skb in start_xmit.\n");
1200 netbk_tx_err(netif, &txreq, i);
1201 break;
1202 }
1204 /* Packets passed to netif_rx() must have some headroom. */
1205 skb_reserve(skb, 16 + NET_IP_ALIGN);
1207 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1208 struct netif_extra_info *gso;
1209 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1211 if (netbk_set_skb_gso(skb, gso)) {
1212 kfree_skb(skb);
1213 netbk_tx_err(netif, &txreq, i);
1214 continue;
1215 }
1216 }
1218 gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
1219 GNTMAP_host_map | GNTMAP_readonly,
1220 txreq.gref, netif->domid);
1221 mop++;
1223 memcpy(&pending_tx_info[pending_idx].req,
1224 &txreq, sizeof(txreq));
1225 pending_tx_info[pending_idx].netif = netif;
1226 *((u16 *)skb->data) = pending_idx;
1228 __skb_put(skb, data_len);
1230 skb_shinfo(skb)->nr_frags = ret;
1231 if (data_len < txreq.size) {
1232 skb_shinfo(skb)->nr_frags++;
1233 skb_shinfo(skb)->frags[0].page =
1234 (void *)(unsigned long)pending_idx;
1235 } else {
1236 /* Discriminate from any valid pending_idx value. */
1237 skb_shinfo(skb)->frags[0].page = (void *)~0UL;
1238 }
1240 __skb_queue_tail(&tx_queue, skb);
1242 pending_cons++;
1244 mop = netbk_get_requests(netif, skb, txfrags, mop);
1246 netif->tx.req_cons = i;
1247 netif_schedule_work(netif);
1249 if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
1250 break;
1251 }
1253 if (mop == tx_map_ops)
1254 return;
1256 ret = HYPERVISOR_grant_table_op(
1257 GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
1258 BUG_ON(ret);
1260 mop = tx_map_ops;
1261 while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
1262 netif_tx_request_t *txp;
1264 pending_idx = *((u16 *)skb->data);
1265 netif = pending_tx_info[pending_idx].netif;
1266 txp = &pending_tx_info[pending_idx].req;
1268 /* Check the remap error code. */
1269 if (unlikely(netbk_tx_check_mop(skb, &mop))) {
1270 DPRINTK("netback grant failed.\n");
1271 skb_shinfo(skb)->nr_frags = 0;
1272 kfree_skb(skb);
1273 continue;
1274 }
1276 data_len = skb->len;
1277 memcpy(skb->data,
1278 (void *)(idx_to_kaddr(pending_idx)|txp->offset),
1279 data_len);
1280 if (data_len < txp->size) {
1281 /* Append the packet payload as a fragment. */
1282 txp->offset += data_len;
1283 txp->size -= data_len;
1284 } else {
1285 /* Schedule a response immediately. */
1286 netif_idx_release(pending_idx);
1287 }
1289 /*
1290 * Old frontends do not assert data_validated but we
1291 * can infer it from csum_blank so test both flags.
1292 */
1293 if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
1294 skb->ip_summed = CHECKSUM_UNNECESSARY;
1295 skb->proto_data_valid = 1;
1296 } else {
1297 skb->ip_summed = CHECKSUM_NONE;
1298 skb->proto_data_valid = 0;
1299 }
1300 skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
1302 netbk_fill_frags(skb);
1304 skb->dev = netif->dev;
1305 skb->protocol = eth_type_trans(skb, skb->dev);
1307 netif->stats.rx_bytes += skb->len;
1308 netif->stats.rx_packets++;
1310 netif_rx(skb);
1311 netif->dev->last_rx = jiffies;
1312 }
1313 }
1315 static void netif_idx_release(u16 pending_idx)
1316 {
1317 static DEFINE_SPINLOCK(_lock);
1318 unsigned long flags;
1320 spin_lock_irqsave(&_lock, flags);
1321 dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
1322 /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
1323 smp_wmb();
1324 dealloc_prod++;
1325 spin_unlock_irqrestore(&_lock, flags);
1327 tasklet_schedule(&net_tx_tasklet);
1328 }
1330 static void netif_page_release(struct page *page)
1331 {
1332 /* Ready for next use. */
1333 init_page_count(page);
1335 netif_idx_release(netif_page_index(page));
1336 }
1338 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
1339 {
1340 netif_t *netif = dev_id;
1342 add_to_net_schedule_list_tail(netif);
1343 maybe_schedule_tx_action();
1345 if (netif_schedulable(netif) && !netbk_queue_full(netif))
1346 netif_wake_queue(netif->dev);
1348 return IRQ_HANDLED;
1349 }
1351 static void make_tx_response(netif_t *netif,
1352 netif_tx_request_t *txp,
1353 s8 st)
1354 {
1355 RING_IDX i = netif->tx.rsp_prod_pvt;
1356 netif_tx_response_t *resp;
1357 int notify;
1359 resp = RING_GET_RESPONSE(&netif->tx, i);
1360 resp->id = txp->id;
1361 resp->status = st;
1363 if (txp->flags & NETTXF_extra_info)
1364 RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
1366 netif->tx.rsp_prod_pvt = ++i;
1367 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
1368 if (notify)
1369 notify_remote_via_irq(netif->irq);
1371 #ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
1372 if (i == netif->tx.req_cons) {
1373 int more_to_do;
1374 RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
1375 if (more_to_do)
1376 add_to_net_schedule_list_tail(netif);
1377 }
1378 #endif
1379 }
1381 static netif_rx_response_t *make_rx_response(netif_t *netif,
1382 u16 id,
1383 s8 st,
1384 u16 offset,
1385 u16 size,
1386 u16 flags)
1387 {
1388 RING_IDX i = netif->rx.rsp_prod_pvt;
1389 netif_rx_response_t *resp;
1391 resp = RING_GET_RESPONSE(&netif->rx, i);
1392 resp->offset = offset;
1393 resp->flags = flags;
1394 resp->id = id;
1395 resp->status = (s16)size;
1396 if (st < 0)
1397 resp->status = (s16)st;
1399 netif->rx.rsp_prod_pvt = ++i;
1401 return resp;
1402 }
1404 #ifdef NETBE_DEBUG_INTERRUPT
1405 static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
1406 {
1407 struct list_head *ent;
1408 netif_t *netif;
1409 int i = 0;
1411 printk(KERN_ALERT "netif_schedule_list:\n");
1412 spin_lock_irq(&net_schedule_list_lock);
1414 list_for_each (ent, &net_schedule_list) {
1415 netif = list_entry(ent, netif_t, list);
1416 printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
1417 "rx_resp_prod=%08x\n",
1418 i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
1419 printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
1420 netif->tx.req_cons, netif->tx.rsp_prod_pvt);
1421 printk(KERN_ALERT " shared(rx_req_prod=%08x "
1422 "rx_resp_prod=%08x\n",
1423 netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
1424 printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
1425 netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
1426 printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
1427 netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
1428 i++;
1429 }
1431 spin_unlock_irq(&net_schedule_list_lock);
1432 printk(KERN_ALERT " ** End of netif_schedule_list **\n");
1434 return IRQ_HANDLED;
1435 }
1436 #endif
1438 static int __init netback_init(void)
1439 {
1440 int i;
1441 struct page *page;
1443 if (!is_running_on_xen())
1444 return -ENODEV;
1446 /* We can increase reservation by this much in net_rx_action(). */
1447 balloon_update_driver_allowance(NET_RX_RING_SIZE);
1449 skb_queue_head_init(&rx_queue);
1450 skb_queue_head_init(&tx_queue);
1452 init_timer(&net_timer);
1453 net_timer.data = 0;
1454 net_timer.function = net_alarm;
1456 mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
1457 if (mmap_pages == NULL) {
1458 printk("%s: out of memory\n", __FUNCTION__);
1459 return -ENOMEM;
1460 }
1462 for (i = 0; i < MAX_PENDING_REQS; i++) {
1463 page = mmap_pages[i];
1464 SetPageForeign(page, netif_page_release);
1465 netif_page_index(page) = i;
1466 }
1468 pending_cons = 0;
1469 pending_prod = MAX_PENDING_REQS;
1470 for (i = 0; i < MAX_PENDING_REQS; i++)
1471 pending_ring[i] = i;
1473 spin_lock_init(&net_schedule_list_lock);
1474 INIT_LIST_HEAD(&net_schedule_list);
1476 netif_xenbus_init();
1478 #ifdef NETBE_DEBUG_INTERRUPT
1479 (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
1480 0,
1481 netif_be_dbg,
1482 SA_SHIRQ,
1483 "net-be-dbg",
1484 &netif_be_dbg);
1485 #endif
1487 return 0;
1488 }
1490 module_init(netback_init);
1492 MODULE_LICENSE("Dual BSD/GPL");