direct-io.hg

view linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c @ 4959:2c0074c64c33

bitkeeper revision 1.1159.258.134 (428a2efbVjatsyWzpCT8mcdj0YbvXw)

Do not build netfront/netback loopback by default.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue May 17 17:50:51 2005 +0000 (2005-05-17)
parents cff0d3baf599
children db5a30a327e6 0dc3b8b8c298
/******************************************************************************
 * drivers/xen/netback/netback.c
 *
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/xen/netfront/netfront.c
 *
 * Copyright (c) 2002-2004, K A Fraser
 */

#include "common.h"
#include <asm-xen/balloon.h>

static void netif_idx_release(u16 pending_idx);
static void netif_page_release(struct page *page);
static void make_tx_response(netif_t *netif,
                             u16      id,
                             s8       st);
static int  make_rx_response(netif_t *netif,
                             u16      id,
                             s8       st,
                             memory_t addr,
                             u16      size);

static void net_tx_action(unsigned long unused);
static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);

static void net_rx_action(unsigned long unused);
static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);

static struct timer_list net_timer;

static struct sk_buff_head rx_queue;
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
static unsigned char rx_notify[NR_EVENT_CHANNELS];
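
/*
 * NB: net_rx_action() below issues two multicall entries and three
 * mmu_update entries per queued packet, which is why rx_mcl[] and rx_mmu[]
 * are sized at 2x and 3x the RX ring size. rx_notify[] records which event
 * channels still need a notification once a batch of responses is posted.
 */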

/* Don't currently gate addition of an interface to the tx scheduling list. */
#define tx_work_exists(_if) (1)

#define MAX_PENDING_REQS 256
static unsigned long mmap_vstart;
#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))

#define PKT_PROT_LEN 64

static struct {
    netif_tx_request_t req;
    netif_t *netif;
} pending_tx_info[MAX_PENDING_REQS];
static u16 pending_ring[MAX_PENDING_REQS];
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)

/* Freed TX SKBs get batched on this ring before return to pending_ring. */
static u16 dealloc_ring[MAX_PENDING_REQS];
static PEND_RING_IDX dealloc_prod, dealloc_cons;

static struct sk_buff_head tx_queue;
static multicall_entry_t tx_mcl[MAX_PENDING_REQS];

static struct list_head net_schedule_list;
static spinlock_t net_schedule_list_lock;

#define MAX_MFN_ALLOC 64
static unsigned long mfn_list[MAX_MFN_ALLOC];
static unsigned int alloc_index = 0;
static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
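
/*
 * Small local pool of machine frames. net_rx_action() pulls a frame from
 * here to replace each local page that is reassigned to a guest; the pool
 * is refilled from Xen via MEMOP_increase_reservation when it runs dry.
 * alloc_mfn() returns 0 if no frame could be obtained.
 */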
static unsigned long alloc_mfn(void)
{
    unsigned long mfn = 0, flags;
    spin_lock_irqsave(&mfn_lock, flags);
    if ( unlikely(alloc_index == 0) )
        alloc_index = HYPERVISOR_dom_mem_op(
            MEMOP_increase_reservation, mfn_list, MAX_MFN_ALLOC, 0);
    if ( alloc_index != 0 )
        mfn = mfn_list[--alloc_index];
    spin_unlock_irqrestore(&mfn_lock, flags);
    return mfn;
}

static void free_mfn(unsigned long mfn)
{
    unsigned long flags;
    spin_lock_irqsave(&mfn_lock, flags);
    if ( alloc_index != MAX_MFN_ALLOC )
        mfn_list[alloc_index++] = mfn;
    else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
                                    &mfn, 1, 0) != 1 )
        BUG();
    spin_unlock_irqrestore(&mfn_lock, flags);
}

static inline void maybe_schedule_tx_action(void)
{
    smp_mb();
    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
         !list_empty(&net_schedule_list) )
        tasklet_schedule(&net_tx_tasklet);
}

/*
 * A gross way of confirming the origin of an skb data page. The slab
 * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
 */
static inline int is_xen_skb(struct sk_buff *skb)
{
    extern kmem_cache_t *skbuff_cachep;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
#else
    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
#endif
    return (cp == skbuff_cachep);
}
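
/*
 * Transmit path out of the backend device: packets sent towards the guest
 * are queued on rx_queue here and handed to the frontend by net_rx_action().
 * The packet is dropped if the frontend has no posted receive buffers, and
 * is copied into a fresh skb unless its data page came from the Xen skb
 * cache, since the data page is later reassigned to the guest.
 */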
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    netif_t *netif = netdev_priv(dev);

    ASSERT(skb->dev == dev);

    /* Drop the packet if the target domain has no receive buffers. */
    if ( !netif->active ||
         (netif->rx_req_cons == netif->rx->req_prod) ||
         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
        goto drop;

    /*
     * We do not copy the packet unless:
     *  1. The data is shared; or
     *  2. The data is not allocated from our special cache.
     * NB. We also couldn't cope with fragmented packets, but we won't get
     *     any because we do not advertise the NETIF_F_SG feature.
     */
    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
    {
        int hlen = skb->data - skb->head;
        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
        if ( unlikely(nskb == NULL) )
            goto drop;
        skb_reserve(nskb, hlen);
        __skb_put(nskb, skb->len);
        (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
        nskb->dev = skb->dev;
        dev_kfree_skb(skb);
        skb = nskb;
    }

    netif->rx_req_cons++;
    netif_get(netif);

    skb_queue_tail(&rx_queue, skb);
    tasklet_schedule(&net_rx_tasklet);

    return 0;

 drop:
    netif->stats.tx_dropped++;
    dev_kfree_skb(skb);
    return 0;
}

#if 0
static void xen_network_done_notify(void)
{
    static struct net_device *eth0_dev = NULL;
    if ( unlikely(eth0_dev == NULL) )
        eth0_dev = __dev_get_by_name("eth0");
    netif_rx_schedule(eth0_dev);
}
/*
 * Add the following to the poll() function in a NAPI driver (Tigon3 is an
 * example):
 *  if ( xen_network_done() )
 *      tg3_enable_ints(tp);
 */
int xen_network_done(void)
{
    return skb_queue_empty(&rx_queue);
}
#endif
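
/*
 * net_rx_action(): deliver queued packets to their frontends by page
 * flipping. For each skb the backing page is reassigned to the target
 * domain via a batched mmu_update/multicall, the local virtual address is
 * remapped to a freshly allocated frame, a response is posted on the RX
 * ring, and any interfaces whose event threshold was reached are notified
 * through their event channels.
 */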
static void net_rx_action(unsigned long unused)
{
    netif_t *netif;
    s8 status;
    u16 size, id, evtchn;
    mmu_update_t *mmu;
    multicall_entry_t *mcl;
    unsigned long vdata, mdata, new_mfn;
    struct sk_buff_head rxq;
    struct sk_buff *skb;
    u16 notify_list[NETIF_RX_RING_SIZE];
    int notify_nr = 0;

    skb_queue_head_init(&rxq);

    mcl = rx_mcl;
    mmu = rx_mmu;
    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
    {
        netif = netdev_priv(skb->dev);
        vdata = (unsigned long)skb->data;
        mdata = virt_to_machine(vdata);

        /* Memory squeeze? Back off for an arbitrary while. */
        if ( (new_mfn = alloc_mfn()) == 0 )
        {
            if ( net_ratelimit() )
                printk(KERN_WARNING "Memory squeeze in netback driver.\n");
            mod_timer(&net_timer, jiffies + HZ);
            skb_queue_head(&rx_queue, skb);
            break;
        }

        /*
         * Set the new P2M table entry before reassigning the old data page.
         * Heed the comment in pgtable-2level.h:pte_page(). :-)
         */
        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;

        mmu[0].ptr  = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
        mmu[0].val  = __pa(vdata) >> PAGE_SHIFT;
        mmu[1].ptr  = MMU_EXTENDED_COMMAND;
        mmu[1].val  = MMUEXT_SET_FOREIGNDOM;
        mmu[1].val |= (unsigned long)netif->domid << 16;
        mmu[2].ptr  = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
        mmu[2].val  = MMUEXT_REASSIGN_PAGE;

        mcl[0].op = __HYPERVISOR_update_va_mapping;
        mcl[0].args[0] = vdata >> PAGE_SHIFT;
        mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
        mcl[0].args[2] = 0;
        mcl[1].op = __HYPERVISOR_mmu_update;
        mcl[1].args[0] = (unsigned long)mmu;
        mcl[1].args[1] = 3;
        mcl[1].args[2] = 0;

        mcl += 2;
        mmu += 3;

        __skb_queue_tail(&rxq, skb);

        /* Filled the batch queue? */
        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
            break;
    }

    if ( mcl == rx_mcl )
        return;

    mcl[-2].args[2] = UVMF_FLUSH_TLB;
    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
        BUG();

    mcl = rx_mcl;
    mmu = rx_mmu;
    while ( (skb = __skb_dequeue(&rxq)) != NULL )
    {
        netif = netdev_priv(skb->dev);
        size = skb->tail - skb->data;

        /* Rederive the machine addresses. */
        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
        mdata   = ((mmu[2].ptr & PAGE_MASK) |
                   ((unsigned long)skb->data & ~PAGE_MASK));

        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags = 0;
        skb_shinfo(skb)->frag_list = NULL;

        netif->stats.tx_bytes += size;
        netif->stats.tx_packets++;

        /* The update_va_mapping() must not fail. */
        if ( unlikely(mcl[0].args[5] != 0) )
            BUG();

        /* Check the reassignment error code. */
        status = NETIF_RSP_OKAY;
        if ( unlikely(mcl[1].args[5] != 0) )
        {
            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
            free_mfn(mdata >> PAGE_SHIFT);
            status = NETIF_RSP_ERROR;
        }

        evtchn = netif->evtchn;
        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
        if ( make_rx_response(netif, id, status, mdata, size) &&
             (rx_notify[evtchn] == 0) )
        {
            rx_notify[evtchn] = 1;
            notify_list[notify_nr++] = evtchn;
        }

        netif_put(netif);
        dev_kfree_skb(skb);

        mcl += 2;
        mmu += 3;
    }

    while ( notify_nr != 0 )
    {
        evtchn = notify_list[--notify_nr];
        rx_notify[evtchn] = 0;
        notify_via_evtchn(evtchn);
    }

    /* More work to do? */
    if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
        tasklet_schedule(&net_rx_tasklet);
#if 0
    else
        xen_network_done_notify();
#endif
}
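
/* Back-off timer handler: retry the RX path after a memory squeeze. */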
static void net_alarm(unsigned long unused)
{
    tasklet_schedule(&net_rx_tasklet);
}

struct net_device_stats *netif_be_get_stats(struct net_device *dev)
{
    netif_t *netif = netdev_priv(dev);
    return &netif->stats;
}

static int __on_net_schedule_list(netif_t *netif)
{
    return netif->list.next != NULL;
}

static void remove_from_net_schedule_list(netif_t *netif)
{
    spin_lock_irq(&net_schedule_list_lock);
    if ( likely(__on_net_schedule_list(netif)) )
    {
        list_del(&netif->list);
        netif->list.next = NULL;
        netif_put(netif);
    }
    spin_unlock_irq(&net_schedule_list_lock);
}

static void add_to_net_schedule_list_tail(netif_t *netif)
{
    if ( __on_net_schedule_list(netif) )
        return;

    spin_lock_irq(&net_schedule_list_lock);
    if ( !__on_net_schedule_list(netif) && netif->active )
    {
        list_add_tail(&netif->list, &net_schedule_list);
        netif_get(netif);
    }
    spin_unlock_irq(&net_schedule_list_lock);
}

void netif_schedule_work(netif_t *netif)
{
    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
    {
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();
    }
}

void netif_deschedule_work(netif_t *netif)
{
    remove_from_net_schedule_list(netif);
}

#if 0
static void tx_credit_callback(unsigned long data)
{
    netif_t *netif = (netif_t *)data;
    netif->remaining_credit = netif->credit_bytes;
    netif_schedule_work(netif);
}
#endif
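
/*
 * net_tx_action(): the guest-transmit (backend-receive) path. First, any
 * completed slots on dealloc_ring are unmapped and acknowledged with a TX
 * response. Then new requests are pulled from interfaces on the schedule
 * list: each request's data page is mapped from the guest with
 * update_va_mapping_otherdomain, up to PKT_PROT_LEN bytes are copied into a
 * fresh skb (the remainder attached as a fragment), and the skb is injected
 * into the network stack with netif_rx().
 */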
static void net_tx_action(unsigned long unused)
{
    struct list_head *ent;
    struct sk_buff *skb;
    netif_t *netif;
    netif_tx_request_t txreq;
    u16 pending_idx;
    NETIF_RING_IDX i;
    multicall_entry_t *mcl;
    PEND_RING_IDX dc, dp;
    unsigned int data_len;

    if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
        goto skip_dealloc;

    mcl = tx_mcl;
    while ( dc != dp )
    {
        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
        mcl[0].op = __HYPERVISOR_update_va_mapping;
        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
        mcl[0].args[1] = 0;
        mcl[0].args[2] = 0;
        mcl++;
    }

    mcl[-1].args[2] = UVMF_FLUSH_TLB;
    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
        BUG();

    mcl = tx_mcl;
    while ( dealloc_cons != dp )
    {
        /* The update_va_mapping() must not fail. */
        if ( unlikely(mcl[0].args[5] != 0) )
            BUG();

        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];

        netif = pending_tx_info[pending_idx].netif;

        make_tx_response(netif, pending_tx_info[pending_idx].req.id,
                         NETIF_RSP_OKAY);

        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;

        /*
         * Scheduling checks must happen after the above response is posted.
         * This avoids a possible race with a guest OS on another CPU if that
         * guest is testing against 'resp_prod' when deciding whether to notify
         * us when it queues additional packets.
         */
        mb();
        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
            add_to_net_schedule_list_tail(netif);

        netif_put(netif);

        mcl++;
    }

 skip_dealloc:
    mcl = tx_mcl;
    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
            !list_empty(&net_schedule_list) )
    {
        /* Get a netif from the list with work to do. */
        ent = net_schedule_list.next;
        netif = list_entry(ent, netif_t, list);
        netif_get(netif);
        remove_from_net_schedule_list(netif);

        /* Work to do? */
        i = netif->tx_req_cons;
        if ( (i == netif->tx->req_prod) ||
             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
        {
            netif_put(netif);
            continue;
        }

        netif->tx->req_cons = ++netif->tx_req_cons;

        /*
         * 1. Ensure that we see the request when we copy it.
         * 2. Ensure that the frontend sees the updated req_cons before we
         *    check for more work to schedule.
         */
        mb();

        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
               sizeof(txreq));

#if 0
        /* Credit-based scheduling. */
        if ( txreq.size > netif->remaining_credit )
        {
            s_time_t now = NOW(), next_credit =
                netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
            if ( next_credit <= now )
            {
                netif->credit_timeout.expires = now;
                netif->remaining_credit = netif->credit_bytes;
            }
            else
            {
                netif->remaining_credit = 0;
                netif->credit_timeout.expires  = next_credit;
                netif->credit_timeout.data     = (unsigned long)netif;
                netif->credit_timeout.function = tx_credit_callback;
                netif->credit_timeout.cpu      = smp_processor_id();
                add_ac_timer(&netif->credit_timeout);
                break;
            }
        }
        netif->remaining_credit -= txreq.size;
#endif

        netif_schedule_work(netif);

        if ( unlikely(txreq.size < ETH_HLEN) ||
             unlikely(txreq.size > ETH_FRAME_LEN) )
        {
            DPRINTK("Bad packet size: %d\n", txreq.size);
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            continue;
        }

        /* No crossing a page boundary as the payload mustn't fragment. */
        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
        {
            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
                    txreq.addr, txreq.size,
                    (txreq.addr &~PAGE_MASK) + txreq.size);
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            continue;
        }

        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];

        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;

        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
        {
            DPRINTK("Can't allocate a skb in start_xmit.\n");
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            break;
        }

        /* Packets passed to netif_rx() must have some headroom. */
        skb_reserve(skb, 16);

        mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
        mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
        mcl[0].args[2] = 0;
        mcl[0].args[3] = netif->domid;
        mcl++;

        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
        pending_tx_info[pending_idx].netif = netif;
        *((u16 *)skb->data) = pending_idx;

        __skb_queue_tail(&tx_queue, skb);

        pending_cons++;

        /* Filled the batch queue? */
        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
            break;
    }

    if ( mcl == tx_mcl )
        return;

    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
        BUG();

    mcl = tx_mcl;
    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
    {
        pending_idx = *((u16 *)skb->data);
        netif       = pending_tx_info[pending_idx].netif;
        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));

        /* Check the remap error code. */
        if ( unlikely(mcl[0].args[5] != 0) )
        {
            DPRINTK("Bad page frame\n");
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            kfree_skb(skb);
            mcl++;
            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
            continue;
        }

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);

        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;

        __skb_put(skb, data_len);
        memcpy(skb->data,
               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
               data_len);

        if ( data_len < txreq.size )
        {
            /* Append the packet payload as a fragment. */
            skb_shinfo(skb)->frags[0].page =
                virt_to_page(MMAP_VADDR(pending_idx));
            skb_shinfo(skb)->frags[0].size = txreq.size - data_len;
            skb_shinfo(skb)->frags[0].page_offset =
                (txreq.addr + data_len) & ~PAGE_MASK;
            skb_shinfo(skb)->nr_frags = 1;
        }
        else
        {
            /* Schedule a response immediately. */
            netif_idx_release(pending_idx);
        }

        skb->data_len  = txreq.size - data_len;
        skb->len      += skb->data_len;

        skb->dev      = netif->dev;
        skb->protocol = eth_type_trans(skb, skb->dev);

        netif->stats.rx_bytes += txreq.size;
        netif->stats.rx_packets++;

        netif_rx(skb);
        netif->dev->last_rx = jiffies;

        mcl++;
    }
}
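
/*
 * Mark a pending slot as finished: queue it on dealloc_ring and kick the TX
 * tasklet, which performs the actual unmap and sends the TX response.
 */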
static void netif_idx_release(u16 pending_idx)
{
    static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
    unsigned long flags;

    spin_lock_irqsave(&_lock, flags);
    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
    spin_unlock_irqrestore(&_lock, flags);

    tasklet_schedule(&net_tx_tasklet);
}

static void netif_page_release(struct page *page)
{
    u16 pending_idx = page - virt_to_page(mmap_vstart);

    /* Ready for next use. */
    set_page_count(page, 1);

    netif_idx_release(pending_idx);
}
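
/* Event-channel interrupt from the frontend: schedule TX work for it. */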
irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
    netif_t *netif = dev_id;
    if ( tx_work_exists(netif) )
    {
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();
    }
    return IRQ_HANDLED;
}

static void make_tx_response(netif_t *netif,
                             u16      id,
                             s8       st)
{
    NETIF_RING_IDX i = netif->tx_resp_prod;
    netif_tx_response_t *resp;

    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
    resp->id     = id;
    resp->status = st;
    wmb();
    netif->tx->resp_prod = netif->tx_resp_prod = ++i;

    mb(); /* Update producer before checking event threshold. */
    if ( i == netif->tx->event )
        notify_via_evtchn(netif->evtchn);
}

static int make_rx_response(netif_t *netif,
                            u16      id,
                            s8       st,
                            memory_t addr,
                            u16      size)
{
    NETIF_RING_IDX i = netif->rx_resp_prod;
    netif_rx_response_t *resp;

    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
    resp->addr   = addr;
    resp->id     = id;
    resp->status = (s16)size;
    if ( st < 0 )
        resp->status = (s16)st;
    wmb();
    netif->rx->resp_prod = netif->rx_resp_prod = ++i;

    mb(); /* Update producer before checking event threshold. */
    return (i == netif->rx->event);
}
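
/* VIRQ_DEBUG handler: dump ring state for every interface on the schedule list. */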
static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
{
    struct list_head *ent;
    netif_t *netif;
    int i = 0;

    printk(KERN_ALERT "netif_schedule_list:\n");
    spin_lock_irq(&net_schedule_list_lock);

    list_for_each ( ent, &net_schedule_list )
    {
        netif = list_entry(ent, netif_t, list);
        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
               i, netif->rx_req_cons, netif->rx_resp_prod);
        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
               netif->tx_req_cons, netif->tx_resp_prod);
        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
               netif->rx->req_prod, netif->rx->resp_prod);
        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
               netif->rx->event, netif->tx->req_prod);
        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
               netif->tx->resp_prod, netif->tx->event);
        i++;
    }

    spin_unlock_irq(&net_schedule_list_lock);
    printk(KERN_ALERT " ** End of netif_schedule_list **\n");

    return IRQ_HANDLED;
}
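
/*
 * Module initialisation: runs only in a domain with backend privileges
 * (SIF_NET_BE_DOMAIN or SIF_INITDOMAIN). Reserves MAX_PENDING_REQS pages of
 * empty low memory for mapping foreign TX buffers, marks them as foreign
 * pages with netif_page_release() as the destructor, and installs the
 * VIRQ_DEBUG handler.
 */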
static int __init netback_init(void)
{
    int i;
    struct page *page;

    if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
         !(xen_start_info.flags & SIF_INITDOMAIN) )
        return 0;

    printk("Initialising Xen netif backend\n");

    /* We can increase reservation by this much in net_rx_action(). */
    balloon_update_driver_allowance(NETIF_RX_RING_SIZE);

    skb_queue_head_init(&rx_queue);
    skb_queue_head_init(&tx_queue);

    init_timer(&net_timer);
    net_timer.data = 0;
    net_timer.function = net_alarm;

    netif_interface_init();

    if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
        BUG();

    for ( i = 0; i < MAX_PENDING_REQS; i++ )
    {
        page = virt_to_page(MMAP_VADDR(i));
        set_page_count(page, 1);
        SetPageForeign(page, netif_page_release);
    }

    pending_cons = 0;
    pending_prod = MAX_PENDING_REQS;
    for ( i = 0; i < MAX_PENDING_REQS; i++ )
        pending_ring[i] = i;

    spin_lock_init(&net_schedule_list_lock);
    INIT_LIST_HEAD(&net_schedule_list);

    netif_ctrlif_init();

    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
                      netif_be_dbg, SA_SHIRQ,
                      "net-be-dbg", &netif_be_dbg);

    return 0;
}

static void netback_cleanup(void)
{
    BUG();
}

module_init(netback_init);
module_exit(netback_cleanup);