ia64/xen-unstable

linux-2.6.10-xen-sparse/drivers/xen/netback/netback.c @ 3742:d1e1c9854420

bitkeeper revision 1.1159.255.3 (420918d3OV9YNdw3dCaE6e4udrKnDA)

manual merge
author iap10@freefall.cl.cam.ac.uk
date Tue Feb 08 19:53:55 2005 +0000 (2005-02-08)
parents 9f7935ea4606 6062bb54a227
children e2b4ca470b91
/******************************************************************************
 * drivers/xen/netback/netback.c
 *
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/xen/netfront/netfront.c
 *
 * Copyright (c) 2002-2004, K A Fraser
 */
#include "common.h"
#include <asm-xen/balloon.h>
#include <asm-xen/evtchn.h>

static void netif_idx_release(u16 pending_idx);
static void netif_page_release(struct page *page);
static void make_tx_response(netif_t *netif,
                             u16      id,
                             s8       st);
static int  make_rx_response(netif_t *netif,
                             u16      id,
                             s8       st,
                             memory_t addr,
                             u16      size);
static void net_tx_action(unsigned long unused);
static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);

static void net_rx_action(unsigned long unused);
static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);

static struct timer_list net_timer;

static struct sk_buff_head rx_queue;
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
static unsigned char rx_notify[NR_EVENT_CHANNELS];
/* Don't currently gate addition of an interface to the tx scheduling list. */
#define tx_work_exists(_if) (1)

#define MAX_PENDING_REQS 256
static unsigned long mmap_vstart;
#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))

#define PKT_PROT_LEN 64
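/*
 * PKT_PROT_LEN is the number of bytes copied from a guest's transmit buffer
 * into the linear area of a locally-allocated skb by net_tx_action() below;
 * any remainder stays in the mapped guest page and is attached as a fragment.
 *
 * Each in-flight guest transmit request occupies one of MAX_PENDING_REQS
 * slots. pending_ring[] holds the indices of free slots; pending_prod and
 * pending_cons are free-running producer/consumer counters reduced modulo
 * MAX_PENDING_REQS (a power of two) by MASK_PEND_IDX(). For example, with
 * MAX_PENDING_REQS == 256, MASK_PEND_IDX(257) == 1.
 */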
static struct {
    netif_tx_request_t req;
    netif_t *netif;
} pending_tx_info[MAX_PENDING_REQS];
static u16 pending_ring[MAX_PENDING_REQS];
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)

/* Freed TX SKBs get batched on this ring before return to pending_ring. */
static u16 dealloc_ring[MAX_PENDING_REQS];
static PEND_RING_IDX dealloc_prod, dealloc_cons;

static struct sk_buff_head tx_queue;
static multicall_entry_t tx_mcl[MAX_PENDING_REQS];

static struct list_head net_schedule_list;
static spinlock_t net_schedule_list_lock;

#define MAX_MFN_ALLOC 64
static unsigned long mfn_list[MAX_MFN_ALLOC];
static unsigned int alloc_index = 0;
static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
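/*
 * Small pool of machine frames used to replace the pages handed over to
 * guests on the receive path. alloc_mfn() refills the pool from the
 * hypervisor (MEMOP_increase_reservation) in batches of up to MAX_MFN_ALLOC
 * and returns 0 if no frame can be obtained; free_mfn() returns a frame to
 * the pool, or back to the hypervisor if the pool is full.
 */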
static unsigned long alloc_mfn(void)
{
    unsigned long mfn = 0, flags;
    spin_lock_irqsave(&mfn_lock, flags);
    if ( unlikely(alloc_index == 0) )
        alloc_index = HYPERVISOR_dom_mem_op(
            MEMOP_increase_reservation, mfn_list, MAX_MFN_ALLOC, 0);
    if ( alloc_index != 0 )
        mfn = mfn_list[--alloc_index];
    spin_unlock_irqrestore(&mfn_lock, flags);
    return mfn;
}
static void free_mfn(unsigned long mfn)
{
    unsigned long flags;
    spin_lock_irqsave(&mfn_lock, flags);
    if ( alloc_index != MAX_MFN_ALLOC )
        mfn_list[alloc_index++] = mfn;
    else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
                                    &mfn, 1, 0) != 1 )
        BUG();
    spin_unlock_irqrestore(&mfn_lock, flags);
}
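/*
 * Kick the tx tasklet, but only when more than half of the pending-request
 * slots are free and at least one interface is queued for servicing. The
 * barrier is presumably there so that updates to pending_prod/pending_cons
 * and the schedule list made on other CPUs are seen before the test.
 */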
static inline void maybe_schedule_tx_action(void)
{
    smp_mb();
    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
         !list_empty(&net_schedule_list) )
        tasklet_schedule(&net_tx_tasklet);
}
/*
 * A gross way of confirming the origin of an skb data page. The slab
 * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
 */
static inline int is_xen_skb(struct sk_buff *skb)
{
    extern kmem_cache_t *skbuff_cachep;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
#else
    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
#endif
    return (cp == skbuff_cachep);
}
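/*
 * Transmit function for the backend's virtual device. Packets sent towards
 * the guest are queued on rx_queue and delivered by net_rx_action(). Note
 * the inversion of terminology: the backend's transmit path is the guest's
 * receive path, so these packets count as tx_* in our stats but complete as
 * responses on the frontend's receive ring.
 */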
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    netif_t *netif = netdev_priv(dev);

    ASSERT(skb->dev == dev);

    /* Drop the packet if the target domain has no receive buffers. */
    if ( !netif->active ||
         (netif->rx_req_cons == netif->rx->req_prod) ||
         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
        goto drop;

    /*
     * We do not copy the packet unless:
     *  1. The data is shared or cloned; or
     *  2. The data is not allocated from our special cache.
     * NB. We also couldn't cope with fragmented packets, but we won't get
     *     any because we do not advertise the NETIF_F_SG feature.
     */
    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
    {
        int hlen = skb->data - skb->head;
        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
        if ( unlikely(nskb == NULL) )
            goto drop;
        skb_reserve(nskb, hlen);
        __skb_put(nskb, skb->len);
        (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
        nskb->dev = skb->dev;
        dev_kfree_skb(skb);
        skb = nskb;
    }

    netif->rx_req_cons++;
    netif_get(netif);

    skb_queue_tail(&rx_queue, skb);
    tasklet_schedule(&net_rx_tasklet);

    return 0;

 drop:
    netif->stats.tx_dropped++;
    dev_kfree_skb(skb);
    return 0;
}
#if 0
static void xen_network_done_notify(void)
{
    static struct net_device *eth0_dev = NULL;
    if ( unlikely(eth0_dev == NULL) )
        eth0_dev = __dev_get_by_name("eth0");
    netif_rx_schedule(eth0_dev);
}
/*
 * Add following to poll() function in NAPI driver (Tigon3 is example):
 *  if ( xen_network_done() )
 *      tg3_enable_ints(tp);
 */
int xen_network_done(void)
{
    return skb_queue_empty(&rx_queue);
}
#endif
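/*
 * Deliver queued skbs to their target domains by page flipping. For each skb
 * a replacement frame is taken from the mfn pool, and a two-entry multicall
 * is built: update_va_mapping remaps the backend's virtual address of the
 * data page onto the new frame, and a batch of three mmu_update requests
 * updates the machine-to-physical entry for the new frame, selects the
 * target domain (MMUEXT_SET_FOREIGNDOM) and reassigns the original data
 * frame to it (MMUEXT_REASSIGN_PAGE). The whole batch is then issued in a
 * single HYPERVISOR_multicall.
 */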
static void net_rx_action(unsigned long unused)
{
    netif_t *netif;
    s8 status;
    u16 size, id, evtchn;
    mmu_update_t *mmu;
    multicall_entry_t *mcl;
    unsigned long vdata, mdata, new_mfn;
    struct sk_buff_head rxq;
    struct sk_buff *skb;
    u16 notify_list[NETIF_RX_RING_SIZE];
    int notify_nr = 0;

    skb_queue_head_init(&rxq);

    mcl = rx_mcl;
    mmu = rx_mmu;
    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
    {
        netif = netdev_priv(skb->dev);
        vdata = (unsigned long)skb->data;
        mdata = virt_to_machine(vdata);

        /* Memory squeeze? Back off for an arbitrary while. */
        if ( (new_mfn = alloc_mfn()) == 0 )
        {
            if ( net_ratelimit() )
                printk(KERN_WARNING "Memory squeeze in netback driver.\n");
            mod_timer(&net_timer, jiffies + HZ);
            skb_queue_head(&rx_queue, skb);
            break;
        }

        /*
         * Set the new P2M table entry before reassigning the old data page.
         * Heed the comment in pgtable-2level.h:pte_page(). :-)
         */
        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;

        mmu[0].ptr  = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
        mmu[0].val  = __pa(vdata) >> PAGE_SHIFT;
        mmu[1].ptr  = MMU_EXTENDED_COMMAND;
        mmu[1].val  = MMUEXT_SET_FOREIGNDOM;
        mmu[1].val |= (unsigned long)netif->domid << 16;
        mmu[2].ptr  = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
        mmu[2].val  = MMUEXT_REASSIGN_PAGE;

        mcl[0].op      = __HYPERVISOR_update_va_mapping;
        mcl[0].args[0] = vdata;
        mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
        mcl[0].args[2] = 0;
        mcl[1].op      = __HYPERVISOR_mmu_update;
        mcl[1].args[0] = (unsigned long)mmu;
        mcl[1].args[1] = 3;
        mcl[1].args[2] = 0;

        mcl += 2;
        mmu += 3;

        __skb_queue_tail(&rxq, skb);

        /* Filled the batch queue? */
        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
            break;
    }

    if ( mcl == rx_mcl )
        return;

    mcl[-2].args[2] = UVMF_FLUSH_TLB;
    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
        BUG();
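    /*
     * Second pass: the batched hypercalls have completed. Walk rxq in the
     * same order, rederive each packet's machine address from the multicall
     * and mmu_update arguments, post a receive response to the frontend, and
     * record at most one pending event-channel notification per channel.
     */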
    mcl = rx_mcl;
    mmu = rx_mmu;
    while ( (skb = __skb_dequeue(&rxq)) != NULL )
    {
        netif = netdev_priv(skb->dev);
        size = skb->tail - skb->data;

        /* Rederive the machine addresses. */
        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
        mdata = ((mmu[2].ptr & PAGE_MASK) |
                 ((unsigned long)skb->data & ~PAGE_MASK));

        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags = 0;
        skb_shinfo(skb)->frag_list = NULL;

        netif->stats.tx_bytes += size;
        netif->stats.tx_packets++;

        /* The update_va_mapping() must not fail. */
        if ( unlikely(mcl[0].args[5] != 0) )
            BUG();

        /* Check the reassignment error code. */
        status = NETIF_RSP_OKAY;
        if ( unlikely(mcl[1].args[5] != 0) )
        {
            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
            free_mfn(mdata >> PAGE_SHIFT);
            status = NETIF_RSP_ERROR;
        }

        evtchn = netif->evtchn;
        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
        if ( make_rx_response(netif, id, status, mdata, size) &&
             (rx_notify[evtchn] == 0) )
        {
            rx_notify[evtchn] = 1;
            notify_list[notify_nr++] = evtchn;
        }

        netif_put(netif);
        dev_kfree_skb(skb);

        mcl += 2;
        mmu += 3;
    }

    while ( notify_nr != 0 )
    {
        evtchn = notify_list[--notify_nr];
        rx_notify[evtchn] = 0;
        notify_via_evtchn(evtchn);
    }

    /* More work to do? */
    if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
        tasklet_schedule(&net_rx_tasklet);
#if 0
    else
        xen_network_done_notify();
#endif
}
static void net_alarm(unsigned long unused)
{
    tasklet_schedule(&net_rx_tasklet);
}

struct net_device_stats *netif_be_get_stats(struct net_device *dev)
{
    netif_t *netif = netdev_priv(dev);
    return &netif->stats;
}
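/*
 * net_schedule_list holds the interfaces that have transmit requests
 * outstanding. Membership is encoded in the interface's list pointers:
 * list.next == NULL means "not on the list", which is why the pointer is
 * reset on removal below.
 */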
static int __on_net_schedule_list(netif_t *netif)
{
    return netif->list.next != NULL;
}

static void remove_from_net_schedule_list(netif_t *netif)
{
    spin_lock_irq(&net_schedule_list_lock);
    if ( likely(__on_net_schedule_list(netif)) )
    {
        list_del(&netif->list);
        netif->list.next = NULL;
        netif_put(netif);
    }
    spin_unlock_irq(&net_schedule_list_lock);
}

static void add_to_net_schedule_list_tail(netif_t *netif)
{
    if ( __on_net_schedule_list(netif) )
        return;

    spin_lock_irq(&net_schedule_list_lock);
    if ( !__on_net_schedule_list(netif) && netif->active )
    {
        list_add_tail(&netif->list, &net_schedule_list);
        netif_get(netif);
    }
    spin_unlock_irq(&net_schedule_list_lock);
}
void netif_schedule_work(netif_t *netif)
{
    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
    {
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();
    }
}

void netif_deschedule_work(netif_t *netif)
{
    remove_from_net_schedule_list(netif);
}

#if 0
static void tx_credit_callback(unsigned long data)
{
    netif_t *netif = (netif_t *)data;
    netif->remaining_credit = netif->credit_bytes;
    netif_schedule_work(netif);
}
#endif
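/*
 * Process guest transmit requests. Three phases:
 *  1. Dealloc: tear down the foreign mappings queued on dealloc_ring by
 *     netif_idx_release(), send the corresponding tx responses, and return
 *     the slots to pending_ring.
 *  2. Map: pull requests from interfaces on the schedule list, validate
 *     them, and batch update_va_mapping_otherdomain multicalls that map
 *     each guest transmit buffer at MMAP_VADDR(pending_idx).
 *  3. After the multicall, copy up to PKT_PROT_LEN bytes into a local skb,
 *     attach any remainder as a page fragment, and hand the packet to
 *     netif_rx().
 */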
static void net_tx_action(unsigned long unused)
{
    struct list_head *ent;
    struct sk_buff *skb;
    netif_t *netif;
    netif_tx_request_t txreq;
    u16 pending_idx;
    NETIF_RING_IDX i;
    multicall_entry_t *mcl;
    PEND_RING_IDX dc, dp;
    unsigned int data_len;

    if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
        goto skip_dealloc;

    mcl = tx_mcl;
    while ( dc != dp )
    {
        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
        mcl[0].op      = __HYPERVISOR_update_va_mapping;
        mcl[0].args[0] = MMAP_VADDR(pending_idx);
        mcl[0].args[1] = 0;
        mcl[0].args[2] = 0;
        mcl++;
    }

    mcl[-1].args[2] = UVMF_FLUSH_TLB;
    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
        BUG();

    mcl = tx_mcl;
    while ( dealloc_cons != dp )
    {
        /* The update_va_mapping() must not fail. */
        if ( unlikely(mcl[0].args[5] != 0) )
            BUG();

        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];

        netif = pending_tx_info[pending_idx].netif;

        make_tx_response(netif, pending_tx_info[pending_idx].req.id,
                         NETIF_RSP_OKAY);

        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;

        /*
         * Scheduling checks must happen after the above response is posted.
         * This avoids a possible race with a guest OS on another CPU if that
         * guest is testing against 'resp_prod' when deciding whether to
         * notify us when it queues additional packets.
         */
        mb();
        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
            add_to_net_schedule_list_tail(netif);

        netif_put(netif);

        mcl++;
    }

 skip_dealloc:
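    /*
     * Map as many queued transmit requests as will fit in the multicall
     * batch, limited by the number of free pending slots.
     */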
    mcl = tx_mcl;
    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
            !list_empty(&net_schedule_list) )
    {
        /* Get a netif from the list with work to do. */
        ent = net_schedule_list.next;
        netif = list_entry(ent, netif_t, list);
        netif_get(netif);
        remove_from_net_schedule_list(netif);

        /* Work to do? */
        i = netif->tx_req_cons;
        if ( (i == netif->tx->req_prod) ||
             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
        {
            netif_put(netif);
            continue;
        }

        netif->tx->req_cons = ++netif->tx_req_cons;

        /*
         * 1. Ensure that we see the request when we copy it.
         * 2. Ensure that frontend sees updated req_cons before we check
         *    for more work to schedule.
         */
        mb();

        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
               sizeof(txreq));
#if 0
        /* Credit-based scheduling. */
        if ( txreq.size > netif->remaining_credit )
        {
            s_time_t now = NOW(), next_credit =
                netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
            if ( next_credit <= now )
            {
                netif->credit_timeout.expires = now;
                netif->remaining_credit = netif->credit_bytes;
            }
            else
            {
                netif->remaining_credit = 0;
                netif->credit_timeout.expires  = next_credit;
                netif->credit_timeout.data     = (unsigned long)netif;
                netif->credit_timeout.function = tx_credit_callback;
                netif->credit_timeout.cpu      = smp_processor_id();
                add_ac_timer(&netif->credit_timeout);
                break;
            }
        }
        netif->remaining_credit -= txreq.size;
#endif
        netif_schedule_work(netif);

        if ( unlikely(txreq.size < ETH_HLEN) ||
             unlikely(txreq.size > ETH_FRAME_LEN) )
        {
            DPRINTK("Bad packet size: %d\n", txreq.size);
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            continue;
        }

        /* No crossing a page boundary as the payload mustn't fragment. */
        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
        {
            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
                    txreq.addr, txreq.size,
                    (txreq.addr &~PAGE_MASK) + txreq.size);
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            continue;
        }

        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];

        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;

        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
        {
            DPRINTK("Can't allocate a skb in start_xmit.\n");
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            break;
        }

        /* Packets passed to netif_rx() must have some headroom. */
        skb_reserve(skb, 16);

        mcl[0].op      = __HYPERVISOR_update_va_mapping_otherdomain;
        mcl[0].args[0] = MMAP_VADDR(pending_idx);
        mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
        mcl[0].args[2] = 0;
        mcl[0].args[3] = netif->domid;
        mcl++;

        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
        pending_tx_info[pending_idx].netif = netif;
        *((u16 *)skb->data) = pending_idx;

        __skb_queue_tail(&tx_queue, skb);

        pending_cons++;

        /* Filled the batch queue? */
        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
            break;
    }

    if ( mcl == tx_mcl )
        return;

    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
        BUG();
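    /*
     * The foreign mappings are now in place (or have failed individually).
     * Copy the protocol headers out of each mapped page, attach any
     * remaining payload as a page fragment, and inject the packet into the
     * local network stack via netif_rx().
     */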
    mcl = tx_mcl;
    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
    {
        pending_idx = *((u16 *)skb->data);
        netif       = pending_tx_info[pending_idx].netif;
        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));

        /* Check the remap error code. */
        if ( unlikely(mcl[0].args[5] != 0) )
        {
            DPRINTK("Bad page frame\n");
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            kfree_skb(skb);
            mcl++;
            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
            continue;
        }

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);

        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;

        __skb_put(skb, data_len);
        memcpy(skb->data,
               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
               data_len);

        if ( data_len < txreq.size )
        {
            /* Append the packet payload as a fragment. */
            skb_shinfo(skb)->frags[0].page =
                virt_to_page(MMAP_VADDR(pending_idx));
            skb_shinfo(skb)->frags[0].size = txreq.size - data_len;
            skb_shinfo(skb)->frags[0].page_offset =
                (txreq.addr + data_len) & ~PAGE_MASK;
            skb_shinfo(skb)->nr_frags = 1;
        }
        else
        {
            /* Schedule a response immediately. */
            netif_idx_release(pending_idx);
        }

        skb->data_len  = txreq.size - data_len;
        skb->len      += skb->data_len;

        skb->dev      = netif->dev;
        skb->protocol = eth_type_trans(skb, skb->dev);

        netif->stats.rx_bytes += txreq.size;
        netif->stats.rx_packets++;

        netif_rx(skb);
        netif->dev->last_rx = jiffies;

        mcl++;
    }
}
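/*
 * Release of a pending transmit slot. The page itself is unmapped lazily in
 * net_tx_action()'s dealloc phase; here we only queue the index and kick the
 * tasklet. netif_page_release() is the callback installed on the foreign
 * mapping pages in netback_init(); presumably it runs once the last
 * reference to such a page is dropped (e.g. when the skb fragment pointing
 * at it is freed).
 */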
static void netif_idx_release(u16 pending_idx)
{
    static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
    unsigned long flags;

    spin_lock_irqsave(&_lock, flags);
    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
    spin_unlock_irqrestore(&_lock, flags);

    tasklet_schedule(&net_tx_tasklet);
}

static void netif_page_release(struct page *page)
{
    u16 pending_idx = page - virt_to_page(mmap_vstart);

    /* Ready for next use. */
    set_page_count(page, 1);

    netif_idx_release(pending_idx);
}
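/*
 * Event-channel interrupt from the frontend: simply queue the interface for
 * transmit processing. tx_work_exists() is currently hard-wired to 1 (see
 * the definition above), so every interrupt schedules the interface.
 */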
irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
    netif_t *netif = dev_id;
    if ( tx_work_exists(netif) )
    {
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();
    }
    return IRQ_HANDLED;
}
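/*
 * Response constructors. Both advance the private and shared response
 * producers together after a write barrier. make_rx_response() returns
 * nonzero when the new producer index matches the frontend's requested
 * event index, so the caller knows an event-channel notification is needed;
 * make_tx_response() performs that notification itself.
 */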
static void make_tx_response(netif_t *netif,
                             u16      id,
                             s8       st)
{
    NETIF_RING_IDX i = netif->tx_resp_prod;
    netif_tx_response_t *resp;

    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
    resp->id     = id;
    resp->status = st;
    wmb();
    netif->tx->resp_prod = netif->tx_resp_prod = ++i;

    mb(); /* Update producer before checking event threshold. */
    if ( i == netif->tx->event )
        notify_via_evtchn(netif->evtchn);
}

static int make_rx_response(netif_t *netif,
                            u16      id,
                            s8       st,
                            memory_t addr,
                            u16      size)
{
    NETIF_RING_IDX i = netif->rx_resp_prod;
    netif_rx_response_t *resp;

    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
    resp->addr   = addr;
    resp->id     = id;
    resp->status = (s16)size;
    if ( st < 0 )
        resp->status = (s16)st;
    wmb();
    netif->rx->resp_prod = netif->rx_resp_prod = ++i;

    mb(); /* Update producer before checking event threshold. */
    return (i == netif->rx->event);
}
static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
{
    struct list_head *ent;
    netif_t *netif;
    int i = 0;

    printk(KERN_ALERT "netif_schedule_list:\n");
    spin_lock_irq(&net_schedule_list_lock);

    list_for_each ( ent, &net_schedule_list )
    {
        netif = list_entry(ent, netif_t, list);
        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
               i, netif->rx_req_cons, netif->rx_resp_prod);
        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
               netif->tx_req_cons, netif->tx_resp_prod);
        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
               netif->rx->req_prod, netif->rx->resp_prod);
        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
               netif->rx->event, netif->tx->req_prod);
        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
               netif->tx->resp_prod, netif->tx->event);
        i++;
    }

    spin_unlock_irq(&net_schedule_list_lock);
    printk(KERN_ALERT " ** End of netif_schedule_list **\n");

    return IRQ_HANDLED;
}
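/*
 * Module initialisation: only active when running as the network backend
 * (or initial) domain. A MAX_PENDING_REQS-page region of empty low memory
 * is reserved for mapping guest transmit buffers, and each of its pages is
 * marked foreign with netif_page_release() registered as the release
 * callback. The balloon driver allowance is raised so that net_rx_action()
 * may increase the reservation by up to NETIF_RX_RING_SIZE pages.
 */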
static int __init netback_init(void)
{
    int i;
    struct page *page;

    if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
         !(xen_start_info.flags & SIF_INITDOMAIN) )
        return 0;

    printk("Initialising Xen netif backend\n");

    /* We can increase reservation by this much in net_rx_action(). */
    balloon_update_driver_allowance(NETIF_RX_RING_SIZE);

    skb_queue_head_init(&rx_queue);
    skb_queue_head_init(&tx_queue);

    init_timer(&net_timer);
    net_timer.data = 0;
    net_timer.function = net_alarm;

    netif_interface_init();

    if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
        BUG();

    for ( i = 0; i < MAX_PENDING_REQS; i++ )
    {
        page = virt_to_page(MMAP_VADDR(i));
        set_page_count(page, 1);
        SetPageForeign(page, netif_page_release);
    }

    pending_cons = 0;
    pending_prod = MAX_PENDING_REQS;
    for ( i = 0; i < MAX_PENDING_REQS; i++ )
        pending_ring[i] = i;

    spin_lock_init(&net_schedule_list_lock);
    INIT_LIST_HEAD(&net_schedule_list);

    netif_ctrlif_init();

    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
                      netif_be_dbg, SA_SHIRQ,
                      "net-be-dbg", &netif_be_dbg);

    return 0;
}
static void netback_cleanup(void)
{
    BUG();
}

module_init(netback_init);
module_exit(netback_cleanup);