ia64/xen-unstable

view linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/main.c @ 1615:f9bbf7aa1596

bitkeeper revision 1.1026.2.1 (40e1764d1ndRTs9hmUyiBLEHi5_V3A)

Fix network backend bugs. It isn't safe to use skb->cb[] for our own
purposes after all. :-(
author kaf24@scramble.cl.cam.ac.uk
date Tue Jun 29 14:01:49 2004 +0000 (2004-06-29)
parents 5bfc0d01717c
children 489b925b0e22

/******************************************************************************
 * arch/xen/drivers/netif/backend/main.c
 *
 * Back-end of the driver for virtual network interfaces. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/netif/frontend
 *
 * Copyright (c) 2002-2004, K A Fraser
 */

#include "common.h"

static void netif_page_release(struct page *page);
static void make_tx_response(netif_t *netif,
                             u16      id,
                             s8       st);
static int  make_rx_response(netif_t *netif,
                             u16      id,
                             s8       st,
                             memory_t addr,
                             u16      size);

static void net_tx_action(unsigned long unused);
static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);

static void net_rx_action(unsigned long unused);
static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);

static struct sk_buff_head rx_queue;
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
static unsigned char rx_notify[NR_EVENT_CHANNELS];
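
/*
 * Each received packet queued for a guest consumes two multicall slots
 * (one update_va_mapping plus one mmu_update batch) and three mmu_update
 * entries (machphys update, SET_SUBJECTDOM, REASSIGN_PAGE), hence the
 * sizing of rx_mcl[] and rx_mmu[] above. rx_notify[] records which event
 * channels already have a pending notification so that net_rx_action()
 * notifies each remote domain at most once per batch.
 */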

/* Don't currently gate addition of an interface to the tx scheduling list. */
#define tx_work_exists(_if) (1)

#define MAX_PENDING_REQS 256
static unsigned long mmap_vstart;
#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))

#define PKT_PROT_LEN (ETH_HLEN + 20)
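
/*
 * PKT_PROT_LEN bytes (the Ethernet header plus 20 bytes, enough for a basic
 * IP header) are copied into the linear area of each transmit skb so the
 * network stack can parse the protocol headers; the remainder of the guest's
 * packet is attached as a page fragment in net_tx_action().
 */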

static struct {
    netif_tx_request_t req;
    netif_t *netif;
} pending_tx_info[MAX_PENDING_REQS];
static u16 pending_ring[MAX_PENDING_REQS];
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
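
/*
 * pending_ring[] is a free list of transmit-buffer slots. pending_prod and
 * pending_cons are free-running indices, masked with MASK_PEND_IDX() on
 * access, so NR_PENDING_REQS is the number of slots currently mapped to
 * guest transmit buffers.
 */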

/* Freed TX SKBs get batched on this ring before return to pending_ring. */
static u16 dealloc_ring[MAX_PENDING_REQS];
static spinlock_t dealloc_lock = SPIN_LOCK_UNLOCKED;
static PEND_RING_IDX dealloc_prod, dealloc_cons;

static struct sk_buff_head tx_queue;
static multicall_entry_t tx_mcl[MAX_PENDING_REQS];

static struct list_head net_schedule_list;
static spinlock_t net_schedule_list_lock;

#define MAX_MFN_ALLOC 64
static unsigned long mfn_list[MAX_MFN_ALLOC];
static unsigned int alloc_index = 0;
static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;

static void __refresh_mfn_list(void)
{
    int ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
                                    mfn_list, MAX_MFN_ALLOC);
    if ( unlikely(ret != MAX_MFN_ALLOC) )
    {
        printk(KERN_ALERT "Unable to increase memory reservation (%d)\n", ret);
        BUG();
    }
    alloc_index = MAX_MFN_ALLOC;
}

static unsigned long get_new_mfn(void)
{
    unsigned long mfn, flags;
    spin_lock_irqsave(&mfn_lock, flags);
    if ( alloc_index == 0 )
        __refresh_mfn_list();
    mfn = mfn_list[--alloc_index];
    spin_unlock_irqrestore(&mfn_lock, flags);
    return mfn;
}

static void dealloc_mfn(unsigned long mfn)
{
    unsigned long flags;
    spin_lock_irqsave(&mfn_lock, flags);
    if ( alloc_index != MAX_MFN_ALLOC )
        mfn_list[alloc_index++] = mfn;
    else
        (void)HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, &mfn, 1);
    spin_unlock_irqrestore(&mfn_lock, flags);
}
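
/*
 * get_new_mfn() hands out machine frames from a small local pool, refilled
 * in batches of MAX_MFN_ALLOC via MEMOP_increase_reservation. These frames
 * replace the local pages whose machine frames are reassigned to guests on
 * the receive path; dealloc_mfn() returns a frame to the pool, or back to
 * Xen if the pool is already full.
 */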

static inline void maybe_schedule_tx_action(void)
{
    smp_mb();
    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
         !list_empty(&net_schedule_list) )
        tasklet_schedule(&net_tx_tasklet);
}
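
/*
 * The tx tasklet is kicked only when fewer than half of the pending-request
 * slots are in use and at least one interface is on the schedule list, so
 * transmit work tends to be processed in reasonably sized batches.
 */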

/*
 * This is the primary RECEIVE function for a network interface.
 * Note that, from the p.o.v. of /this/ OS it looks like a transmit.
 */
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    netif_t *netif = (netif_t *)dev->priv;

    ASSERT(skb->dev == dev);

    /* Drop the packet if the target domain has no receive buffers. */
    if ( (netif->rx_req_cons == netif->rx->req_prod) ||
         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
        goto drop;

    /*
     * We do not copy the packet unless:
     *  1. The data is shared; or
     *  2. It spans a page boundary; or
     *  3. We cannot be sure the whole data page is allocated.
     * The copying method is taken from skb_copy().
     * NB. We also can't cope with fragmented packets, but we won't get
     * any because we do not advertise the NETIF_F_SG feature.
     */
    if ( skb_shared(skb) || skb_cloned(skb) ||
         (((unsigned long)skb->end ^ (unsigned long)skb->head) & PAGE_MASK) ||
         ((skb->end - skb->head) < (PAGE_SIZE/2)) )
    {
        struct sk_buff *nskb = alloc_skb(PAGE_SIZE-1024, GFP_ATOMIC);
        int hlen = skb->data - skb->head;
        if ( unlikely(nskb == NULL) )
            goto drop;
        skb_reserve(nskb, hlen);
        __skb_put(nskb, skb->len);
        (void)skb_copy_bits(skb, -hlen, nskb->head, hlen + skb->len);
        nskb->dev = skb->dev;
        dev_kfree_skb(skb);
        skb = nskb;
    }

    netif->rx_req_cons++;

    skb_queue_tail(&rx_queue, skb);
    tasklet_schedule(&net_rx_tasklet);

    return 0;

 drop:
    netif->stats.rx_dropped++;
    dev_kfree_skb(skb);
    return 0;
}

#if 0
static void xen_network_done_notify(void)
{
    static struct net_device *eth0_dev = NULL;
    if ( unlikely(eth0_dev == NULL) )
        eth0_dev = __dev_get_by_name("eth0");
    netif_rx_schedule(eth0_dev);
}
/*
 * Add following to poll() function in NAPI driver (Tigon3 is example):
 *  if ( xen_network_done() )
 *      tg3_enable_ints(tp);
 */
int xen_network_done(void)
{
    return skb_queue_empty(&rx_queue);
}
#endif
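
/*
 * net_rx_action() works in two passes over the queued skbs. The first pass
 * builds a batch of multicalls: for each packet it remaps the local virtual
 * address onto a freshly allocated machine frame and queues MMU updates
 * that reassign the packet's original machine page to the receiving domain.
 * The second pass, after the single HYPERVISOR_multicall(), checks each
 * result, fixes up the phys-to-machine mapping, posts a response on the
 * guest's rx ring, and sends at most one notification per event channel.
 */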

static void net_rx_action(unsigned long unused)
{
    netif_t *netif;
    s8 status;
    u16 size, id, evtchn;
    mmu_update_t *mmu;
    multicall_entry_t *mcl;
    unsigned long vdata, mdata, new_mfn;
    struct sk_buff_head rxq;
    struct sk_buff *skb;
    u16 notify_list[NETIF_RX_RING_SIZE];
    int notify_nr = 0;

    skb_queue_head_init(&rxq);

    mcl = rx_mcl;
    mmu = rx_mmu;
    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
    {
        netif   = (netif_t *)skb->dev->priv;
        vdata   = (unsigned long)skb->data;
        mdata   = virt_to_machine(vdata);
        new_mfn = get_new_mfn();

        mmu[0].ptr  = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
        mmu[0].val  = __pa(vdata) >> PAGE_SHIFT;
        mmu[1].val  = (unsigned long)(netif->domid<<16) & ~0xFFFFUL;
        mmu[1].ptr  = (unsigned long)(netif->domid<< 0) & ~0xFFFFUL;
        mmu[1].ptr |= MMU_EXTENDED_COMMAND;
        mmu[1].val |= MMUEXT_SET_SUBJECTDOM;
        mmu[2].ptr  = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
        mmu[2].val  = MMUEXT_REASSIGN_PAGE;

        mcl[0].op = __HYPERVISOR_update_va_mapping;
        mcl[0].args[0] = vdata >> PAGE_SHIFT;
        mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
        mcl[0].args[2] = 0;
        mcl[1].op = __HYPERVISOR_mmu_update;
        mcl[1].args[0] = (unsigned long)mmu;
        mcl[1].args[1] = 3;
        mcl[1].args[2] = 0;

        mcl += 2;
        mmu += 3;

        __skb_queue_tail(&rxq, skb);

        /* Filled the batch queue? */
        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
            break;
    }

    if ( mcl == rx_mcl )
        return;

    mcl[-2].args[2] = UVMF_FLUSH_TLB;
    (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);

    mcl = rx_mcl;
    mmu = rx_mmu;
    while ( (skb = __skb_dequeue(&rxq)) != NULL )
    {
        netif = (netif_t *)skb->dev->priv;
        size  = skb->tail - skb->data;

        /* Rederive the machine addresses. */
        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
        mdata   = ((mmu[2].ptr & PAGE_MASK) |
                   ((unsigned long)skb->data & ~PAGE_MASK));

        /* Check the reassignment error code. */
        if ( unlikely(mcl[1].args[5] != 0) )
        {
            DPRINTK("Failed MMU update transferring to DOM%u\n",
                    netif->domid);
            (void)HYPERVISOR_update_va_mapping(
                (unsigned long)skb->head >> PAGE_SHIFT,
                (pte_t) { (mdata & PAGE_MASK) | __PAGE_KERNEL },
                UVMF_INVLPG);
            dealloc_mfn(new_mfn);
            status = NETIF_RSP_ERROR;
        }
        else
        {
            phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;

            atomic_set(&(skb_shinfo(skb)->dataref), 1);
            skb_shinfo(skb)->nr_frags = 0;
            skb_shinfo(skb)->frag_list = NULL;

            netif->stats.rx_bytes += size;
            netif->stats.rx_packets++;

            status = NETIF_RSP_OKAY;
        }

        evtchn = netif->evtchn;
        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
        if ( make_rx_response(netif, id, status, mdata, size) &&
             (rx_notify[evtchn] == 0) )
        {
            rx_notify[evtchn] = 1;
            notify_list[notify_nr++] = evtchn;
        }

        dev_kfree_skb(skb);

        mcl += 2;
        mmu += 3;
    }

    while ( notify_nr != 0 )
    {
        evtchn = notify_list[--notify_nr];
        rx_notify[evtchn] = 0;
        notify_via_evtchn(evtchn);
    }

    /* More work to do? */
    if ( !skb_queue_empty(&rx_queue) )
        tasklet_schedule(&net_rx_tasklet);
#if 0
    else
        xen_network_done_notify();
#endif
}

struct net_device_stats *netif_be_get_stats(struct net_device *dev)
{
    netif_t *netif = dev->priv;
    return &netif->stats;
}

static int __on_net_schedule_list(netif_t *netif)
{
    return netif->list.next != NULL;
}

static void remove_from_net_schedule_list(netif_t *netif)
{
    spin_lock_irq(&net_schedule_list_lock);
    if ( likely(__on_net_schedule_list(netif)) )
    {
        list_del(&netif->list);
        netif->list.next = NULL;
        netif_put(netif);
    }
    spin_unlock_irq(&net_schedule_list_lock);
}

static void add_to_net_schedule_list_tail(netif_t *netif)
{
    if ( __on_net_schedule_list(netif) )
        return;

    spin_lock_irq(&net_schedule_list_lock);
    if ( !__on_net_schedule_list(netif) && (netif->status == CONNECTED) )
    {
        list_add_tail(&netif->list, &net_schedule_list);
        netif_get(netif);
    }
    spin_unlock_irq(&net_schedule_list_lock);
}

static inline void netif_schedule_work(netif_t *netif)
{
    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
    {
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();
    }
}

void netif_deschedule(netif_t *netif)
{
    remove_from_net_schedule_list(netif);
}

#if 0
static void tx_credit_callback(unsigned long data)
{
    netif_t *netif = (netif_t *)data;
    netif->remaining_credit = netif->credit_bytes;
    netif_schedule_work(netif);
}
#endif
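
/*
 * net_tx_action() runs in three phases. First it drains dealloc_ring: each
 * freed slot's mapping is torn down with a batched update_va_mapping, a
 * NETIF_RSP_OKAY response is posted, and the slot returns to pending_ring.
 * Then it pulls requests from interfaces on the schedule list, validates
 * them, and batches update_va_mapping_otherdomain calls to map the guests'
 * transmit buffers at MMAP_VADDR(pending_idx). Finally, after the multicall,
 * it copies the first PKT_PROT_LEN bytes of each packet into a fresh skb,
 * attaches the rest of the mapped page as a fragment, and passes the skb
 * to netif_rx().
 */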

static void net_tx_action(unsigned long unused)
{
    struct list_head *ent;
    struct sk_buff *skb;
    netif_t *netif;
    netif_tx_request_t txreq;
    u16 pending_idx;
    NETIF_RING_IDX i;
    struct page *page;
    multicall_entry_t *mcl;

    if ( (i = dealloc_cons) == dealloc_prod )
        goto skip_dealloc;

    mcl = tx_mcl;
    while ( i != dealloc_prod )
    {
        pending_idx = dealloc_ring[MASK_PEND_IDX(i++)];
        mcl[0].op = __HYPERVISOR_update_va_mapping;
        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
        mcl[0].args[1] = 0;
        mcl[0].args[2] = 0;
        mcl++;
    }

    mcl[-1].args[2] = UVMF_FLUSH_TLB;
    (void)HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl);

    while ( dealloc_cons != dealloc_prod )
    {
        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];

        netif = pending_tx_info[pending_idx].netif;

        spin_lock(&netif->tx_lock);
        make_tx_response(netif, pending_tx_info[pending_idx].req.id,
                         NETIF_RSP_OKAY);
        spin_unlock(&netif->tx_lock);

        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;

        /*
         * Scheduling checks must happen after the above response is posted.
         * This avoids a possible race with a guest OS on another CPU.
         */
        mb();
        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
            add_to_net_schedule_list_tail(netif);

        netif_put(netif);
    }

 skip_dealloc:
    mcl = tx_mcl;
    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
            !list_empty(&net_schedule_list) )
    {
        /* Get a netif from the list with work to do. */
        ent = net_schedule_list.next;
        netif = list_entry(ent, netif_t, list);
        netif_get(netif);
        remove_from_net_schedule_list(netif);

        /* Work to do? */
        i = netif->tx_req_cons;
        if ( (i == netif->tx->req_prod) ||
             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
        {
            netif_put(netif);
            continue;
        }
        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
               sizeof(txreq));
        netif->tx_req_cons++;

#if 0
        /* Credit-based scheduling. */
        if ( txreq.size > netif->remaining_credit )
        {
            s_time_t now = NOW(), next_credit =
                netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
            if ( next_credit <= now )
            {
                netif->credit_timeout.expires = now;
                netif->remaining_credit = netif->credit_bytes;
            }
            else
            {
                netif->remaining_credit = 0;
                netif->credit_timeout.expires  = next_credit;
                netif->credit_timeout.data     = (unsigned long)netif;
                netif->credit_timeout.function = tx_credit_callback;
                netif->credit_timeout.cpu      = smp_processor_id();
                add_ac_timer(&netif->credit_timeout);
                break;
            }
        }
        netif->remaining_credit -= txreq.size;
#endif

        netif_schedule_work(netif);

        if ( unlikely(txreq.size <= PKT_PROT_LEN) ||
             unlikely(txreq.size > ETH_FRAME_LEN) )
        {
            DPRINTK("Bad packet size: %d\n", txreq.size);
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            continue;
        }

        /* No crossing a page boundary as the payload mustn't fragment. */
        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
        {
            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
                    txreq.addr, txreq.size,
                    (txreq.addr &~PAGE_MASK) + txreq.size);
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            continue;
        }

        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];

        if ( unlikely((skb = alloc_skb(PKT_PROT_LEN, GFP_ATOMIC)) == NULL) )
        {
            DPRINTK("Can't allocate a skb in start_xmit.\n");
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            break;
        }

        mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
        mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
        mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
        mcl[0].args[2] = 0;
        mcl[0].args[3] = netif->domid;
        mcl++;

        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
        pending_tx_info[pending_idx].netif = netif;
        *((u16 *)skb->data) = pending_idx;

        __skb_queue_tail(&tx_queue, skb);

        pending_cons++;

        /* Filled the batch queue? */
        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
            break;
    }

    if ( mcl == tx_mcl )
        return;

    (void)HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl);

    mcl = tx_mcl;
    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
    {
        pending_idx = *((u16 *)skb->data);
        netif       = pending_tx_info[pending_idx].netif;
        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));

        /* Check the remap error code. */
        if ( unlikely(mcl[0].args[5] != 0) )
        {
            DPRINTK("Bad page frame\n");
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            netif_put(netif);
            kfree_skb(skb);
            mcl++;
            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
            continue;
        }

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
            txreq.addr >> PAGE_SHIFT;

        __skb_put(skb, PKT_PROT_LEN);
        memcpy(skb->data,
               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
               PKT_PROT_LEN);

        page = virt_to_page(MMAP_VADDR(pending_idx));

        /* Append the packet payload as a fragment. */
        skb_shinfo(skb)->frags[0].page        = page;
        skb_shinfo(skb)->frags[0].size        = txreq.size - PKT_PROT_LEN;
        skb_shinfo(skb)->frags[0].page_offset =
            (txreq.addr + PKT_PROT_LEN) & ~PAGE_MASK;
        skb_shinfo(skb)->nr_frags = 1;
        skb->data_len  = txreq.size - PKT_PROT_LEN;
        skb->len      += skb->data_len;

        skb->dev      = netif->dev;
        skb->protocol = eth_type_trans(skb, skb->dev);

        /*
         * Destructor information. We hideously abuse the 'mapping' pointer,
         * which isn't otherwise used by us. The page deallocator is modified
         * to interpret a non-NULL value as a destructor function to be
         * called. This works okay because in all other cases the pointer
         * must be NULL when the page is freed (normally Linux will
         * explicitly bug out if it sees otherwise).
         */
        page->mapping = (struct address_space *)netif_page_release;
        atomic_set(&page->count, 1);

        netif->stats.tx_bytes += txreq.size;
        netif->stats.tx_packets++;

        netif_rx(skb);
        netif->dev->last_rx = jiffies;

        mcl++;
    }
}
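
/*
 * netif_page_release() is the destructor installed via page->mapping in
 * net_tx_action(): it runs when the last reference to a mapped transmit
 * page is dropped. It queues the slot on dealloc_ring and kicks the tx
 * tasklet, which unmaps the page and posts the final response to the guest.
 */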

static void netif_page_release(struct page *page)
{
    unsigned long flags;
    u16 pending_idx = page - virt_to_page(mmap_vstart);

    /* Stop the abuse. */
    page->mapping = NULL;

    spin_lock_irqsave(&dealloc_lock, flags);
    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
    spin_unlock_irqrestore(&dealloc_lock, flags);

    tasklet_schedule(&net_tx_tasklet);
}

#if 0
long flush_bufs_for_netif(netif_t *netif)
{
    NETIF_RING_IDX i;

    /* Return any outstanding receive buffers to the guest OS. */
    spin_lock(&netif->rx_lock);
    for ( i = netif->rx_req_cons;
          (i != netif->rx->req_prod) &&
              ((i-netif->rx_resp_prod) != NETIF_RX_RING_SIZE);
          i++ )
    {
        make_rx_response(netif,
                         netif->rx->ring[MASK_NETIF_RX_IDX(i)].req.id,
                         NETIF_RSP_DROPPED, 0, 0);
    }
    netif->rx_req_cons = i;
    spin_unlock(&netif->rx_lock);

    /*
     * Flush pending transmit buffers. The guest may still have to wait for
     * buffers that are queued at a physical NIC.
     */
    spin_lock(&netif->tx_lock);
    for ( i = netif->tx_req_cons;
          (i != netif->tx->req_prod) &&
              ((i-netif->tx_resp_prod) != NETIF_TX_RING_SIZE);
          i++ )
    {
        make_tx_response(netif,
                         netif->tx->ring[MASK_NETIF_TX_IDX(i)].req.id,
                         NETIF_RSP_DROPPED);
    }
    netif->tx_req_cons = i;
    spin_unlock(&netif->tx_lock);

    return 0;
}
#endif

void netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
    netif_t *netif = dev_id;
    if ( tx_work_exists(netif) )
    {
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();
    }
}

static void make_tx_response(netif_t *netif,
                             u16      id,
                             s8       st)
{
    NETIF_RING_IDX i = netif->tx_resp_prod;
    netif_tx_response_t *resp;

    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
    resp->id     = id;
    resp->status = st;
    wmb();
    netif->tx->resp_prod = netif->tx_resp_prod = ++i;

    mb(); /* Update producer before checking event threshold. */
    if ( i == netif->tx->event )
        notify_via_evtchn(netif->evtchn);
}

static int make_rx_response(netif_t *netif,
                            u16      id,
                            s8       st,
                            memory_t addr,
                            u16      size)
{
    NETIF_RING_IDX i = netif->rx_resp_prod;
    netif_rx_response_t *resp;

    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
    resp->addr   = addr;
    resp->id     = id;
    resp->status = (s16)size;
    if ( st < 0 )
        resp->status = (s16)st;
    wmb();
    netif->rx->resp_prod = netif->rx_resp_prod = ++i;

    mb(); /* Update producer before checking event threshold. */
    return (i == netif->rx->event);
}
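
/*
 * Both response paths use the frontend-supplied 'event' index to throttle
 * notifications: an event channel notification is raised only when the
 * response producer reaches the value the frontend asked to be woken at.
 * make_tx_response() notifies directly; make_rx_response() just reports
 * whether a notification is due, so net_rx_action() can coalesce them.
 */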

static void netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
{
    struct list_head *ent;
    netif_t *netif;
    int i = 0;

    printk(KERN_ALERT "netif_schedule_list:\n");
    spin_lock_irq(&net_schedule_list_lock);

    list_for_each ( ent, &net_schedule_list )
    {
        netif = list_entry(ent, netif_t, list);
        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
               i, netif->rx_req_cons, netif->rx_resp_prod);
        printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
               netif->tx_req_cons, netif->tx_resp_prod);
        printk(KERN_ALERT " shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
               netif->rx->req_prod, netif->rx->resp_prod);
        printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
               netif->rx->event, netif->tx->req_prod);
        printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
               netif->tx->resp_prod, netif->tx->event);
        i++;
    }

    spin_unlock_irq(&net_schedule_list_lock);
    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
}

static int __init init_module(void)
{
    int i;

    if ( !(start_info.flags & SIF_NET_BE_DOMAIN) &&
         !(start_info.flags & SIF_INITDOMAIN) )
        return 0;

    printk("Initialising Xen netif backend\n");

    skb_queue_head_init(&rx_queue);
    skb_queue_head_init(&tx_queue);

    netif_interface_init();

    if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
        BUG();

    pending_cons = 0;
    pending_prod = MAX_PENDING_REQS;
    for ( i = 0; i < MAX_PENDING_REQS; i++ )
        pending_ring[i] = i;

    spin_lock_init(&net_schedule_list_lock);
    INIT_LIST_HEAD(&net_schedule_list);

    netif_ctrlif_init();

    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
                      netif_be_dbg, SA_SHIRQ,
                      "net-be-dbg", &netif_be_dbg);

    return 0;
}
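
/*
 * The backend only initialises itself in a domain flagged as the network
 * backend domain or the initial domain. allocate_empty_lowmem_region()
 * reserves MAX_PENDING_REQS pages' worth of kernel virtual address space
 * starting at mmap_vstart; MMAP_VADDR() indexes into this window when
 * mapping guest transmit buffers.
 */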

static void cleanup_module(void)
{
    BUG();
}

module_init(init_module);
module_exit(cleanup_module);