ia64/xen-unstable

linux-2.4.26-xen-sparse/arch/xen/drivers/netif/frontend/main.c @ 1615:f9bbf7aa1596

bitkeeper revision 1.1026.2.1 (40e1764d1ndRTs9hmUyiBLEHi5_V3A)

Fix network backend bugs. It isn't safe to use skb->cb[] for our own
purposes after all. :-(
author kaf24@scramble.cl.cam.ac.uk
date Tue Jun 29 14:01:49 2004 +0000 (2004-06-29)
parents 44512070eb7b
children 489b925b0e22
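The changeset comment above notes that the frontend can no longer stash its own bookkeeping in skb->cb[]. Instead, the driver below identifies each in-flight skb by a small ring id and threads a chain of free ids through the tx_skbs[]/rx_skbs[] arrays themselves, with slot 0 holding the head of the chain (see ADD_ID_TO_FREELIST and GET_ID_FROM_FREELIST). The stand-alone sketch below shows that id/freelist technique in isolation; the names (slots, get_id, put_id, RING_SIZE) are invented for the example and are not part of the driver.

/* Minimal user-space sketch of the id/freelist bookkeeping used by the
 * driver: the pointer array doubles as a chain of free indices rooted at
 * slot 0, so no per-skb state needs to live in skb->cb[]. */
#include <stdio.h>

#define RING_SIZE 8

/* slots[1..RING_SIZE] hold in-flight pointers; slots[0] heads the free chain. */
static void *slots[RING_SIZE + 1];

static void freelist_init(void)
{
    int i;
    for ( i = 0; i <= RING_SIZE; i++ )      /* entry i chains to i+1 */
        slots[i] = (void *)(unsigned long)(i + 1);
}

/* Callers must not take more than RING_SIZE ids without returning some. */
static unsigned short get_id(void)
{
    unsigned long id = (unsigned long)slots[0];
    slots[0] = slots[id];                   /* unlink the head of the chain */
    return (unsigned short)id;
}

static void put_id(unsigned short id)
{
    slots[id] = slots[0];                   /* push the id back onto the chain */
    slots[0] = (void *)(unsigned long)id;
}

int main(void)
{
    unsigned short a, b;

    freelist_init();
    a = get_id();                           /* returns 1 */
    b = get_id();                           /* returns 2 */
    printf("allocated ids %u and %u\n", a, b);

    put_id(a);
    printf("next allocation reuses id %u\n", get_id());
    return 0;
}

The driver source at this revision follows.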
/******************************************************************************
 * arch/xen/drivers/netif/frontend/main.c
 *
 * Virtual network driver for XenoLinux.
 *
 * Copyright (c) 2002-2004, K A Fraser
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>

#include <asm/io.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include <asm/evtchn.h>
#include <asm/ctrl_if.h>

#include <asm/page.h>

#include "../netif.h"

#define RX_BUF_SIZE ((PAGE_SIZE/2)+1) /* Fool the slab allocator :-) */

static void network_tx_buf_gc(struct net_device *dev);
static void network_alloc_rx_buffers(struct net_device *dev);
static void cleanup_module(void);

static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];

static struct list_head dev_list;

struct net_private
{
    struct list_head list;
    struct net_device *dev;

    struct net_device_stats stats;
    NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
    unsigned int tx_full;

    netif_tx_interface_t *tx;
    netif_rx_interface_t *rx;

    spinlock_t tx_lock;
    spinlock_t rx_lock;

    unsigned int handle;
    unsigned int evtchn;
    unsigned int irq;

#define NETIF_STATE_CLOSED       0
#define NETIF_STATE_DISCONNECTED 1
#define NETIF_STATE_CONNECTED    2
#define NETIF_STATE_ACTIVE       3
    unsigned int state;

    /*
     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
     * array is an index into a chain of free entries.
     */
    struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
    struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
};

/* Access macros for acquiring and freeing slots in {tx,rx}_skbs[]. */
#define ADD_ID_TO_FREELIST(_list, _id)             \
    (_list)[(_id)] = (_list)[0];                   \
    (_list)[0] = (void *)(unsigned long)(_id);
#define GET_ID_FROM_FREELIST(_list)                \
 ({ unsigned long _id = (unsigned long)(_list)[0]; \
    (_list)[0] = (_list)[_id];                     \
    (unsigned short)_id; })

static struct net_device *find_dev_by_handle(unsigned int handle)
{
    struct list_head *ent;
    struct net_private *np;
    list_for_each ( ent, &dev_list )
    {
        np = list_entry(ent, struct net_private, list);
        if ( np->handle == handle )
            return np->dev;
    }
    return NULL;
}

/** Network interface info. */
struct netif_ctrl {
    /** Number of interfaces. */
    int interface_n;
    /** Number of connected interfaces. */
    int connected_n;
    /** Error code. */
    int err;
};

static struct netif_ctrl netctrl;

static void netctrl_init(void)
{
    memset(&netctrl, 0, sizeof(netctrl));
    netctrl.interface_n = -1;
}

/** Get or set a network interface error.
 */
static int netctrl_err(int err)
{
    if(err < 0 && !netctrl.err){
        netctrl.err = err;
        printk(KERN_WARNING "%s> err=%d\n", __FUNCTION__, err);
    }
    return netctrl.err;
}

/** Test if all network interfaces are connected.
 *
 * @return 1 if all connected, 0 if not, negative error code otherwise
 */
static int netctrl_connected(void)
{
    int ok = 0;
    ok = (netctrl.err ? netctrl.err : (netctrl.connected_n == netctrl.interface_n));
    return ok;
}

/** Count the connected network interfaces.
 *
 * @return connected count
 */
static int netctrl_connected_count(void)
{
    struct list_head *ent;
    struct net_private *np;
    unsigned int connected;

    connected = 0;

    list_for_each(ent, &dev_list) {
        np = list_entry(ent, struct net_private, list);
        if ( np->state == NETIF_STATE_CONNECTED){
            connected++;
        }
    }
    netctrl.connected_n = connected;
    return connected;
}

static int network_open(struct net_device *dev)
{
    struct net_private *np = dev->priv;
    int i;

    if ( np->state != NETIF_STATE_CONNECTED )
        return -EINVAL;

    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
    memset(&np->stats, 0, sizeof(np->stats));
    spin_lock_init(&np->tx_lock);
    spin_lock_init(&np->rx_lock);

    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
        np->tx_skbs[i] = (void *)(i+1);
    for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
        np->rx_skbs[i] = (void *)(i+1);

    wmb();
    np->state = NETIF_STATE_ACTIVE;

    network_alloc_rx_buffers(dev);
    np->rx->event = np->rx_resp_cons + 1;

    netif_start_queue(dev);

    MOD_INC_USE_COUNT;

    return 0;
}

static void network_tx_buf_gc(struct net_device *dev)
{
    NETIF_RING_IDX i, prod;
    unsigned short id;
    struct net_private *np = dev->priv;
    struct sk_buff *skb;

    do {
        prod = np->tx->resp_prod;

        for ( i = np->tx_resp_cons; i != prod; i++ )
        {
            id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
            skb = np->tx_skbs[id];
            ADD_ID_TO_FREELIST(np->tx_skbs, id);
            dev_kfree_skb_any(skb);
        }

        np->tx_resp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons. Note
         * that it is essential to schedule a callback, no matter how few
         * buffers are pending. Even if there is space in the transmit ring,
         * higher layers may be blocked because too much data is outstanding:
         * in such cases notification from Xen is likely to be the only kick
         * that we'll get.
         */
        np->tx->event =
            prod + ((np->tx->req_prod - prod) >> 1) + 1;
        mb();
    }
    while ( prod != np->tx->resp_prod );

    if ( np->tx_full &&
         ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
    {
        np->tx_full = 0;
        if ( np->state == NETIF_STATE_ACTIVE )
            netif_wake_queue(dev);
    }
}
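
/*
 * Refill the receive ring. Each freshly allocated, page-aligned skb is
 * entered in the ring under a freelist id, its PTE is zapped, and the
 * underlying machine page is handed back to Xen in one batched multicall
 * (decrease_reservation); the backend later returns pages containing
 * received packets, which netif_poll() maps back in.
 */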
static void network_alloc_rx_buffers(struct net_device *dev)
{
    unsigned short id;
    struct net_private *np = dev->priv;
    struct sk_buff *skb;
    NETIF_RING_IDX i = np->rx->req_prod;
    int nr_pfns = 0;

    /* Make sure the batch is large enough to be worthwhile (1/2 ring). */
    if ( unlikely((i - np->rx_resp_cons) > (NETIF_RX_RING_SIZE/2)) ||
         unlikely(np->state != NETIF_STATE_ACTIVE) )
        return;

    do {
        skb = dev_alloc_skb(RX_BUF_SIZE);
        if ( unlikely(skb == NULL) )
            break;

        skb->dev = dev;

        if ( unlikely(((unsigned long)skb->head & (PAGE_SIZE-1)) != 0) )
            panic("alloc_skb needs to provide us page-aligned buffers.");

        id = GET_ID_FROM_FREELIST(np->rx_skbs);

        np->rx_skbs[id] = skb;

        np->rx->ring[MASK_NETIF_RX_IDX(i)].req.id = id;

        rx_pfn_array[nr_pfns] = virt_to_machine(skb->head) >> PAGE_SHIFT;

        rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
        rx_mcl[nr_pfns].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
        rx_mcl[nr_pfns].args[1] = 0;
        rx_mcl[nr_pfns].args[2] = 0;

        nr_pfns++;
    }
    while ( (++i - np->rx_resp_cons) != NETIF_RX_RING_SIZE );

    /*
     * We may have allocated buffers which have entries outstanding in the page
     * update queue -- make sure we flush those first!
     */
    flush_page_update_queue();

    /* After all PTEs have been zapped we blow away stale TLB entries. */
    rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;

    /* Give away a batch of pages. */
    rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
    rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
    rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
    rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;

    /* Zap PTEs and give away pages in one big multicall. */
    (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);

    /* Check return status of HYPERVISOR_dom_mem_op(). */
    if ( rx_mcl[nr_pfns].args[5] != nr_pfns )
        panic("Unable to reduce memory reservation\n");

    np->rx->req_prod = i;
}

static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned short id;
    struct net_private *np = (struct net_private *)dev->priv;
    netif_tx_request_t *tx;
    NETIF_RING_IDX i;

    if ( unlikely(np->tx_full) )
    {
        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
        netif_stop_queue(dev);
        return -ENOBUFS;
    }

    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
                  PAGE_SIZE) )
    {
        struct sk_buff *new_skb = dev_alloc_skb(RX_BUF_SIZE);
        if ( unlikely(new_skb == NULL) )
            return 1;
        skb_put(new_skb, skb->len);
        memcpy(new_skb->data, skb->data, skb->len);
        dev_kfree_skb(skb);
        skb = new_skb;
    }

    spin_lock_irq(&np->tx_lock);

    /* if the backend isn't available then don't do anything! */
    if ( !netif_carrier_ok(dev) )
    {
        spin_unlock_irq(&np->tx_lock);
        return 1;
    }

    i = np->tx->req_prod;

    id = GET_ID_FROM_FREELIST(np->tx_skbs);
    np->tx_skbs[id] = skb;

    tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;

    tx->id = id;
    tx->addr = virt_to_machine(skb->data);
    tx->size = skb->len;

    wmb();
    np->tx->req_prod = i + 1;

    network_tx_buf_gc(dev);

    if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
    {
        np->tx_full = 1;
        netif_stop_queue(dev);
    }

    spin_unlock_irq(&np->tx_lock);

    np->stats.tx_bytes += skb->len;
    np->stats.tx_packets++;

    /* Only notify Xen if there are no outstanding responses. */
    mb();
    if ( np->tx->resp_prod == i )
        notify_via_evtchn(np->evtchn);

    return 0;
}

static void netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
    struct net_device *dev = dev_id;
    struct net_private *np = dev->priv;
    unsigned long flags;

    spin_lock_irqsave(&np->tx_lock, flags);
    if ( likely(netif_carrier_ok(dev)) )
        network_tx_buf_gc(dev);
    spin_unlock_irqrestore(&np->tx_lock, flags);

    if ( np->rx_resp_cons != np->rx->resp_prod )
        netif_rx_schedule(dev);
}
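
/*
 * Receive poll handler (dev->poll). For each response, the machine page
 * supplied by the backend is remapped at the original skb's virtual
 * address and the M2P/P2M translations are updated, with the MMU work
 * batched into a single multicall, before the skbs are passed up the
 * stack and the ring is replenished.
 */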
static int netif_poll(struct net_device *dev, int *pbudget)
{
    struct net_private *np = dev->priv;
    struct sk_buff *skb;
    netif_rx_response_t *rx;
    NETIF_RING_IDX i;
    mmu_update_t *mmu = rx_mmu;
    multicall_entry_t *mcl = rx_mcl;
    int work_done, budget, more_to_do = 1;
    struct sk_buff_head rxq;
    unsigned long flags;

    spin_lock(&np->rx_lock);

    /* If the device is undergoing recovery then don't do anything. */
    if ( !netif_carrier_ok(dev) )
    {
        spin_unlock(&np->rx_lock);
        return 0;
    }

    skb_queue_head_init(&rxq);

    if ( (budget = *pbudget) > dev->quota )
        budget = dev->quota;

    for ( i = np->rx_resp_cons, work_done = 0;
          (i != np->rx->resp_prod) && (work_done < budget);
          i++, work_done++ )
    {
        rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;

        skb = np->rx_skbs[rx->id];
        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);

        if ( unlikely(rx->status <= 0) )
        {
            /* Gate this error. We get a (valid) slew of them on suspend. */
            if ( np->state == NETIF_STATE_ACTIVE )
                printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
            dev_kfree_skb(skb);
            continue;
        }

        skb->data = skb->tail = skb->head + (rx->addr & ~PAGE_MASK);
        skb_put(skb, rx->status);

        np->stats.rx_packets++;
        np->stats.rx_bytes += rx->status;

        /* Remap the page. */
        mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
        mmu->val = __pa(skb->head) >> PAGE_SHIFT;
        mmu++;
        mcl->op = __HYPERVISOR_update_va_mapping;
        mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
        mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
        mcl->args[2] = 0;
        mcl++;

        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
            rx->addr >> PAGE_SHIFT;

        __skb_queue_tail(&rxq, skb);
    }

    /* Do all the remapping work, and M->P updates, in one big hypercall. */
    if ( likely((mcl - rx_mcl) != 0) )
    {
        mcl->op = __HYPERVISOR_mmu_update;
        mcl->args[0] = (unsigned long)rx_mmu;
        mcl->args[1] = mmu - rx_mmu;
        mcl->args[2] = 0;
        mcl++;
        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
    }

    while ( (skb = __skb_dequeue(&rxq)) != NULL )
    {
        /* Set the shared-info area, which is hidden behind the real data. */
        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags = 0;
        skb_shinfo(skb)->frag_list = NULL;

        /* Ethernet-specific work. Delayed to here as it peeks the header. */
        skb->protocol = eth_type_trans(skb, dev);

        /* Pass it up. */
        netif_rx(skb);
        dev->last_rx = jiffies;
    }

    np->rx_resp_cons = i;

    network_alloc_rx_buffers(dev);

    *pbudget -= work_done;
    dev->quota -= work_done;

    if ( work_done < budget )
    {
        local_irq_save(flags);

        np->rx->event = i + 1;

        /* Deal with hypervisor racing our resetting of rx_event. */
        mb();
        if ( np->rx->resp_prod == i )
        {
            __netif_rx_complete(dev);
            more_to_do = 0;
        }

        local_irq_restore(flags);
    }

    spin_unlock(&np->rx_lock);

    return more_to_do;
}

static int network_close(struct net_device *dev)
{
    struct net_private *np = dev->priv;

    netif_stop_queue(np->dev);

    np->state = NETIF_STATE_CONNECTED;

    /* XXX We need to properly disconnect via the domain controller. */
    while ( /*(np->rx_resp_cons != np->rx->req_prod) ||*/
            (np->tx_resp_cons != np->tx->req_prod) )
    {
        barrier();
        current->state = TASK_INTERRUPTIBLE;
        schedule_timeout(1);
    }

    MOD_DEC_USE_COUNT;

    return 0;
}

static struct net_device_stats *network_get_stats(struct net_device *dev)
{
    struct net_private *np = (struct net_private *)dev->priv;
    return &np->stats;
}

static void network_reconnect(struct net_device *dev, netif_fe_interface_status_changed_t *status)
{
    struct net_private *np;
    int i, requeue_idx;
    netif_tx_request_t *tx;

    np = dev->priv;
    spin_lock_irq(&np->rx_lock);
    spin_lock(&np->tx_lock);

    /* Recovery procedure: */

    /* Step 1: Reinitialise variables. */
    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
    np->rx->event = 1;

    /* Step 2: Rebuild the RX and TX ring contents.
     * NB. We could just free the queued TX packets now but we hope
     * that sending them out might do some good. We have to rebuild
     * the RX ring because some of our pages are currently flipped out
     * so we can't just free the RX skbs.
     * NB2. Freelist index entries are always going to be less than
     * __PAGE_OFFSET, whereas pointers to skbs will always be equal or
     * greater than __PAGE_OFFSET: we use this property to distinguish
     * them.
     */

    /* Rebuild the TX buffer freelist and the TX ring itself.
     * NB. This reorders packets. We could keep more private state
     * to avoid this but maybe it doesn't matter so much given the
     * interface has been down.
     */
    for( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ ){
        if( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET ) {
            struct sk_buff *skb = np->tx_skbs[i];

            tx = &np->tx->ring[requeue_idx++].req;

            tx->id = i;
            tx->addr = virt_to_machine(skb->data);
            tx->size = skb->len;

            np->stats.tx_bytes += skb->len;
            np->stats.tx_packets++;
        }
    }
    wmb();
    np->tx->req_prod = requeue_idx;

    /* Rebuild the RX buffer freelist and the RX ring itself. */
    for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
        if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
            np->rx->ring[requeue_idx++].req.id = i;
    wmb();
    np->rx->req_prod = requeue_idx;

    /* Step 3: All public and private state should now be sane. Get
     * ready to start sending and receiving packets and give the driver
     * domain a kick because we've probably just requeued some
     * packets.
     */
    netif_carrier_on(dev);
    netif_start_queue(dev);
    np->state = NETIF_STATE_ACTIVE;

    notify_via_evtchn(status->evtchn);

    network_tx_buf_gc(dev);

    printk(KERN_INFO "Recovery completed\n");

    spin_unlock(&np->tx_lock);
    spin_unlock_irq(&np->rx_lock);
}

static void netif_status_change(netif_fe_interface_status_changed_t *status)
{
    ctrl_msg_t cmsg;
    netif_fe_interface_connect_t up;
    struct net_device *dev;
    struct net_private *np;

    if ( netctrl.interface_n <= 0 )
    {
        printk(KERN_WARNING "Status change: no interfaces\n");
        return;
    }

    dev = find_dev_by_handle(status->handle);
    if(!dev){
        printk(KERN_WARNING "Status change: invalid netif handle %u\n",
               status->handle);
        return;
    }
    np = dev->priv;

    switch ( status->status )
    {
    case NETIF_INTERFACE_STATUS_DESTROYED:
        printk(KERN_WARNING "Unexpected netif-DESTROYED message in state %d\n",
               np->state);
        break;

    case NETIF_INTERFACE_STATUS_DISCONNECTED:
        if ( np->state != NETIF_STATE_CLOSED )
        {
            printk(KERN_WARNING "Unexpected netif-DISCONNECTED message"
                   " in state %d\n", np->state);
            printk(KERN_INFO "Attempting to reconnect network interface\n");

            /* Begin interface recovery.
             *
             * NB. Whilst we're recovering, we turn the carrier state off. We
             * take measures to ensure that this device isn't used for
             * anything. We also stop the queue for this device. Various
             * different approaches (e.g. continuing to buffer packets) have
             * been tested but don't appear to improve the overall impact on
             * TCP connections.
             *
             * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
             * is initiated by a special "RESET" message - disconnect could
             * just mean we're not allowed to use this interface any more.
             */

            /* Stop old i/f to prevent errors whilst we rebuild the state. */
            spin_lock_irq(&np->tx_lock);
            spin_lock(&np->rx_lock);
            netif_stop_queue(dev);
            netif_carrier_off(dev);
            np->state = NETIF_STATE_DISCONNECTED;
            spin_unlock(&np->rx_lock);
            spin_unlock_irq(&np->tx_lock);

            /* Free resources. */
            free_irq(np->irq, dev);
            unbind_evtchn_from_irq(np->evtchn);
            free_page((unsigned long)np->tx);
            free_page((unsigned long)np->rx);
        }

        /* Move from CLOSED to DISCONNECTED state. */
        np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
        np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
        memset(np->tx, 0, PAGE_SIZE);
        memset(np->rx, 0, PAGE_SIZE);
        np->state = NETIF_STATE_DISCONNECTED;

        /* Construct an interface-CONNECT message for the domain controller. */
        cmsg.type    = CMSG_NETIF_FE;
        cmsg.subtype = CMSG_NETIF_FE_INTERFACE_CONNECT;
        cmsg.length  = sizeof(netif_fe_interface_connect_t);
        up.handle         = status->handle;
        up.tx_shmem_frame = virt_to_machine(np->tx) >> PAGE_SHIFT;
        up.rx_shmem_frame = virt_to_machine(np->rx) >> PAGE_SHIFT;
        memcpy(cmsg.msg, &up, sizeof(up));

        /* Tell the controller to bring up the interface. */
        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
        break;

    case NETIF_INTERFACE_STATUS_CONNECTED:
        if ( np->state == NETIF_STATE_CLOSED ){
            printk(KERN_WARNING "Unexpected netif-CONNECTED message"
                   " in state %d\n", np->state);
            break;
        }

        memcpy(dev->dev_addr, status->mac, ETH_ALEN);

        if ( netif_carrier_ok(dev) )
            np->state = NETIF_STATE_CONNECTED;
        else
            network_reconnect(dev, status);

        np->evtchn = status->evtchn;
        np->irq = bind_evtchn_to_irq(np->evtchn);
        (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM,
                          dev->name, dev);

        netctrl_connected_count();
        break;

    default:
        printk(KERN_WARNING "Status change to unknown value %d\n",
               status->status);
        break;
    }
}

/** Create a network device.
 * @param handle device handle
 * @param val return parameter for created device
 * @return 0 on success, error code otherwise
 */
static int create_netdev(int handle, struct net_device **val)
{
    int err = 0;
    struct net_device *dev = NULL;
    struct net_private *np = NULL;

    dev = alloc_etherdev(sizeof(struct net_private));
    if (!dev){
        printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
        err = -ENOMEM;
        goto exit;
    }
    np = dev->priv;
    np->state  = NETIF_STATE_CLOSED;
    np->handle = handle;

    dev->open            = network_open;
    dev->hard_start_xmit = network_start_xmit;
    dev->stop            = network_close;
    dev->get_stats       = network_get_stats;
    dev->poll            = netif_poll;
    dev->weight          = 64;

    err = register_netdev(dev);
    if (err){
        printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
        goto exit;
    }
    np->dev = dev;
    list_add(&np->list, &dev_list);
  exit:
    if(err){
        if(dev) kfree(dev);
        dev = NULL;
    }
    if(val) *val = dev;
    return err;
}

/*
 * Initialize the network control interface. Set the number of network devices
 * and create them.
 */
static void netif_driver_status_change(
    netif_fe_driver_status_changed_t *status)
{
    int err = 0;
    int i;

    netctrl.interface_n = status->nr_interfaces;
    netctrl.connected_n = 0;

    for(i = 0; i < netctrl.interface_n; i++){
        err = create_netdev(i, NULL);
        if(err){
            netctrl_err(err);
            break;
        }
    }
}

static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
{
    int respond = 1;

    switch ( msg->subtype )
    {
    case CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED:
        if ( msg->length != sizeof(netif_fe_interface_status_changed_t) )
            goto error;
        netif_status_change((netif_fe_interface_status_changed_t *)
                            &msg->msg[0]);
        break;

    case CMSG_NETIF_FE_DRIVER_STATUS_CHANGED:
        if ( msg->length != sizeof(netif_fe_driver_status_changed_t) )
            goto error;
        netif_driver_status_change((netif_fe_driver_status_changed_t *)
                                   &msg->msg[0]);
        /* Message is a response */
        respond = 0;
        break;

    error:
    default:
        msg->length = 0;
        break;
    }

    if ( respond )
        ctrl_if_send_response(msg);
}

static int __init init_module(void)
{
    ctrl_msg_t cmsg;
    netif_fe_driver_status_changed_t st;
    int err = 0, wait_i, wait_n = 20;

    if ( (start_info.flags & SIF_INITDOMAIN) ||
         (start_info.flags & SIF_NET_BE_DOMAIN) )
        return 0;

    printk("Initialising Xen virtual ethernet frontend driver");

    INIT_LIST_HEAD(&dev_list);

    netctrl_init();

    (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
                                    CALLBACK_IN_BLOCKING_CONTEXT);

    /* Send a driver-UP notification to the domain controller. */
    cmsg.type      = CMSG_NETIF_FE;
    cmsg.subtype   = CMSG_NETIF_FE_DRIVER_STATUS_CHANGED;
    cmsg.length    = sizeof(netif_fe_driver_status_changed_t);
    st.status        = NETIF_DRIVER_STATUS_UP;
    st.nr_interfaces = 0;
    memcpy(cmsg.msg, &st, sizeof(st));
    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);

    /* Wait for all interfaces to be connected. */
    for ( wait_i = 0; ; wait_i++)
    {
        if ( (err = (wait_i < wait_n) ? netctrl_connected() : -ENETDOWN) != 0 )
        {
            err = (err > 0) ? 0 : err;
            break;
        }
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(1);
    }

    if ( err )
        ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);

    return err;
}

static void cleanup_module(void)
{
    /* XXX FIXME */
    BUG();
}

module_init(init_module);
module_exit(cleanup_module);