ia64/xen-unstable

view linux-2.4.26-xen-sparse/arch/xen/drivers/netif/frontend/main.c @ 1776:c2f673cea5e4

bitkeeper revision 1.1072.1.1 (40f4e51fLMgcKX4Sn6FNYePX6EqkGA)

Merge http://xen.bkbits.net:8080/xeno-unstable.bk
into gandalf.hpl.hp.com:/var/bk/xeno-unstable.bk
author xenbk@gandalf.hpl.hp.com
date Wed Jul 14 07:47:43 2004 +0000 (2004-07-14)
parents 489b925b0e22 131c48baa117
children e91945007886
line source
/******************************************************************************
 * arch/xen/drivers/netif/frontend/main.c
 *
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2004, K A Fraser
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>

#include <asm/io.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include <asm/evtchn.h>
#include <asm/ctrl_if.h>

#include <asm/page.h>

#include "../netif.h"
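
/*
 * Note on RX_BUF_SIZE below: receive buffers must be page-aligned because
 * whole pages are exchanged with the backend. Requesting just over half a
 * page appears intended to push dev_alloc_skb() into the page-sized slab
 * cache, whose objects start on a page boundary; network_alloc_rx_buffers()
 * panics if the resulting skb->head is not page-aligned.
 */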
#define RX_BUF_SIZE ((PAGE_SIZE/2)+1) /* Fool the slab allocator :-) */

static void network_tx_buf_gc(struct net_device *dev);
static void network_alloc_rx_buffers(struct net_device *dev);

static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];

static struct list_head dev_list;

struct net_private
{
    struct list_head list;
    struct net_device *dev;

    struct net_device_stats stats;
    NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
    unsigned int tx_full;

    netif_tx_interface_t *tx;
    netif_rx_interface_t *rx;

    spinlock_t tx_lock;
    spinlock_t rx_lock;

    unsigned int handle;
    unsigned int evtchn;
    unsigned int irq;

    /* What is the status of our connection to the remote backend? */
#define BEST_CLOSED       0
#define BEST_DISCONNECTED 1
#define BEST_CONNECTED    2
    unsigned int backend_state;

    /* Is this interface open or closed (down or up)? */
#define UST_CLOSED        0
#define UST_OPEN          1
    unsigned int user_state;

    /*
     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
     * array is an index into a chain of free entries.
     */
    struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
    struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
};
/* Access macros for acquiring/freeing slots in {tx,rx}_skbs[]. */
#define ADD_ID_TO_FREELIST(_list, _id)             \
    (_list)[(_id)] = (_list)[0];                   \
    (_list)[0]  = (void *)(unsigned long)(_id);
#define GET_ID_FROM_FREELIST(_list)                \
 ({ unsigned long _id = (unsigned long)(_list)[0]; \
    (_list)[0]  = (_list)[_id];                    \
    (unsigned short)_id; })
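
/*
 * Illustrative sketch of how the freelist macros above are used: slot 0 of
 * tx_skbs[]/rx_skbs[] never holds an skb; it stores the index of the first
 * free slot, and each free slot stores the index of the next, so the array
 * doubles as a singly-linked free list.
 *
 *     id = GET_ID_FROM_FREELIST(np->tx_skbs);   (pop a free ring id)
 *     np->tx_skbs[id] = skb;                    (slot now owns the skb)
 *         ... later, when the response for 'id' arrives ...
 *     ADD_ID_TO_FREELIST(np->tx_skbs, id);      (push the id back)
 *
 * Free-list indices are small integers (well below __PAGE_OFFSET), which is
 * how the recovery code in network_connect() tells them apart from real skb
 * pointers.
 */
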
static struct net_device *find_dev_by_handle(unsigned int handle)
{
    struct list_head *ent;
    struct net_private *np;
    list_for_each ( ent, &dev_list )
    {
        np = list_entry(ent, struct net_private, list);
        if ( np->handle == handle )
            return np->dev;
    }
    return NULL;
}

/** Network interface info. */
struct netif_ctrl {
    /** Number of interfaces. */
    int interface_n;
    /** Number of connected interfaces. */
    int connected_n;
    /** Error code. */
    int err;
};

static struct netif_ctrl netctrl;

static void netctrl_init(void)
{
    memset(&netctrl, 0, sizeof(netctrl));
    netctrl.interface_n = -1;
}
/** Record a network interface error, if one is not already recorded.
 * @return the current error code
 */
static int netctrl_err(int err)
{
    if ( err < 0 && !netctrl.err )
    {
        netctrl.err = err;
        printk(KERN_WARNING "%s> err=%d\n", __FUNCTION__, err);
    }
    return netctrl.err;
}

/** Test if all network interfaces are connected.
 *
 * @return 1 if all connected, 0 if not, negative error code otherwise
 */
static int netctrl_connected(void)
{
    int ok = 0;
    ok = (netctrl.err ? netctrl.err :
          (netctrl.connected_n == netctrl.interface_n));
    return ok;
}

/** Count the connected network interfaces.
 *
 * @return connected count
 */
static int netctrl_connected_count(void)
{
    struct list_head *ent;
    struct net_private *np;
    unsigned int connected;

    connected = 0;

    list_for_each(ent, &dev_list)
    {
        np = list_entry(ent, struct net_private, list);
        if ( np->backend_state == BEST_CONNECTED )
            connected++;
    }

    netctrl.connected_n = connected;
    return connected;
}
static int network_open(struct net_device *dev)
{
    struct net_private *np = dev->priv;

    memset(&np->stats, 0, sizeof(np->stats));

    np->user_state = UST_OPEN;

    network_alloc_rx_buffers(dev);
    np->rx->event = np->rx_resp_cons + 1;

    netif_start_queue(dev);

    return 0;
}
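
/*
 * Reclaim skbs for transmit requests that the backend has responded to.
 * The tx->event threshold asks for another notification roughly half-way
 * through the requests still outstanding: for example, with resp_prod == 10
 * and req_prod == 18, the event is set to 10 + (8 >> 1) + 1 = 15, about
 * half-way through the eight outstanding requests. See the block comment
 * inside the loop for why a callback is always scheduled, however few
 * buffers remain pending.
 */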
static void network_tx_buf_gc(struct net_device *dev)
{
    NETIF_RING_IDX i, prod;
    unsigned short id;
    struct net_private *np = dev->priv;
    struct sk_buff *skb;

    if ( np->backend_state != BEST_CONNECTED )
        return;

    do {
        prod = np->tx->resp_prod;

        for ( i = np->tx_resp_cons; i != prod; i++ )
        {
            id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
            skb = np->tx_skbs[id];
            ADD_ID_TO_FREELIST(np->tx_skbs, id);
            dev_kfree_skb_any(skb);
        }

        np->tx_resp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons. Note
         * that it is essential to schedule a callback, no matter how few
         * buffers are pending. Even if there is space in the transmit ring,
         * higher layers may be blocked because too much data is outstanding:
         * in such cases notification from Xen is likely to be the only kick
         * that we'll get.
         */
        np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
        mb();
    }
    while ( prod != np->tx->resp_prod );

    if ( np->tx_full &&
         ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
    {
        np->tx_full = 0;
        if ( np->user_state == UST_OPEN )
            netif_wake_queue(dev);
    }
}
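
/*
 * Refill the receive ring. For each fresh page-sized skb the driver (a) tells
 * the backend which ring id owns it, (b) zaps its own PTE for the page via
 * update_va_mapping, and (c) returns the underlying machine page to Xen with
 * a MEMOP_decrease_reservation dom_mem_op, all batched into one multicall.
 * The backend is then expected to transfer a received packet's page into the
 * slot, which netif_poll() remaps into this address space.
 */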
static void network_alloc_rx_buffers(struct net_device *dev)
{
    unsigned short id;
    struct net_private *np = dev->priv;
    struct sk_buff *skb;
    NETIF_RING_IDX i = np->rx->req_prod;
    int nr_pfns = 0;

    /* Make sure the batch is large enough to be worthwhile (1/2 ring). */
    if ( unlikely((i - np->rx_resp_cons) > (NETIF_RX_RING_SIZE/2)) ||
         unlikely(np->backend_state != BEST_CONNECTED) )
        return;

    do {
        skb = dev_alloc_skb(RX_BUF_SIZE);
        if ( unlikely(skb == NULL) )
            break;

        skb->dev = dev;

        if ( unlikely(((unsigned long)skb->head & (PAGE_SIZE-1)) != 0) )
            panic("alloc_skb needs to provide us page-aligned buffers.");

        id = GET_ID_FROM_FREELIST(np->rx_skbs);

        np->rx_skbs[id] = skb;

        np->rx->ring[MASK_NETIF_RX_IDX(i)].req.id = id;

        rx_pfn_array[nr_pfns] = virt_to_machine(skb->head) >> PAGE_SHIFT;

        rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
        rx_mcl[nr_pfns].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
        rx_mcl[nr_pfns].args[1] = 0;
        rx_mcl[nr_pfns].args[2] = 0;

        nr_pfns++;
    }
    while ( (++i - np->rx_resp_cons) != NETIF_RX_RING_SIZE );

    /*
     * We may have allocated buffers which have entries outstanding in the page
     * update queue -- make sure we flush those first!
     */
    flush_page_update_queue();

    /* After all PTEs have been zapped we blow away stale TLB entries. */
    rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;

    /* Give away a batch of pages. */
    rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
    rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
    rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
    rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;

    /* Zap PTEs and give away pages in one big multicall. */
    (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);

    /* Check return status of HYPERVISOR_dom_mem_op(). */
    if ( rx_mcl[nr_pfns].args[5] != nr_pfns )
        panic("Unable to reduce memory reservation\n");

    np->rx->req_prod = i;
}
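
/*
 * Transmit path. Each TX request describes a single machine-contiguous
 * buffer, so an skb whose data would straddle a page boundary is first
 * copied into a fresh page-sized skb before being placed on the ring.
 */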
static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned short id;
    struct net_private *np = (struct net_private *)dev->priv;
    netif_tx_request_t *tx;
    NETIF_RING_IDX i;

    if ( unlikely(np->tx_full) )
    {
        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
        netif_stop_queue(dev);
        return -ENOBUFS;
    }

    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
                  PAGE_SIZE) )
    {
        struct sk_buff *new_skb = dev_alloc_skb(RX_BUF_SIZE);
        if ( unlikely(new_skb == NULL) )
            return 1;
        skb_put(new_skb, skb->len);
        memcpy(new_skb->data, skb->data, skb->len);
        dev_kfree_skb(skb);
        skb = new_skb;
    }

    spin_lock_irq(&np->tx_lock);

    if ( np->backend_state != BEST_CONNECTED )
    {
        spin_unlock_irq(&np->tx_lock);
        return 1;
    }

    i = np->tx->req_prod;

    id = GET_ID_FROM_FREELIST(np->tx_skbs);
    np->tx_skbs[id] = skb;

    tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;

    tx->id = id;
    tx->addr = virt_to_machine(skb->data);
    tx->size = skb->len;

    wmb();
    np->tx->req_prod = i + 1;

    network_tx_buf_gc(dev);

    if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
    {
        np->tx_full = 1;
        netif_stop_queue(dev);
    }

    spin_unlock_irq(&np->tx_lock);

    np->stats.tx_bytes += skb->len;
    np->stats.tx_packets++;

    /* Only notify Xen if there are no outstanding responses. */
    mb();
    if ( np->tx->resp_prod == i )
        notify_via_evtchn(np->evtchn);

    return 0;
}
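
/*
 * Event-channel interrupt handler: reap completed transmissions and, if new
 * receive responses are pending on an open interface, defer the RX work to
 * netif_poll() via netif_rx_schedule().
 */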
static void netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
    struct net_device *dev = dev_id;
    struct net_private *np = dev->priv;
    unsigned long flags;

    spin_lock_irqsave(&np->tx_lock, flags);
    network_tx_buf_gc(dev);
    spin_unlock_irqrestore(&np->tx_lock, flags);

    if ( (np->rx_resp_cons != np->rx->resp_prod) &&
         (np->user_state == UST_OPEN) )
        netif_rx_schedule(dev);
}
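
/*
 * Polled receive. Each RX response refers to a foreign machine page that the
 * backend has transferred to us; the loop below batches, per packet, (a) a
 * machine-to-physical table update and (b) an update_va_mapping call that
 * maps the new machine page at the skb's existing virtual address, then
 * issues the whole batch as a single multicall before the skbs are pushed
 * up the stack with netif_rx().
 */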
static int netif_poll(struct net_device *dev, int *pbudget)
{
    struct net_private *np = dev->priv;
    struct sk_buff *skb;
    netif_rx_response_t *rx;
    NETIF_RING_IDX i;
    mmu_update_t *mmu = rx_mmu;
    multicall_entry_t *mcl = rx_mcl;
    int work_done, budget, more_to_do = 1;
    struct sk_buff_head rxq;
    unsigned long flags;

    spin_lock(&np->rx_lock);

    if ( np->backend_state != BEST_CONNECTED )
    {
        spin_unlock(&np->rx_lock);
        return 0;
    }

    skb_queue_head_init(&rxq);

    if ( (budget = *pbudget) > dev->quota )
        budget = dev->quota;

    for ( i = np->rx_resp_cons, work_done = 0;
          (i != np->rx->resp_prod) && (work_done < budget);
          i++, work_done++ )
    {
        rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;

        skb = np->rx_skbs[rx->id];
        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);

        if ( unlikely(rx->status <= 0) )
        {
            /* Gate this error. We get a (valid) slew of them on suspend. */
            if ( np->user_state != UST_OPEN )
                printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
            dev_kfree_skb(skb);
            continue;
        }

        skb->data = skb->tail = skb->head + (rx->addr & ~PAGE_MASK);
        skb_put(skb, rx->status);

        np->stats.rx_packets++;
        np->stats.rx_bytes += rx->status;

        /* Remap the page. */
        mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
        mmu->val = __pa(skb->head) >> PAGE_SHIFT;
        mmu++;
        mcl->op = __HYPERVISOR_update_va_mapping;
        mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
        mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
        mcl->args[2] = 0;
        mcl++;

        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
            rx->addr >> PAGE_SHIFT;

        __skb_queue_tail(&rxq, skb);
    }

    /* Do all the remapping work, and M->P updates, in one big hypercall. */
    if ( likely((mcl - rx_mcl) != 0) )
    {
        mcl->op = __HYPERVISOR_mmu_update;
        mcl->args[0] = (unsigned long)rx_mmu;
        mcl->args[1] = mmu - rx_mmu;
        mcl->args[2] = 0;
        mcl++;
        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
    }

    while ( (skb = __skb_dequeue(&rxq)) != NULL )
    {
        /* Set the shared-info area, which is hidden behind the real data. */
        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags = 0;
        skb_shinfo(skb)->frag_list = NULL;

        /* Ethernet-specific work. Delayed to here as it peeks the header. */
        skb->protocol = eth_type_trans(skb, dev);

        /* Pass it up. */
        netif_rx(skb);
        dev->last_rx = jiffies;
    }

    np->rx_resp_cons = i;

    network_alloc_rx_buffers(dev);

    *pbudget -= work_done;
    dev->quota -= work_done;

    if ( work_done < budget )
    {
        local_irq_save(flags);

        np->rx->event = i + 1;

        /* Deal with hypervisor racing our resetting of rx_event. */
        mb();
        if ( np->rx->resp_prod == i )
        {
            __netif_rx_complete(dev);
            more_to_do = 0;
        }

        local_irq_restore(flags);
    }

    spin_unlock(&np->rx_lock);

    return more_to_do;
}
static int network_close(struct net_device *dev)
{
    struct net_private *np = dev->priv;
    np->user_state = UST_CLOSED;
    netif_stop_queue(np->dev);
    return 0;
}

static struct net_device_stats *network_get_stats(struct net_device *dev)
{
    struct net_private *np = (struct net_private *)dev->priv;
    return &np->stats;
}
static void network_connect(struct net_device *dev,
                            netif_fe_interface_status_changed_t *status)
{
    struct net_private *np;
    int i, requeue_idx;
    netif_tx_request_t *tx;

    np = dev->priv;
    spin_lock_irq(&np->rx_lock);
    spin_lock(&np->tx_lock);

    /* Recovery procedure: */

    /* Step 1: Reinitialise variables. */
    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
    np->rx->event = 1;

    /* Step 2: Rebuild the RX and TX ring contents.
     * NB. We could just free the queued TX packets now but we hope
     * that sending them out might do some good. We have to rebuild
     * the RX ring because some of our pages are currently flipped out
     * so we can't just free the RX skbs.
     * NB2. Freelist index entries are always going to be less than
     * __PAGE_OFFSET, whereas pointers to skbs will always be equal or
     * greater than __PAGE_OFFSET: we use this property to distinguish
     * them.
     */

    /* Rebuild the TX buffer freelist and the TX ring itself.
     * NB. This reorders packets. We could keep more private state
     * to avoid this but maybe it doesn't matter so much given the
     * interface has been down.
     */
    for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
    {
        if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
        {
            struct sk_buff *skb = np->tx_skbs[i];

            tx = &np->tx->ring[requeue_idx++].req;

            tx->id = i;
            tx->addr = virt_to_machine(skb->data);
            tx->size = skb->len;

            np->stats.tx_bytes += skb->len;
            np->stats.tx_packets++;
        }
    }
    wmb();
    np->tx->req_prod = requeue_idx;

    /* Rebuild the RX buffer freelist and the RX ring itself. */
    for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
        if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
            np->rx->ring[requeue_idx++].req.id = i;
    wmb();
    np->rx->req_prod = requeue_idx;

    /* Step 3: All public and private state should now be sane. Get
     * ready to start sending and receiving packets and give the driver
     * domain a kick because we've probably just requeued some
     * packets.
     */
    np->backend_state = BEST_CONNECTED;
    notify_via_evtchn(status->evtchn);
    network_tx_buf_gc(dev);

    if ( np->user_state == UST_OPEN )
        netif_start_queue(dev);

    spin_unlock(&np->tx_lock);
    spin_unlock_irq(&np->rx_lock);
}
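
/*
 * Handle an interface status message from the domain controller. In outline:
 * BEST_CLOSED -> (allocate rings, send CMSG_NETIF_FE_INTERFACE_CONNECT) ->
 * BEST_DISCONNECTED -> (CONNECTED message arrives, bind the event channel)
 * -> BEST_CONNECTED. An unexpected DISCONNECTED while connected triggers the
 * recovery path, which frees the old rings and reconnects.
 */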
static void netif_status_change(netif_fe_interface_status_changed_t *status)
{
    ctrl_msg_t cmsg;
    netif_fe_interface_connect_t up;
    struct net_device *dev;
    struct net_private *np;

    if ( netctrl.interface_n <= 0 )
    {
        printk(KERN_WARNING "Status change: no interfaces\n");
        return;
    }

    dev = find_dev_by_handle(status->handle);
    if ( dev == NULL )
    {
        printk(KERN_WARNING "Status change: invalid netif handle %u\n",
               status->handle);
        return;
    }
    np = dev->priv;

    switch ( status->status )
    {
    case NETIF_INTERFACE_STATUS_DESTROYED:
        printk(KERN_WARNING "Unexpected netif-DESTROYED message in state %d\n",
               np->backend_state);
        break;

    case NETIF_INTERFACE_STATUS_DISCONNECTED:
        if ( np->backend_state != BEST_CLOSED )
        {
            printk(KERN_WARNING "Unexpected netif-DISCONNECTED message"
                   " in state %d\n", np->backend_state);
            printk(KERN_INFO "Attempting to reconnect network interface\n");

            /* Begin interface recovery.
             *
             * NB. Whilst we're recovering, we turn the carrier state off. We
             * take measures to ensure that this device isn't used for
             * anything. We also stop the queue for this device. Various
             * different approaches (e.g. continuing to buffer packets) have
             * been tested but don't appear to improve the overall impact on
             * TCP connections.
             *
             * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
             * is initiated by a special "RESET" message - disconnect could
             * just mean we're not allowed to use this interface any more.
             */

            /* Stop old i/f to prevent errors whilst we rebuild the state. */
            spin_lock_irq(&np->tx_lock);
            spin_lock(&np->rx_lock);
            netif_stop_queue(dev);
            np->backend_state = BEST_DISCONNECTED;
            spin_unlock(&np->rx_lock);
            spin_unlock_irq(&np->tx_lock);

            /* Free resources. */
            free_irq(np->irq, dev);
            unbind_evtchn_from_irq(np->evtchn);
            free_page((unsigned long)np->tx);
            free_page((unsigned long)np->rx);
        }

        /* Move from CLOSED to DISCONNECTED state. */
        np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
        np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
        memset(np->tx, 0, PAGE_SIZE);
        memset(np->rx, 0, PAGE_SIZE);
        np->backend_state = BEST_DISCONNECTED;

        /* Construct an interface-CONNECT message for the domain controller. */
        cmsg.type = CMSG_NETIF_FE;
        cmsg.subtype = CMSG_NETIF_FE_INTERFACE_CONNECT;
        cmsg.length = sizeof(netif_fe_interface_connect_t);
        up.handle = status->handle;
        up.tx_shmem_frame = virt_to_machine(np->tx) >> PAGE_SHIFT;
        up.rx_shmem_frame = virt_to_machine(np->rx) >> PAGE_SHIFT;
        memcpy(cmsg.msg, &up, sizeof(up));

        /* Tell the controller to bring up the interface. */
        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
        break;

    case NETIF_INTERFACE_STATUS_CONNECTED:
        if ( np->backend_state == BEST_CLOSED )
        {
            printk(KERN_WARNING "Unexpected netif-CONNECTED message"
                   " in state %d\n", np->backend_state);
            break;
        }

        memcpy(dev->dev_addr, status->mac, ETH_ALEN);

        network_connect(dev, status);

        np->evtchn = status->evtchn;
        np->irq = bind_evtchn_to_irq(np->evtchn);
        (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM,
                          dev->name, dev);

        netctrl_connected_count();
        break;

    default:
        printk(KERN_WARNING "Status change to unknown value %d\n",
               status->status);
        break;
    }
}
/** Create a network device.
 * @param handle device handle
 * @param val return parameter for created device
 * @return 0 on success, error code otherwise
 */
static int create_netdev(int handle, struct net_device **val)
{
    int i, err = 0;
    struct net_device *dev = NULL;
    struct net_private *np = NULL;

    if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
    {
        printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
        err = -ENOMEM;
        goto exit;
    }

    np = dev->priv;
    np->backend_state = BEST_CLOSED;
    np->user_state = UST_CLOSED;
    np->handle = handle;

    spin_lock_init(&np->tx_lock);
    spin_lock_init(&np->rx_lock);

    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
        np->tx_skbs[i] = (void *)(i+1);
    for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
        np->rx_skbs[i] = (void *)(i+1);

    dev->open = network_open;
    dev->hard_start_xmit = network_start_xmit;
    dev->stop = network_close;
    dev->get_stats = network_get_stats;
    dev->poll = netif_poll;
    dev->weight = 64;

    if ( (err = register_netdev(dev)) != 0 )
    {
        printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
        goto exit;
    }
    np->dev = dev;
    list_add(&np->list, &dev_list);

 exit:
    if ( (err != 0) && (dev != NULL) )
        kfree(dev);
    else if ( val != NULL )
        *val = dev;
    return err;
}
/*
 * Initialize the network control interface. Set the number of network devices
 * and create them.
 */
static void netif_driver_status_change(
    netif_fe_driver_status_changed_t *status)
{
    int err = 0;
    int i;

    netctrl.interface_n = status->nr_interfaces;
    netctrl.connected_n = 0;

    for ( i = 0; i < netctrl.interface_n; i++ )
    {
        if ( (err = create_netdev(i, NULL)) != 0 )
        {
            netctrl_err(err);
            break;
        }
    }
}
static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
{
    int respond = 1;

    switch ( msg->subtype )
    {
    case CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED:
        if ( msg->length != sizeof(netif_fe_interface_status_changed_t) )
            goto error;
        netif_status_change((netif_fe_interface_status_changed_t *)
                            &msg->msg[0]);
        break;

    case CMSG_NETIF_FE_DRIVER_STATUS_CHANGED:
        if ( msg->length != sizeof(netif_fe_driver_status_changed_t) )
            goto error;
        netif_driver_status_change((netif_fe_driver_status_changed_t *)
                                   &msg->msg[0]);
        /* Message is a response */
        respond = 0;
        break;

    error:
    default:
        msg->length = 0;
        break;
    }

    if ( respond )
        ctrl_if_send_response(msg);
}
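
/*
 * Driver initialisation: register the control-interface receiver, send a
 * driver-UP status message to the domain controller, then poll for up to
 * wait_n scheduler ticks for every advertised interface to reach
 * BEST_CONNECTED before returning.
 */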
static int __init netif_init(void)
{
    ctrl_msg_t cmsg;
    netif_fe_driver_status_changed_t st;
    int err = 0, wait_i, wait_n = 20;

    if ( (start_info.flags & SIF_INITDOMAIN) ||
         (start_info.flags & SIF_NET_BE_DOMAIN) )
        return 0;

    printk("Initialising Xen virtual ethernet frontend driver");

    INIT_LIST_HEAD(&dev_list);

    netctrl_init();

    (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
                                    CALLBACK_IN_BLOCKING_CONTEXT);

    /* Send a driver-UP notification to the domain controller. */
    cmsg.type = CMSG_NETIF_FE;
    cmsg.subtype = CMSG_NETIF_FE_DRIVER_STATUS_CHANGED;
    cmsg.length = sizeof(netif_fe_driver_status_changed_t);
    st.status = NETIF_DRIVER_STATUS_UP;
    st.nr_interfaces = 0;
    memcpy(cmsg.msg, &st, sizeof(st));
    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);

    /* Wait for all interfaces to be connected. */
    for ( wait_i = 0; ; wait_i++ )
    {
        if ( (err = (wait_i < wait_n) ? netctrl_connected() : -ENETDOWN) != 0 )
        {
            err = (err > 0) ? 0 : err;
            break;
        }
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(1);
    }

    if ( err )
        ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);

    return err;
}

__initcall(netif_init);