ia64/xen-unstable

linux-2.6.7-xen-sparse/drivers/xen/net/network.c @ 1820:3d4f8eb89670

bitkeeper revision 1.1106.1.2 (40faa780dekT3E5arFwcbQDu1MbX6g)

Cleaned up Xen's instruction emulator.

author    kaf24@scramble.cl.cam.ac.uk
date      Sun Jul 18 16:38:24 2004 +0000 (2004-07-18)
parents   002fc84add90
children  51332b88e187 79d5d57de7d1 bde2c59c2519 f26582ec895e

line source
1 /******************************************************************************
2 * network.c
3 *
4 * Virtual network driver for conversing with remote driver backends.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 */
9 #include <linux/config.h>
10 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
18 #include <linux/netdevice.h>
19 #include <linux/inetdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/skbuff.h>
22 #include <linux/init.h>
24 #include <asm/io.h>
25 #include <net/sock.h>
26 #include <net/pkt_sched.h>
28 #include <asm-xen/evtchn.h>
29 #include <asm-xen/ctrl_if.h>
31 #include <asm/page.h>
33 #include <asm-xen/netif.h>
35 #define RX_BUF_SIZE ((PAGE_SIZE/2)+1) /* Fool the slab allocator :-) */
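    /* (Note: asking dev_alloc_skb() for just over half a page is intended to
     *  make the underlying allocation round up to a whole, page-aligned page,
     *  which network_alloc_rx_buffers() below relies on (it panics otherwise)
     *  because the page beneath skb->head is handed over to Xen.) */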
37 static void network_tx_buf_gc(struct net_device *dev);
38 static void network_alloc_rx_buffers(struct net_device *dev);
40 static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
41 static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
42 static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
44 static struct list_head dev_list;
46 struct net_private
47 {
48 struct list_head list;
49 struct net_device *dev;
51 struct net_device_stats stats;
52 NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
53 unsigned int tx_full;
55 netif_tx_interface_t *tx;
56 netif_rx_interface_t *rx;
58 spinlock_t tx_lock;
59 spinlock_t rx_lock;
61 unsigned int handle;
62 unsigned int evtchn;
63 unsigned int irq;
65 /* What is the status of our connection to the remote backend? */
66 #define BEST_CLOSED 0
67 #define BEST_DISCONNECTED 1
68 #define BEST_CONNECTED 2
69 unsigned int backend_state;
71 /* Is this interface open or closed (down or up)? */
72 #define UST_CLOSED 0
73 #define UST_OPEN 1
74 unsigned int user_state;
76 /*
77 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
78 * array is an index into a chain of free entries.
79 */
80 struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
81 struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
82 };
84 /* Access macros for acquiring and freeing slots in {tx,rx}_skbs[]. */
85 #define ADD_ID_TO_FREELIST(_list, _id) \
86 (_list)[(_id)] = (_list)[0]; \
87 (_list)[0] = (void *)(unsigned long)(_id);
88 #define GET_ID_FROM_FREELIST(_list) \
89 ({ unsigned long _id = (unsigned long)(_list)[0]; \
90 (_list)[0] = (_list)[_id]; \
91 (unsigned short)_id; })
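These two macros thread the free slots of tx_skbs[]/rx_skbs[] through entry 0
of the array: slot 0 holds the index of the first free slot, and every free
slot stores the index of the next one. A stand-alone model of the same scheme,
for illustration only: in the driver the entries are sk_buff pointers and the
indices are stored cast to pointers, whereas here a plain unsigned long array
is used, and RING and the helper names are invented.

    #include <stdio.h>

    #define RING 4
    static unsigned long list[RING + 1];       /* list[0] heads the free chain */

    static void add_id(unsigned long id)       /* cf. ADD_ID_TO_FREELIST()     */
    {
        list[id] = list[0];
        list[0]  = id;
    }

    static unsigned long get_id(void)          /* cf. GET_ID_FROM_FREELIST()   */
    {
        unsigned long id = list[0];
        list[0] = list[id];
        return id;
    }

    int main(void)
    {
        unsigned long i, a, b;

        for ( i = 0; i <= RING; i++ )          /* same init as create_netdev() */
            list[i] = i + 1;

        a = get_id();                          /* hands out id 1               */
        b = get_id();                          /* then id 2                    */
        add_id(a);                             /* put id 1 back on the chain   */
        printf("%lu %lu %lu\n", a, b, get_id());   /* prints: 1 2 1            */
        return 0;
    }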
93 static struct net_device *find_dev_by_handle(unsigned int handle)
94 {
95 struct list_head *ent;
96 struct net_private *np;
97 list_for_each ( ent, &dev_list )
98 {
99 np = list_entry(ent, struct net_private, list);
100 if ( np->handle == handle )
101 return np->dev;
102 }
103 return NULL;
104 }
106 /** Network interface info. */
107 struct netif_ctrl {
108 /** Number of interfaces. */
109 int interface_n;
110 /** Number of connected interfaces. */
111 int connected_n;
112 /** Error code. */
113 int err;
114 };
116 static struct netif_ctrl netctrl;
118 static void netctrl_init(void)
119 {
120 memset(&netctrl, 0, sizeof(netctrl));
121 netctrl.interface_n = -1;
122 }
124 /** Record a network interface error (only the first is kept) and
125 * return the current error code. */
126 static int netctrl_err(int err)
127 {
128 if(err < 0 && !netctrl.err){
129 netctrl.err = err;
130 printk(KERN_WARNING "%s> err=%d\n", __FUNCTION__, err);
131 }
132 return netctrl.err;
133 }
135 /** Test if all network interfaces are connected.
136 *
137 * @return 1 if all connected, 0 if not, negative error code otherwise
138 */
139 static int netctrl_connected(void)
140 {
141 int ok = 0;
142 ok = (netctrl.err ? netctrl.err :
143 (netctrl.connected_n == netctrl.interface_n));
144 return ok;
145 }
147 /** Count the connected network interfaces.
148 *
149 * @return connected count
150 */
151 static int netctrl_connected_count(void)
152 {
154 struct list_head *ent;
155 struct net_private *np;
156 unsigned int connected;
158 connected = 0;
160 list_for_each(ent, &dev_list)
161 {
162 np = list_entry(ent, struct net_private, list);
163 if ( np->backend_state == BEST_CONNECTED )
164 connected++;
165 }
167 netctrl.connected_n = connected;
168 return connected;
169 }
171 static int network_open(struct net_device *dev)
172 {
173 struct net_private *np = dev->priv;
175 memset(&np->stats, 0, sizeof(np->stats));
177 np->user_state = UST_OPEN;
179 network_alloc_rx_buffers(dev);
180 np->rx->event = np->rx_resp_cons + 1;
182 netif_start_queue(dev);
184 return 0;
185 }
188 static void network_tx_buf_gc(struct net_device *dev)
189 {
190 NETIF_RING_IDX i, prod;
191 unsigned short id;
192 struct net_private *np = dev->priv;
193 struct sk_buff *skb;
195 if ( np->backend_state != BEST_CONNECTED )
196 return;
198 do {
199 prod = np->tx->resp_prod;
201 for ( i = np->tx_resp_cons; i != prod; i++ )
202 {
203 id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
204 skb = np->tx_skbs[id];
205 ADD_ID_TO_FREELIST(np->tx_skbs, id);
206 dev_kfree_skb_any(skb);
207 }
209 np->tx_resp_cons = prod;
211 /*
212 * Set a new event, then check for race with update of tx_cons. Note
213 * that it is essential to schedule a callback, no matter how few
214 * buffers are pending. Even if there is space in the transmit ring,
215 * higher layers may be blocked because too much data is outstanding:
216 * in such cases notification from Xen is likely to be the only kick
217 * that we'll get.
218 */
219 np->tx->event =
220 prod + ((np->tx->req_prod - prod) >> 1) + 1;
221 mb();
222 }
223 while ( prod != np->tx->resp_prod );
225 if ( np->tx_full &&
226 ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
227 {
228 np->tx_full = 0;
229 if ( np->user_state == UST_OPEN )
230 netif_wake_queue(dev);
231 }
232 }
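The loop above is one half of the ring's interrupt-mitigation scheme: after
consuming responses it re-arms tx->event roughly halfway into the requests
still outstanding, then re-reads resp_prod behind a barrier to catch any
response that raced in while the event was being written. A compilable sketch
of that consume / re-arm / re-check shape; the shared_ring type, its fields and
__sync_synchronize() stand in for the real netif interface and mb():

    #include <stdio.h>

    struct shared_ring {
        volatile unsigned int req_prod;    /* written by the frontend */
        volatile unsigned int resp_prod;   /* written by the backend  */
        volatile unsigned int event;       /* backend notifies when resp_prod reaches this */
    };

    static unsigned int resp_cons;         /* private consumer index */

    static void consume_responses(struct shared_ring *ring)
    {
        unsigned int prod;

        do {
            prod = ring->resp_prod;
            while ( resp_cons != prod )
                resp_cons++;               /* ...release the buffer for this response... */

            /* Ask for the next interrupt roughly halfway through what is pending. */
            ring->event = prod + ((ring->req_prod - prod) >> 1) + 1;

            __sync_synchronize();          /* make the new event value visible first */
        }
        while ( prod != ring->resp_prod ); /* a response raced in: go round again */
    }

    int main(void)
    {
        struct shared_ring ring = { 8, 5, 1 };   /* three requests still in flight */

        consume_responses(&ring);
        printf("resp_cons=%u event=%u\n", resp_cons, ring.event);   /* 5 and 7 */
        return 0;
    }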
235 static void network_alloc_rx_buffers(struct net_device *dev)
236 {
237 unsigned short id;
238 struct net_private *np = dev->priv;
239 struct sk_buff *skb;
240 NETIF_RING_IDX i = np->rx->req_prod;
241 int nr_pfns = 0;
243 /* Make sure the batch is large enough to be worthwhile (1/2 ring). */
244 if ( unlikely((i - np->rx_resp_cons) > (NETIF_RX_RING_SIZE/2)) ||
245 unlikely(np->backend_state != BEST_CONNECTED) )
246 return;
248 do {
249 skb = dev_alloc_skb(RX_BUF_SIZE);
250 if ( unlikely(skb == NULL) )
251 break;
253 skb->dev = dev;
255 if ( unlikely(((unsigned long)skb->head & (PAGE_SIZE-1)) != 0) )
256 panic("alloc_skb needs to provide us page-aligned buffers.");
258 id = GET_ID_FROM_FREELIST(np->rx_skbs);
260 np->rx_skbs[id] = skb;
262 np->rx->ring[MASK_NETIF_RX_IDX(i)].req.id = id;
264 rx_pfn_array[nr_pfns] = virt_to_machine(skb->head) >> PAGE_SHIFT;
266 rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
267 rx_mcl[nr_pfns].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
268 rx_mcl[nr_pfns].args[1] = 0;
269 rx_mcl[nr_pfns].args[2] = 0;
271 nr_pfns++;
272 }
273 while ( (++i - np->rx_resp_cons) != NETIF_RX_RING_SIZE );
275 /*
276 * We may have allocated buffers which have entries outstanding in the page
277 * update queue -- make sure we flush those first!
278 */
279 flush_page_update_queue();
281 /* After all PTEs have been zapped we blow away stale TLB entries. */
282 rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;
284 /* Give away a batch of pages. */
285 rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
286 rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
287 rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
288 rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;
290 /* Zap PTEs and give away pages in one big multicall. */
291 (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);
293 /* Check return status of HYPERVISOR_dom_mem_op(). */
294 if ( rx_mcl[nr_pfns].args[5] != nr_pfns )
295 panic("Unable to reduce memory reservation\n");
297 np->rx->req_prod = i;
298 }
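The ring indices used here (and everywhere in this driver) are free-running
counters: they are never reduced modulo the ring size, only masked when used as
an array subscript, so unsigned subtraction gives the number of outstanding
entries even across 32-bit wraparound. A small worked example, assuming a
power-of-two ring; the size and MASK_IDX() below are illustrative, the real
values come from <asm-xen/netif.h>:

    #include <stdio.h>

    #define RING_SIZE 256
    #define MASK_IDX(i) ((i) & (RING_SIZE - 1))    /* cf. MASK_NETIF_RX_IDX() */

    int main(void)
    {
        unsigned int cons = 0xffffff80;   /* consumer is about to wrap...     */
        unsigned int prod = 0x00000010;   /* ...and the producer already has  */

        printf("entries outstanding: %u\n", prod - cons);      /* 144 */
        printf("slot for prod:       %u\n", MASK_IDX(prod));   /* 16  */
        printf("ring full?           %s\n",
               ((prod - cons) == RING_SIZE) ? "yes" : "no");   /* no  */
        return 0;
    }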
301 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
302 {
303 unsigned short id;
304 struct net_private *np = (struct net_private *)dev->priv;
305 netif_tx_request_t *tx;
306 NETIF_RING_IDX i;
308 if ( unlikely(np->tx_full) )
309 {
310 printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
311 netif_stop_queue(dev);
312 return -ENOBUFS;
313 }
315 if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
316 PAGE_SIZE) )
317 {
318 struct sk_buff *new_skb = dev_alloc_skb(RX_BUF_SIZE);
319 if ( unlikely(new_skb == NULL) )
320 return 1;
321 skb_put(new_skb, skb->len);
322 memcpy(new_skb->data, skb->data, skb->len);
323 dev_kfree_skb(skb);
324 skb = new_skb;
325 }
327 spin_lock_irq(&np->tx_lock);
329 if ( np->backend_state != BEST_CONNECTED )
330 {
331 spin_unlock_irq(&np->tx_lock);
332 return 1;
333 }
335 i = np->tx->req_prod;
337 id = GET_ID_FROM_FREELIST(np->tx_skbs);
338 np->tx_skbs[id] = skb;
340 tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
342 tx->id = id;
343 tx->addr = virt_to_machine(skb->data);
344 tx->size = skb->len;
346 wmb();
347 np->tx->req_prod = i + 1;
349 network_tx_buf_gc(dev);
351 if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
352 {
353 np->tx_full = 1;
354 netif_stop_queue(dev);
355 }
357 spin_unlock_irq(&np->tx_lock);
359 np->stats.tx_bytes += skb->len;
360 np->stats.tx_packets++;
362 /* Only notify Xen if there are no outstanding responses. */
363 mb();
364 if ( np->tx->resp_prod == i )
365 notify_via_evtchn(np->evtchn);
367 return 0;
368 }
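The transmit path only kicks the event channel when resp_prod has caught up
with the slot just filled: a backend that still has responses outstanding is
awake and will pick the new request up anyway. A stand-alone sketch of that
producer-side test; the tx_ring type, notify_backend() and __sync_synchronize()
are stand-ins for the real interface, wmb() and mb():

    #include <stdio.h>

    struct tx_ring {
        volatile unsigned int req_prod;     /* written by the frontend */
        volatile unsigned int resp_prod;    /* written by the backend  */
    };

    static int kicks;                       /* counts simulated event-channel kicks */
    static void notify_backend(void) { kicks++; }   /* cf. notify_via_evtchn() */

    static void publish_request(struct tx_ring *ring)
    {
        unsigned int i = ring->req_prod;

        /* ...fill in ring slot MASK_IDX(i) here... */

        __sync_synchronize();               /* wmb(): slot contents before index  */
        ring->req_prod = i + 1;

        __sync_synchronize();               /* mb(): index update before the test */
        if ( ring->resp_prod == i )         /* backend has consumed everything,   */
            notify_backend();               /* so it may be idle: send an event   */
    }

    int main(void)
    {
        struct tx_ring ring = { 3, 3 };

        publish_request(&ring);             /* backend idle -> kick            */
        publish_request(&ring);             /* backend lagging -> no kick      */
        printf("kicks=%d\n", kicks);        /* prints: kicks=1                 */
        return 0;
    }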
371 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
372 {
373 struct net_device *dev = dev_id;
374 struct net_private *np = dev->priv;
375 unsigned long flags;
377 spin_lock_irqsave(&np->tx_lock, flags);
378 network_tx_buf_gc(dev);
379 spin_unlock_irqrestore(&np->tx_lock, flags);
381 if ( (np->rx_resp_cons != np->rx->resp_prod) &&
382 (np->user_state == UST_OPEN) )
383 netif_rx_schedule(dev);
385 return IRQ_HANDLED;
386 }
389 static int netif_poll(struct net_device *dev, int *pbudget)
390 {
391 struct net_private *np = dev->priv;
392 struct sk_buff *skb;
393 netif_rx_response_t *rx;
394 NETIF_RING_IDX i;
395 mmu_update_t *mmu = rx_mmu;
396 multicall_entry_t *mcl = rx_mcl;
397 int work_done, budget, more_to_do = 1;
398 struct sk_buff_head rxq;
399 unsigned long flags;
401 spin_lock(&np->rx_lock);
403 if ( np->backend_state != BEST_CONNECTED )
404 {
405 spin_unlock(&np->rx_lock);
406 return 0;
407 }
409 skb_queue_head_init(&rxq);
411 if ( (budget = *pbudget) > dev->quota )
412 budget = dev->quota;
414 for ( i = np->rx_resp_cons, work_done = 0;
415 (i != np->rx->resp_prod) && (work_done < budget);
416 i++, work_done++ )
417 {
418 rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
420 skb = np->rx_skbs[rx->id];
421 ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
423 if ( unlikely(rx->status <= 0) )
424 {
425 /* Gate this error. We get a (valid) slew of them on suspend. */
426 if ( np->user_state != UST_OPEN )
427 printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
428 dev_kfree_skb(skb);
429 continue;
430 }
432 skb->data = skb->tail = skb->head + (rx->addr & ~PAGE_MASK);
433 skb_put(skb, rx->status);
435 np->stats.rx_packets++;
436 np->stats.rx_bytes += rx->status;
438 /* Remap the page. */
439 mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
440 mmu->val = __pa(skb->head) >> PAGE_SHIFT;
441 mmu++;
442 mcl->op = __HYPERVISOR_update_va_mapping;
443 mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
444 mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
445 mcl->args[2] = 0;
446 mcl++;
448 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
449 rx->addr >> PAGE_SHIFT;
451 __skb_queue_tail(&rxq, skb);
452 }
454 /* Do all the remapping work, and M->P updates, in one big hypercall. */
455 if ( likely((mcl - rx_mcl) != 0) )
456 {
457 mcl->op = __HYPERVISOR_mmu_update;
458 mcl->args[0] = (unsigned long)rx_mmu;
459 mcl->args[1] = mmu - rx_mmu;
460 mcl->args[2] = 0;
461 mcl++;
462 (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
463 }
465 while ( (skb = __skb_dequeue(&rxq)) != NULL )
466 {
467 /* Set the shared-info area, which is hidden behind the real data. */
468 atomic_set(&(skb_shinfo(skb)->dataref), 1);
469 skb_shinfo(skb)->nr_frags = 0;
470 skb_shinfo(skb)->frag_list = NULL;
472 /* Ethernet-specific work. Delayed to here as it peeks the header. */
473 skb->protocol = eth_type_trans(skb, dev);
475 /* Pass it up. */
476 netif_rx(skb);
477 dev->last_rx = jiffies;
478 }
480 np->rx_resp_cons = i;
482 network_alloc_rx_buffers(dev);
484 *pbudget -= work_done;
485 dev->quota -= work_done;
487 if ( work_done < budget )
488 {
489 local_irq_save(flags);
491 np->rx->event = i + 1;
493 /* Deal with hypervisor racing our resetting of rx_event. */
494 mb();
495 if ( np->rx->resp_prod == i )
496 {
497 __netif_rx_complete(dev);
498 more_to_do = 0;
499 }
501 local_irq_restore(flags);
502 }
504 spin_unlock(&np->rx_lock);
506 return more_to_do;
507 }
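netif_poll() follows the 2.6-era dev->poll() contract: take at most
min(*pbudget, dev->quota) packets off the ring, subtract the work done from
both counters, and return non-zero while packets remain. A minimal model of
just that accounting (all names here are invented):

    #include <stdio.h>

    static int model_poll(int *pbudget, int *quota, int *pending)
    {
        int budget    = (*pbudget > *quota) ? *quota   : *pbudget;
        int work_done = (*pending < budget) ? *pending : budget;

        *pending -= work_done;
        *pbudget -= work_done;
        *quota   -= work_done;

        return *pending != 0;              /* "more_to_do" */
    }

    int main(void)
    {
        int budget = 300, quota = 64, pending = 100;

        while ( model_poll(&budget, &quota, &pending) && (quota > 0) )
            ;
        printf("left=%d budget=%d quota=%d\n", pending, budget, quota);
        /* prints: left=36 budget=236 quota=0 */
        return 0;
    }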
510 static int network_close(struct net_device *dev)
511 {
512 struct net_private *np = dev->priv;
513 np->user_state = UST_CLOSED;
514 netif_stop_queue(np->dev);
515 return 0;
516 }
519 static struct net_device_stats *network_get_stats(struct net_device *dev)
520 {
521 struct net_private *np = (struct net_private *)dev->priv;
522 return &np->stats;
523 }
526 static void network_connect(struct net_device *dev,
527 netif_fe_interface_status_changed_t *status)
528 {
529 struct net_private *np;
530 int i, requeue_idx;
531 netif_tx_request_t *tx;
533 np = dev->priv;
534 spin_lock_irq(&np->rx_lock);
535 spin_lock(&np->tx_lock);
537 /* Recovery procedure: */
539 /* Step 1: Reinitialise variables. */
540 np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
541 np->rx->event = 1;
543 /* Step 2: Rebuild the RX and TX ring contents.
544 * NB. We could just free the queued TX packets now but we hope
545 * that sending them out might do some good. We have to rebuild
546 * the RX ring because some of our pages are currently flipped out
547 * so we can't just free the RX skbs.
548 * NB2. Freelist index entries are always going to be less than
549 * __PAGE_OFFSET, whereas pointers to skbs will always be equal or
550 * greater than __PAGE_OFFSET: we use this property to distinguish
551 * them.
552 */
554 /* Rebuild the TX buffer freelist and the TX ring itself.
555 * NB. This reorders packets. We could keep more private state
556 * to avoid this but maybe it doesn't matter so much given the
557 * interface has been down.
558 */
559 for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
560 {
561 if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
562 {
563 struct sk_buff *skb = np->tx_skbs[i];
565 tx = &np->tx->ring[requeue_idx++].req;
567 tx->id = i;
568 tx->addr = virt_to_machine(skb->data);
569 tx->size = skb->len;
571 np->stats.tx_bytes += skb->len;
572 np->stats.tx_packets++;
573 }
574 }
575 wmb();
576 np->tx->req_prod = requeue_idx;
578 /* Rebuild the RX buffer freelist and the RX ring itself. */
579 for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
580 if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
581 np->rx->ring[requeue_idx++].req.id = i;
582 wmb();
583 np->rx->req_prod = requeue_idx;
585 /* Step 3: All public and private state should now be sane. Get
586 * ready to start sending and receiving packets and give the driver
587 * domain a kick because we've probably just requeued some
588 * packets.
589 */
590 np->backend_state = BEST_CONNECTED;
591 notify_via_evtchn(status->evtchn);
592 network_tx_buf_gc(dev);
594 if ( np->user_state == UST_OPEN )
595 netif_start_queue(dev);
597 spin_unlock(&np->tx_lock);
598 spin_unlock_irq(&np->rx_lock);
599 }
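The requeue loops above depend on the trick spelled out in the comment: a slot
of tx_skbs[]/rx_skbs[] holds either a small free-list index or a real skb
pointer, and the two are told apart purely by magnitude against __PAGE_OFFSET.
A user-space analogue of the same discrimination, with an arbitrary threshold
standing in for __PAGE_OFFSET:

    #include <stdio.h>

    #define PTR_THRESHOLD 0x10000UL        /* plays the role of __PAGE_OFFSET */

    int main(void)
    {
        char buf[16];
        void *slots[3];
        int i;

        slots[0] = (void *)(unsigned long)2;   /* a free-list index   */
        slots[1] = buf;                        /* a genuine pointer   */
        slots[2] = (void *)(unsigned long)5;   /* another free index  */

        for ( i = 0; i < 3; i++ )
            printf("slot %d holds %s\n", i,
                   ((unsigned long)slots[i] >= PTR_THRESHOLD) ? "a pointer"
                                                              : "a free index");
        return 0;
    }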
601 static void netif_status_change(netif_fe_interface_status_changed_t *status)
602 {
603 ctrl_msg_t cmsg;
604 netif_fe_interface_connect_t up;
605 struct net_device *dev;
606 struct net_private *np;
608 if ( netctrl.interface_n <= 0 )
609 {
610 printk(KERN_WARNING "Status change: no interfaces\n");
611 return;
612 }
614 dev = find_dev_by_handle(status->handle);
615 if(!dev){
616 printk(KERN_WARNING "Status change: invalid netif handle %u\n",
617 status->handle);
618 return;
619 }
620 np = dev->priv;
622 switch ( status->status )
623 {
624 case NETIF_INTERFACE_STATUS_DESTROYED:
625 printk(KERN_WARNING "Unexpected netif-DESTROYED message in state %d\n",
626 np->backend_state);
627 break;
629 case NETIF_INTERFACE_STATUS_DISCONNECTED:
630 if ( np->backend_state != BEST_CLOSED )
631 {
632 printk(KERN_WARNING "Unexpected netif-DISCONNECTED message"
633 " in state %d\n", np->backend_state);
634 printk(KERN_INFO "Attempting to reconnect network interface\n");
636 /* Begin interface recovery.
637 *
638 * NB. Whilst we're recovering, we turn the carrier state off. We
639 * take measures to ensure that this device isn't used for
640 * anything. We also stop the queue for this device. Various
641 * different approaches (e.g. continuing to buffer packets) have
642 * been tested but don't appear to improve the overall impact on
643 * TCP connections.
644 *
645 * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
646 * is initiated by a special "RESET" message - disconnect could
647 * just mean we're not allowed to use this interface any more.
648 */
650 /* Stop old i/f to prevent errors whilst we rebuild the state. */
651 spin_lock_irq(&np->tx_lock);
652 spin_lock(&np->rx_lock);
653 netif_stop_queue(dev);
654 np->backend_state = BEST_DISCONNECTED;
655 spin_unlock(&np->rx_lock);
656 spin_unlock_irq(&np->tx_lock);
658 /* Free resources. */
659 free_irq(np->irq, dev);
660 unbind_evtchn_from_irq(np->evtchn);
661 free_page((unsigned long)np->tx);
662 free_page((unsigned long)np->rx);
663 }
665 /* Move from CLOSED to DISCONNECTED state. */
666 np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
667 np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
668 memset(np->tx, 0, PAGE_SIZE);
669 memset(np->rx, 0, PAGE_SIZE);
670 np->backend_state = BEST_DISCONNECTED;
672 /* Construct an interface-CONNECT message for the domain controller. */
673 cmsg.type = CMSG_NETIF_FE;
674 cmsg.subtype = CMSG_NETIF_FE_INTERFACE_CONNECT;
675 cmsg.length = sizeof(netif_fe_interface_connect_t);
676 up.handle = status->handle;
677 up.tx_shmem_frame = virt_to_machine(np->tx) >> PAGE_SHIFT;
678 up.rx_shmem_frame = virt_to_machine(np->rx) >> PAGE_SHIFT;
679 memcpy(cmsg.msg, &up, sizeof(up));
681 /* Tell the controller to bring up the interface. */
682 ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
683 break;
685 case NETIF_INTERFACE_STATUS_CONNECTED:
686 if ( np->backend_state == BEST_CLOSED )
687 {
688 printk(KERN_WARNING "Unexpected netif-CONNECTED message"
689 " in state %d\n", np->backend_state);
690 break;
691 }
693 memcpy(dev->dev_addr, status->mac, ETH_ALEN);
695 network_connect(dev, status);
697 np->evtchn = status->evtchn;
698 np->irq = bind_evtchn_to_irq(np->evtchn);
699 (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM,
700 dev->name, dev);
702 netctrl_connected_count();
703 break;
705 default:
706 printk(KERN_WARNING "Status change to unknown value %d\n",
707 status->status);
708 break;
709 }
710 }
712 /** Create a network device.
713 * @param handle device handle
714 * @param val return parameter for created device
715 * @return 0 on success, error code otherwise
716 */
717 static int create_netdev(int handle, struct net_device **val)
718 {
719 int i, err = 0;
720 struct net_device *dev = NULL;
721 struct net_private *np = NULL;
723 if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
724 {
725 printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
726 err = -ENOMEM;
727 goto exit;
728 }
730 np = dev->priv;
731 np->backend_state = BEST_CLOSED;
732 np->user_state = UST_CLOSED;
733 np->handle = handle;
735 spin_lock_init(&np->tx_lock);
736 spin_lock_init(&np->rx_lock);
738 /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
739 for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
740 np->tx_skbs[i] = (void *)(i+1);
741 for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
742 np->rx_skbs[i] = (void *)(i+1);
744 dev->open = network_open;
745 dev->hard_start_xmit = network_start_xmit;
746 dev->stop = network_close;
747 dev->get_stats = network_get_stats;
748 dev->poll = netif_poll;
749 dev->weight = 64;
751 if ( (err = register_netdev(dev)) != 0 )
752 {
753 printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
754 goto exit;
755 }
756 np->dev = dev;
757 list_add(&np->list, &dev_list);
759 exit:
760 if ( (err != 0) && (dev != NULL ) )
761 kfree(dev);
762 else if ( val != NULL )
763 *val = dev;
764 return err;
765 }
767 /*
768 * Initialize the network control interface. Set the number of network devices
769 * and create them.
770 */
771 static void netif_driver_status_change(
772 netif_fe_driver_status_changed_t *status)
773 {
774 int err = 0;
775 int i;
777 netctrl.interface_n = status->nr_interfaces;
778 netctrl.connected_n = 0;
780 for ( i = 0; i < netctrl.interface_n; i++ )
781 {
782 if ( (err = create_netdev(i, NULL)) != 0 )
783 {
784 netctrl_err(err);
785 break;
786 }
787 }
788 }
790 static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
791 {
792 int respond = 1;
794 switch ( msg->subtype )
795 {
796 case CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED:
797 if ( msg->length != sizeof(netif_fe_interface_status_changed_t) )
798 goto error;
799 netif_status_change((netif_fe_interface_status_changed_t *)
800 &msg->msg[0]);
801 break;
803 case CMSG_NETIF_FE_DRIVER_STATUS_CHANGED:
804 if ( msg->length != sizeof(netif_fe_driver_status_changed_t) )
805 goto error;
806 netif_driver_status_change((netif_fe_driver_status_changed_t *)
807 &msg->msg[0]);
808 /* Message is a response */
809 respond = 0;
810 break;
812 error:
813 default:
814 msg->length = 0;
815 break;
816 }
818 if ( respond )
819 ctrl_if_send_response(msg);
820 }
823 static int __init netif_init(void)
824 {
825 ctrl_msg_t cmsg;
826 netif_fe_driver_status_changed_t st;
827 int err = 0, wait_i, wait_n = 20;
829 if ( (start_info.flags & SIF_INITDOMAIN) ||
830 (start_info.flags & SIF_NET_BE_DOMAIN) )
831 return 0;
833 printk("Initialising Xen virtual ethernet frontend driver");
835 INIT_LIST_HEAD(&dev_list);
837 netctrl_init();
839 (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
840 CALLBACK_IN_BLOCKING_CONTEXT);
842 /* Send a driver-UP notification to the domain controller. */
843 cmsg.type = CMSG_NETIF_FE;
844 cmsg.subtype = CMSG_NETIF_FE_DRIVER_STATUS_CHANGED;
845 cmsg.length = sizeof(netif_fe_driver_status_changed_t);
846 st.status = NETIF_DRIVER_STATUS_UP;
847 st.nr_interfaces = 0;
848 memcpy(cmsg.msg, &st, sizeof(st));
849 ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
851 /* Wait for all interfaces to be connected. */
852 for ( wait_i = 0; ; wait_i++)
853 {
854 if ( (err = (wait_i < wait_n) ? netctrl_connected() : -ENETDOWN) != 0 )
855 {
856 err = (err > 0) ? 0 : err;
857 break;
858 }
859 set_current_state(TASK_INTERRUPTIBLE);
860 schedule_timeout(1);
861 }
863 if ( err )
864 ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
866 return err;
867 }
869 __initcall(netif_init);
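netif_init() waits for its interfaces with a bounded poll: try
netctrl_connected() up to wait_n times, sleeping a tick between attempts, and
give up with -ENETDOWN once the limit is reached. The same shape as a
stand-alone program; the connected() stub, the error value and the sleep
interval are illustrative:

    #include <stdio.h>
    #include <unistd.h>

    static int connected(void) { return 0; }   /* cf. netctrl_connected(): 1 = done */

    int main(void)
    {
        int err = 0, wait_i, wait_n = 20;

        for ( wait_i = 0; ; wait_i++ )
        {
            if ( (err = (wait_i < wait_n) ? connected() : -1) != 0 )
            {
                err = (err > 0) ? 0 : err;     /* a positive result means success */
                break;
            }
            usleep(10000);                     /* cf. schedule_timeout(1) */
        }
        printf("err=%d\n", err);               /* prints: err=-1 (never connected) */
        return 0;
    }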