ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c @ 10642:4b9876fe2f1f

[NET] back: Add GSO features field and check gso_size

This patch adds the as-yet-unused GSO features field, which will contain
protocol-independent bits such as the ECN marker.

It also makes the backend check gso_size to ensure that it is non-zero.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jul 03 09:05:18 2006 +0100 (2006-07-03)
parents 5db7bbccf4d2
children addde2d2d97a
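For illustration only, here is a minimal sketch of the backend-side validation this change describes: reject a GSO extra-info slot whose gso_size is zero. The helper name is hypothetical and this is not the actual netback code; only the netif_extra_info fields (type, u.gso.size, u.gso.type) follow the interface used later in this file.

/* Sketch only: hypothetical helper, not the real netback validation path. */
static int netbk_check_gso_extra(const struct netif_extra_info *gso)
{
	if (gso->type != XEN_NETIF_EXTRA_TYPE_GSO)
		return -EINVAL;	/* not a GSO extra-info slot */
	if (gso->u.gso.size == 0)
		return -EINVAL;	/* backend must reject a zero gso_size */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4)
		return -EINVAL;	/* only TCPv4 GSO is defined at this point */
	return 0;
}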
line source
1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
30 */
32 #include <linux/config.h>
33 #include <linux/module.h>
34 #include <linux/version.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/errno.h>
40 #include <linux/netdevice.h>
41 #include <linux/inetdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/skbuff.h>
44 #include <linux/init.h>
45 #include <linux/bitops.h>
46 #include <linux/ethtool.h>
47 #include <linux/in.h>
48 #include <linux/if_ether.h>
49 #include <net/sock.h>
50 #include <net/pkt_sched.h>
51 #include <net/arp.h>
52 #include <net/route.h>
53 #include <asm/io.h>
54 #include <asm/uaccess.h>
55 #include <xen/evtchn.h>
56 #include <xen/xenbus.h>
57 #include <xen/interface/io/netif.h>
58 #include <xen/interface/memory.h>
59 #include <xen/balloon.h>
60 #include <asm/page.h>
61 #include <asm/uaccess.h>
62 #include <xen/interface/grant_table.h>
63 #include <xen/gnttab.h>
65 #define GRANT_INVALID_REF 0
67 #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
68 #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
70 static inline void init_skb_shinfo(struct sk_buff *skb)
71 {
72 atomic_set(&(skb_shinfo(skb)->dataref), 1);
73 skb_shinfo(skb)->nr_frags = 0;
74 skb_shinfo(skb)->frag_list = NULL;
75 }
77 struct netfront_info {
78 struct list_head list;
79 struct net_device *netdev;
81 struct net_device_stats stats;
83 struct netif_tx_front_ring tx;
84 struct netif_rx_front_ring rx;
86 spinlock_t tx_lock;
87 spinlock_t rx_lock;
89 unsigned int handle;
90 unsigned int evtchn, irq;
92 /* Receive-ring batched refills. */
93 #define RX_MIN_TARGET 8
94 #define RX_DFL_MIN_TARGET 64
95 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
96 unsigned rx_min_target, rx_max_target, rx_target;
97 struct sk_buff_head rx_batch;
99 struct timer_list rx_refill_timer;
101 /*
102 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
103 * array is an index into a chain of free entries.
104 */
105 struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
106 struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
108 #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
109 grant_ref_t gref_tx_head;
110 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
111 grant_ref_t gref_rx_head;
112 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];
114 struct xenbus_device *xbdev;
115 int tx_ring_ref;
116 int rx_ring_ref;
117 u8 mac[ETH_ALEN];
119 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
120 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
121 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
122 };
124 /*
125 * Access helpers for acquiring and freeing slots in {tx,rx}_skbs[].
126 */
128 static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
129 {
130 list[id] = list[0];
131 list[0] = (void *)(unsigned long)id;
132 }
134 static inline unsigned short get_id_from_freelist(struct sk_buff **list)
135 {
136 unsigned int id = (unsigned int)(unsigned long)list[0];
137 list[0] = list[id];
138 return id;
139 }
141 #define DPRINTK(fmt, args...) \
142 pr_debug("netfront (%s:%d) " fmt, \
143 __FUNCTION__, __LINE__, ##args)
144 #define IPRINTK(fmt, args...) \
145 printk(KERN_INFO "netfront: " fmt, ##args)
146 #define WPRINTK(fmt, args...) \
147 printk(KERN_WARNING "netfront: " fmt, ##args)
149 static int talk_to_backend(struct xenbus_device *, struct netfront_info *);
150 static int setup_device(struct xenbus_device *, struct netfront_info *);
151 static struct net_device *create_netdev(int, struct xenbus_device *);
153 static void netfront_closing(struct xenbus_device *);
155 static void end_access(int, void *);
156 static void netif_disconnect_backend(struct netfront_info *);
157 static void close_netdev(struct netfront_info *);
158 static void netif_free(struct netfront_info *);
160 static void network_connect(struct net_device *);
161 static void network_tx_buf_gc(struct net_device *);
162 static void network_alloc_rx_buffers(struct net_device *);
163 static int send_fake_arp(struct net_device *);
165 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
167 #ifdef CONFIG_SYSFS
168 static int xennet_sysfs_addif(struct net_device *netdev);
169 static void xennet_sysfs_delif(struct net_device *netdev);
170 #else /* !CONFIG_SYSFS */
171 #define xennet_sysfs_addif(dev) (0)
172 #define xennet_sysfs_delif(dev) do { } while(0)
173 #endif
175 static inline int xennet_can_sg(struct net_device *dev)
176 {
177 return dev->features & NETIF_F_SG;
178 }
180 /**
181 * Entry point to this code when a new device is created. Allocate the basic
182 * structures and the ring buffers for communication with the backend, and
183 * inform the backend of the appropriate details for those. Switch to
184 * Connected state.
185 */
186 static int __devinit netfront_probe(struct xenbus_device *dev,
187 const struct xenbus_device_id *id)
188 {
189 int err;
190 struct net_device *netdev;
191 struct netfront_info *info;
192 unsigned int handle;
194 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%u", &handle);
195 if (err != 1) {
196 xenbus_dev_fatal(dev, err, "reading handle");
197 return err;
198 }
200 netdev = create_netdev(handle, dev);
201 if (IS_ERR(netdev)) {
202 err = PTR_ERR(netdev);
203 xenbus_dev_fatal(dev, err, "creating netdev");
204 return err;
205 }
207 info = netdev_priv(netdev);
208 dev->dev.driver_data = info;
210 err = talk_to_backend(dev, info);
211 if (err) {
212 xennet_sysfs_delif(info->netdev);
213 unregister_netdev(netdev);
214 free_netdev(netdev);
215 dev->dev.driver_data = NULL;
216 return err;
217 }
219 return 0;
220 }
223 /**
224 * We are reconnecting to the backend, due to a suspend/resume, or a backend
225 * driver restart. We tear down our netif structure and recreate it, but
226 * leave the device-layer structures intact so that this is transparent to the
227 * rest of the kernel.
228 */
229 static int netfront_resume(struct xenbus_device *dev)
230 {
231 struct netfront_info *info = dev->dev.driver_data;
233 DPRINTK("%s\n", dev->nodename);
235 netif_disconnect_backend(info);
236 return talk_to_backend(dev, info);
237 }
239 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
240 {
241 char *s, *e, *macstr;
242 int i;
244 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
245 if (IS_ERR(macstr))
246 return PTR_ERR(macstr);
248 for (i = 0; i < ETH_ALEN; i++) {
249 mac[i] = simple_strtoul(s, &e, 16);
250 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
251 kfree(macstr);
252 return -ENOENT;
253 }
254 s = e+1;
255 }
257 kfree(macstr);
258 return 0;
259 }
261 /* Common code used when first setting up, and when resuming. */
262 static int talk_to_backend(struct xenbus_device *dev,
263 struct netfront_info *info)
264 {
265 const char *message;
266 struct xenbus_transaction xbt;
267 int err;
269 err = xen_net_read_mac(dev, info->mac);
270 if (err) {
271 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
272 goto out;
273 }
275 /* Create shared ring, alloc event channel. */
276 err = setup_device(dev, info);
277 if (err)
278 goto out;
280 again:
281 err = xenbus_transaction_start(&xbt);
282 if (err) {
283 xenbus_dev_fatal(dev, err, "starting transaction");
284 goto destroy_ring;
285 }
287 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
288 info->tx_ring_ref);
289 if (err) {
290 message = "writing tx ring-ref";
291 goto abort_transaction;
292 }
293 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
294 info->rx_ring_ref);
295 if (err) {
296 message = "writing rx ring-ref";
297 goto abort_transaction;
298 }
299 err = xenbus_printf(xbt, dev->nodename,
300 "event-channel", "%u", info->evtchn);
301 if (err) {
302 message = "writing event-channel";
303 goto abort_transaction;
304 }
306 err = xenbus_transaction_end(xbt, 0);
307 if (err) {
308 if (err == -EAGAIN)
309 goto again;
310 xenbus_dev_fatal(dev, err, "completing transaction");
311 goto destroy_ring;
312 }
314 return 0;
316 abort_transaction:
317 xenbus_transaction_end(xbt, 1);
318 xenbus_dev_fatal(dev, err, "%s", message);
319 destroy_ring:
320 netif_free(info);
321 out:
322 return err;
323 }
326 static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
327 {
328 struct netif_tx_sring *txs;
329 struct netif_rx_sring *rxs;
330 int err;
331 struct net_device *netdev = info->netdev;
333 info->tx_ring_ref = GRANT_INVALID_REF;
334 info->rx_ring_ref = GRANT_INVALID_REF;
335 info->rx.sring = NULL;
336 info->tx.sring = NULL;
337 info->irq = 0;
339 txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
340 if (!txs) {
341 err = -ENOMEM;
342 xenbus_dev_fatal(dev, err, "allocating tx ring page");
343 goto fail;
344 }
345 SHARED_RING_INIT(txs);
346 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
348 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
349 if (err < 0) {
350 free_page((unsigned long)txs);
351 goto fail;
352 }
353 info->tx_ring_ref = err;
355 rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
356 if (!rxs) {
357 err = -ENOMEM;
358 xenbus_dev_fatal(dev, err, "allocating rx ring page");
359 goto fail;
360 }
361 SHARED_RING_INIT(rxs);
362 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
364 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
365 if (err < 0) {
366 free_page((unsigned long)rxs);
367 goto fail;
368 }
369 info->rx_ring_ref = err;
371 err = xenbus_alloc_evtchn(dev, &info->evtchn);
372 if (err)
373 goto fail;
375 memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
376 err = bind_evtchn_to_irqhandler(info->evtchn, netif_int,
377 SA_SAMPLE_RANDOM, netdev->name, netdev);
378 if (err < 0)
379 goto fail;
380 info->irq = err;
381 return 0;
383 fail:
384 netif_free(info);
385 return err;
386 }
389 /**
390 * Callback received when the backend's state changes.
391 */
392 static void backend_changed(struct xenbus_device *dev,
393 enum xenbus_state backend_state)
394 {
395 struct netfront_info *np = dev->dev.driver_data;
396 struct net_device *netdev = np->netdev;
398 DPRINTK("\n");
400 switch (backend_state) {
401 case XenbusStateInitialising:
402 case XenbusStateInitialised:
403 case XenbusStateConnected:
404 case XenbusStateUnknown:
405 case XenbusStateClosed:
406 break;
408 case XenbusStateInitWait:
409 network_connect(netdev);
410 xenbus_switch_state(dev, XenbusStateConnected);
411 (void)send_fake_arp(netdev);
412 break;
414 case XenbusStateClosing:
415 netfront_closing(dev);
416 break;
417 }
418 }
421 /** Send a packet on a net device to encourage switches to learn the
422 * MAC. We send a fake ARP reply.
423 *
424 * @param dev device
425 * @return 0 on success, error code otherwise
426 */
427 static int send_fake_arp(struct net_device *dev)
428 {
429 struct sk_buff *skb;
430 u32 src_ip, dst_ip;
432 dst_ip = INADDR_BROADCAST;
433 src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
435 /* No IP? Then nothing to do. */
436 if (src_ip == 0)
437 return 0;
439 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
440 dst_ip, dev, src_ip,
441 /*dst_hw*/ NULL, /*src_hw*/ NULL,
442 /*target_hw*/ dev->dev_addr);
443 if (skb == NULL)
444 return -ENOMEM;
446 return dev_queue_xmit(skb);
447 }
450 static int network_open(struct net_device *dev)
451 {
452 struct netfront_info *np = netdev_priv(dev);
454 memset(&np->stats, 0, sizeof(np->stats));
456 network_alloc_rx_buffers(dev);
457 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
459 netif_start_queue(dev);
461 return 0;
462 }
464 static inline int netfront_tx_slot_available(struct netfront_info *np)
465 {
466 return RING_FREE_REQUESTS(&np->tx) >= MAX_SKB_FRAGS + 2;
467 }
469 static inline void network_maybe_wake_tx(struct net_device *dev)
470 {
471 struct netfront_info *np = netdev_priv(dev);
473 if (unlikely(netif_queue_stopped(dev)) &&
474 netfront_tx_slot_available(np) &&
475 likely(netif_running(dev)))
476 netif_wake_queue(dev);
477 }
479 static void network_tx_buf_gc(struct net_device *dev)
480 {
481 RING_IDX cons, prod;
482 unsigned short id;
483 struct netfront_info *np = netdev_priv(dev);
484 struct sk_buff *skb;
486 if (unlikely(!netif_carrier_ok(dev)))
487 return;
489 do {
490 prod = np->tx.sring->rsp_prod;
491 rmb(); /* Ensure we see responses up to 'rp'. */
493 for (cons = np->tx.rsp_cons; cons != prod; cons++) {
494 struct netif_tx_response *txrsp;
496 txrsp = RING_GET_RESPONSE(&np->tx, cons);
497 if (txrsp->status == NETIF_RSP_NULL)
498 continue;
500 id = txrsp->id;
501 skb = np->tx_skbs[id];
502 if (unlikely(gnttab_query_foreign_access(
503 np->grant_tx_ref[id]) != 0)) {
504 printk(KERN_ALERT "network_tx_buf_gc: warning "
505 "-- grant still in use by backend "
506 "domain.\n");
507 BUG();
508 }
509 gnttab_end_foreign_access_ref(
510 np->grant_tx_ref[id], GNTMAP_readonly);
511 gnttab_release_grant_reference(
512 &np->gref_tx_head, np->grant_tx_ref[id]);
513 np->grant_tx_ref[id] = GRANT_INVALID_REF;
514 add_id_to_freelist(np->tx_skbs, id);
515 dev_kfree_skb_irq(skb);
516 }
518 np->tx.rsp_cons = prod;
520 /*
521 * Set a new event, then check for race with update of tx_cons.
522 * Note that it is essential to schedule a callback, no matter
523 * how few buffers are pending. Even if there is space in the
524 * transmit ring, higher layers may be blocked because too much
525 * data is outstanding: in such cases notification from Xen is
526 * likely to be the only kick that we'll get.
527 */
528 np->tx.sring->rsp_event =
529 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
530 mb();
531 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
533 network_maybe_wake_tx(dev);
534 }
537 static void rx_refill_timeout(unsigned long data)
538 {
539 struct net_device *dev = (struct net_device *)data;
540 netif_rx_schedule(dev);
541 }
544 static void network_alloc_rx_buffers(struct net_device *dev)
545 {
546 unsigned short id;
547 struct netfront_info *np = netdev_priv(dev);
548 struct sk_buff *skb;
549 int i, batch_target;
550 RING_IDX req_prod = np->rx.req_prod_pvt;
551 struct xen_memory_reservation reservation;
552 grant_ref_t ref;
554 if (unlikely(!netif_carrier_ok(dev)))
555 return;
557 /*
558 * Allocate skbuffs greedily, even though we batch updates to the
559 * receive ring. This creates a less bursty demand on the memory
560 * allocator, so should reduce the chance of failed allocation requests
561 * both for ourself and for other kernel subsystems.
562 */
563 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
564 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
565 /*
566 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
567 * tailroom then round down to SKB_DATA_ALIGN boundary.
568 */
569 skb = __dev_alloc_skb(
570 ((PAGE_SIZE - sizeof(struct skb_shared_info)) &
571 (-SKB_DATA_ALIGN(1))) - 16,
572 GFP_ATOMIC|__GFP_NOWARN);
573 if (skb == NULL) {
574 /* Any skbuffs queued for refill? Force them out. */
575 if (i != 0)
576 goto refill;
577 /* Could not allocate any skbuffs. Try again later. */
578 mod_timer(&np->rx_refill_timer,
579 jiffies + (HZ/10));
580 return;
581 }
582 __skb_queue_tail(&np->rx_batch, skb);
583 }
585 /* Is the batch large enough to be worthwhile? */
586 if (i < (np->rx_target/2))
587 return;
589 /* Adjust our fill target if we risked running out of buffers. */
590 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
591 ((np->rx_target *= 2) > np->rx_max_target))
592 np->rx_target = np->rx_max_target;
594 refill:
595 for (i = 0; ; i++) {
596 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
597 break;
599 skb->dev = dev;
601 id = get_id_from_freelist(np->rx_skbs);
603 np->rx_skbs[id] = skb;
605 RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
606 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
607 BUG_ON((signed short)ref < 0);
608 np->grant_rx_ref[id] = ref;
609 gnttab_grant_foreign_transfer_ref(ref,
610 np->xbdev->otherend_id,
611 __pa(skb->head)>>PAGE_SHIFT);
612 RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
613 np->rx_pfn_array[i] = virt_to_mfn(skb->head);
615 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
616 /* Remove this page before passing back to Xen. */
617 set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
618 INVALID_P2M_ENTRY);
619 MULTI_update_va_mapping(np->rx_mcl+i,
620 (unsigned long)skb->head,
621 __pte(0), 0);
622 }
623 }
625 /* Tell the balloon driver what is going on. */
626 balloon_update_driver_allowance(i);
628 set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array);
629 reservation.nr_extents = i;
630 reservation.extent_order = 0;
631 reservation.address_bits = 0;
632 reservation.domid = DOMID_SELF;
634 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
635 /* After all PTEs have been zapped, flush the TLB. */
636 np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
637 UVMF_TLB_FLUSH|UVMF_ALL;
639 /* Give away a batch of pages. */
640 np->rx_mcl[i].op = __HYPERVISOR_memory_op;
641 np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
642 np->rx_mcl[i].args[1] = (unsigned long)&reservation;
644 /* Zap PTEs and give away pages in one big multicall. */
645 (void)HYPERVISOR_multicall(np->rx_mcl, i+1);
647 /* Check return status of HYPERVISOR_memory_op(). */
648 if (unlikely(np->rx_mcl[i].result != i))
649 panic("Unable to reduce memory reservation\n");
650 } else
651 if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
652 &reservation) != i)
653 panic("Unable to reduce memory reservation\n");
655 /* Above is a suitable barrier to ensure backend will see requests. */
656 np->rx.req_prod_pvt = req_prod + i;
657 RING_PUSH_REQUESTS(&np->rx);
658 }
660 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
661 struct netif_tx_request *tx)
662 {
663 struct netfront_info *np = netdev_priv(dev);
664 char *data = skb->data;
665 unsigned long mfn;
666 RING_IDX prod = np->tx.req_prod_pvt;
667 int frags = skb_shinfo(skb)->nr_frags;
668 unsigned int offset = offset_in_page(data);
669 unsigned int len = skb_headlen(skb);
670 unsigned int id;
671 grant_ref_t ref;
672 int i;
674 while (len > PAGE_SIZE - offset) {
675 tx->size = PAGE_SIZE - offset;
676 tx->flags |= NETTXF_more_data;
677 len -= tx->size;
678 data += tx->size;
679 offset = 0;
681 id = get_id_from_freelist(np->tx_skbs);
682 np->tx_skbs[id] = skb_get(skb);
683 tx = RING_GET_REQUEST(&np->tx, prod++);
684 tx->id = id;
685 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
686 BUG_ON((signed short)ref < 0);
688 mfn = virt_to_mfn(data);
689 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
690 mfn, GNTMAP_readonly);
692 tx->gref = np->grant_tx_ref[id] = ref;
693 tx->offset = offset;
694 tx->size = len;
695 tx->flags = 0;
696 }
698 for (i = 0; i < frags; i++) {
699 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
701 tx->flags |= NETTXF_more_data;
703 id = get_id_from_freelist(np->tx_skbs);
704 np->tx_skbs[id] = skb_get(skb);
705 tx = RING_GET_REQUEST(&np->tx, prod++);
706 tx->id = id;
707 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
708 BUG_ON((signed short)ref < 0);
710 mfn = pfn_to_mfn(page_to_pfn(frag->page));
711 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
712 mfn, GNTMAP_readonly);
714 tx->gref = np->grant_tx_ref[id] = ref;
715 tx->offset = frag->page_offset;
716 tx->size = frag->size;
717 tx->flags = 0;
718 }
720 np->tx.req_prod_pvt = prod;
721 }
723 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
724 {
725 unsigned short id;
726 struct netfront_info *np = netdev_priv(dev);
727 struct netif_tx_request *tx;
728 struct netif_extra_info *extra;
729 char *data = skb->data;
730 RING_IDX i;
731 grant_ref_t ref;
732 unsigned long mfn;
733 int notify;
734 int frags = skb_shinfo(skb)->nr_frags;
735 unsigned int offset = offset_in_page(data);
736 unsigned int len = skb_headlen(skb);
738 frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
739 if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
740 printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
741 frags);
742 dump_stack();
743 goto drop;
744 }
746 spin_lock_irq(&np->tx_lock);
748 if (unlikely(!netif_carrier_ok(dev) ||
749 (frags > 1 && !xennet_can_sg(dev)) ||
750 netif_needs_gso(dev, skb))) {
751 spin_unlock_irq(&np->tx_lock);
752 goto drop;
753 }
755 i = np->tx.req_prod_pvt;
757 id = get_id_from_freelist(np->tx_skbs);
758 np->tx_skbs[id] = skb;
760 tx = RING_GET_REQUEST(&np->tx, i);
762 tx->id = id;
763 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
764 BUG_ON((signed short)ref < 0);
765 mfn = virt_to_mfn(data);
766 gnttab_grant_foreign_access_ref(
767 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
768 tx->gref = np->grant_tx_ref[id] = ref;
769 tx->offset = offset;
770 tx->size = len;
772 tx->flags = 0;
773 extra = NULL;
775 if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
776 tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
777 if (skb->proto_data_valid) /* remote but checksummed? */
778 tx->flags |= NETTXF_data_validated;
780 if (skb_shinfo(skb)->gso_size) {
781 struct netif_extra_info *gso = (struct netif_extra_info *)
782 RING_GET_REQUEST(&np->tx, ++i);
784 if (extra)
785 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
786 else
787 tx->flags |= NETTXF_extra_info;
789 gso->u.gso.size = skb_shinfo(skb)->gso_size;
790 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
792 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
793 gso->flags = 0;
794 extra = gso;
795 }
797 np->tx.req_prod_pvt = i + 1;
799 xennet_make_frags(skb, dev, tx);
800 tx->size = skb->len;
802 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
803 if (notify)
804 notify_remote_via_irq(np->irq);
806 network_tx_buf_gc(dev);
808 if (!netfront_tx_slot_available(np))
809 netif_stop_queue(dev);
811 spin_unlock_irq(&np->tx_lock);
813 np->stats.tx_bytes += skb->len;
814 np->stats.tx_packets++;
816 return 0;
818 drop:
819 np->stats.tx_dropped++;
820 dev_kfree_skb(skb);
821 return 0;
822 }
824 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
825 {
826 struct net_device *dev = dev_id;
827 struct netfront_info *np = netdev_priv(dev);
828 unsigned long flags;
830 spin_lock_irqsave(&np->tx_lock, flags);
831 network_tx_buf_gc(dev);
832 spin_unlock_irqrestore(&np->tx_lock, flags);
834 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx) &&
835 likely(netif_running(dev)))
836 netif_rx_schedule(dev);
838 return IRQ_HANDLED;
839 }
842 static int netif_poll(struct net_device *dev, int *pbudget)
843 {
844 struct netfront_info *np = netdev_priv(dev);
845 struct sk_buff *skb, *nskb;
846 struct netif_rx_response *rx;
847 RING_IDX i, rp;
848 struct mmu_update *mmu = np->rx_mmu;
849 struct multicall_entry *mcl = np->rx_mcl;
850 int work_done, budget, more_to_do = 1;
851 struct sk_buff_head rxq;
852 unsigned long flags;
853 unsigned long mfn;
854 grant_ref_t ref;
856 spin_lock(&np->rx_lock);
858 if (unlikely(!netif_carrier_ok(dev))) {
859 spin_unlock(&np->rx_lock);
860 return 0;
861 }
863 skb_queue_head_init(&rxq);
865 if ((budget = *pbudget) > dev->quota)
866 budget = dev->quota;
867 rp = np->rx.sring->rsp_prod;
868 rmb(); /* Ensure we see queued responses up to 'rp'. */
870 for (i = np->rx.rsp_cons, work_done = 0;
871 (i != rp) && (work_done < budget);
872 i++, work_done++) {
873 rx = RING_GET_RESPONSE(&np->rx, i);
875 /*
876 * This definitely indicates a bug, either in this driver or in
877 * the backend driver. In the future this should flag the bad
878 * situation to the system controller to reboot the backend.
879 */
880 if ((ref = np->grant_rx_ref[rx->id]) == GRANT_INVALID_REF) {
881 WPRINTK("Bad rx response id %d.\n", rx->id);
882 work_done--;
883 continue;
884 }
886 /* Memory pressure, insufficient buffer headroom, ... */
887 if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
888 if (net_ratelimit())
889 WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
890 rx->id, rx->status);
891 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
892 rx->id;
893 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref =
894 ref;
895 np->rx.req_prod_pvt++;
896 RING_PUSH_REQUESTS(&np->rx);
897 work_done--;
898 continue;
899 }
901 gnttab_release_grant_reference(&np->gref_rx_head, ref);
902 np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
904 skb = np->rx_skbs[rx->id];
905 add_id_to_freelist(np->rx_skbs, rx->id);
907 /* NB. We handle skb overflow later. */
908 skb->data = skb->head + rx->offset;
909 skb->len = rx->status;
910 skb->tail = skb->data + skb->len;
912 /*
913 * Old backends do not assert data_validated but we
914 * can infer it from csum_blank so test both flags.
915 */
916 if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) {
917 skb->ip_summed = CHECKSUM_UNNECESSARY;
918 skb->proto_data_valid = 1;
919 } else {
920 skb->ip_summed = CHECKSUM_NONE;
921 skb->proto_data_valid = 0;
922 }
923 skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
925 np->stats.rx_packets++;
926 np->stats.rx_bytes += rx->status;
928 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
929 /* Remap the page. */
930 MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
931 pfn_pte_ma(mfn, PAGE_KERNEL),
932 0);
933 mcl++;
934 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
935 | MMU_MACHPHYS_UPDATE;
936 mmu->val = __pa(skb->head) >> PAGE_SHIFT;
937 mmu++;
939 set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
940 mfn);
941 }
943 __skb_queue_tail(&rxq, skb);
944 }
946 /* Some pages are no longer absent... */
947 balloon_update_driver_allowance(-work_done);
949 /* Do all the remapping work, and M2P updates, in one big hypercall. */
950 if (likely((mcl - np->rx_mcl) != 0)) {
951 mcl->op = __HYPERVISOR_mmu_update;
952 mcl->args[0] = (unsigned long)np->rx_mmu;
953 mcl->args[1] = mmu - np->rx_mmu;
954 mcl->args[2] = 0;
955 mcl->args[3] = DOMID_SELF;
956 mcl++;
957 (void)HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
958 }
960 while ((skb = __skb_dequeue(&rxq)) != NULL) {
961 if (skb->len > (dev->mtu + ETH_HLEN + 4)) {
962 if (net_ratelimit())
963 printk(KERN_INFO "Received packet too big for "
964 "MTU (%d > %d)\n",
965 skb->len - ETH_HLEN - 4, dev->mtu);
966 skb->len = 0;
967 skb->tail = skb->data;
968 init_skb_shinfo(skb);
969 dev_kfree_skb(skb);
970 continue;
971 }
973 /*
974 * Enough room in skbuff for the data we were passed? Also,
975 * Linux expects at least 16 bytes headroom in each rx buffer.
976 */
977 if (unlikely(skb->tail > skb->end) ||
978 unlikely((skb->data - skb->head) < 16)) {
979 if (net_ratelimit()) {
980 if (skb->tail > skb->end)
981 printk(KERN_INFO "Received packet "
982 "is %zd bytes beyond tail.\n",
983 skb->tail - skb->end);
984 else
985 printk(KERN_INFO "Received packet "
986 "is %zd bytes before head.\n",
987 16 - (skb->data - skb->head));
988 }
990 nskb = __dev_alloc_skb(skb->len + 2,
991 GFP_ATOMIC|__GFP_NOWARN);
992 if (nskb != NULL) {
993 skb_reserve(nskb, 2);
994 skb_put(nskb, skb->len);
995 memcpy(nskb->data, skb->data, skb->len);
996 /* Copy any other fields we already set up. */
997 nskb->dev = skb->dev;
998 nskb->ip_summed = skb->ip_summed;
999 nskb->proto_data_valid = skb->proto_data_valid;
1000 nskb->proto_csum_blank = skb->proto_csum_blank;
1001 }
1003 /* Reinitialise and then destroy the old skbuff. */
1004 skb->len = 0;
1005 skb->tail = skb->data;
1006 init_skb_shinfo(skb);
1007 dev_kfree_skb(skb);
1009 /* Switch old for new, if we copied the buffer. */
1010 if ((skb = nskb) == NULL)
1011 continue;
1012 }
1014 /* Set the shinfo area, which is hidden behind the data. */
1015 init_skb_shinfo(skb);
1016 /* Ethernet work: Delayed to here as it peeks the header. */
1017 skb->protocol = eth_type_trans(skb, dev);
1019 /* Pass it up. */
1020 netif_receive_skb(skb);
1021 dev->last_rx = jiffies;
1022 }
1024 np->rx.rsp_cons = i;
1026 /* If we get a callback with very few responses, reduce fill target. */
1027 /* NB. Note exponential increase, linear decrease. */
1028 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1029 ((3*np->rx_target) / 4)) &&
1030 (--np->rx_target < np->rx_min_target))
1031 np->rx_target = np->rx_min_target;
1033 network_alloc_rx_buffers(dev);
1035 *pbudget -= work_done;
1036 dev->quota -= work_done;
1038 if (work_done < budget) {
1039 local_irq_save(flags);
1041 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1042 if (!more_to_do)
1043 __netif_rx_complete(dev);
1045 local_irq_restore(flags);
1046 }
1048 spin_unlock(&np->rx_lock);
1050 return more_to_do;
1051 }
1054 static int network_close(struct net_device *dev)
1055 {
1056 struct netfront_info *np = netdev_priv(dev);
1057 netif_stop_queue(np->netdev);
1058 return 0;
1059 }
1062 static struct net_device_stats *network_get_stats(struct net_device *dev)
1063 {
1064 struct netfront_info *np = netdev_priv(dev);
1065 return &np->stats;
1066 }
1068 static int xennet_change_mtu(struct net_device *dev, int mtu)
1069 {
1070 int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
1072 if (mtu > max)
1073 return -EINVAL;
1074 dev->mtu = mtu;
1075 return 0;
1076 }
1078 static int xennet_set_sg(struct net_device *dev, u32 data)
1079 {
1080 if (data) {
1081 struct netfront_info *np = netdev_priv(dev);
1082 int val;
1084 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1085 "%d", &val) < 0)
1086 val = 0;
1087 if (!val)
1088 return -ENOSYS;
1089 } else if (dev->mtu > ETH_DATA_LEN)
1090 dev->mtu = ETH_DATA_LEN;
1092 return ethtool_op_set_sg(dev, data);
1093 }
1095 static int xennet_set_tso(struct net_device *dev, u32 data)
1096 {
1097 if (data) {
1098 struct netfront_info *np = netdev_priv(dev);
1099 int val;
1101 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-tso",
1102 "%d", &val) < 0)
1103 val = 0;
1104 #if 0 /* KAF: After the protocol is finalised. */
1105 if (!val)
1106 #endif
1107 return -ENOSYS;
1108 }
1110 return ethtool_op_set_tso(dev, data);
1111 }
1113 static void xennet_set_features(struct net_device *dev)
1114 {
1115 if (!xennet_set_sg(dev, 1))
1116 xennet_set_tso(dev, 1);
1117 }
1119 static void network_connect(struct net_device *dev)
1120 {
1121 struct netfront_info *np = netdev_priv(dev);
1122 int i, requeue_idx;
1123 struct sk_buff *skb;
1125 xennet_set_features(dev);
1127 spin_lock_irq(&np->tx_lock);
1128 spin_lock(&np->rx_lock);
1130 /*
1131 * Recovery procedure:
1132 * NB. Freelist index entries are always going to be less than
1133 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
1134 * greater than PAGE_OFFSET: we use this property to distinguish
1135 * them.
1136 */
1138 /* Step 1: Discard all pending TX packet fragments. */
1139 for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
1140 if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
1141 continue;
1143 skb = np->tx_skbs[i];
1144 gnttab_end_foreign_access_ref(
1145 np->grant_tx_ref[i], GNTMAP_readonly);
1146 gnttab_release_grant_reference(
1147 &np->gref_tx_head, np->grant_tx_ref[i]);
1148 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1149 add_id_to_freelist(np->tx_skbs, i);
1150 dev_kfree_skb_irq(skb);
1151 }
1153 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1154 for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) {
1155 if ((unsigned long)np->rx_skbs[i] < PAGE_OFFSET)
1156 continue;
1157 gnttab_grant_foreign_transfer_ref(
1158 np->grant_rx_ref[i], np->xbdev->otherend_id,
1159 __pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
1160 RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
1161 np->grant_rx_ref[i];
1162 RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
1163 requeue_idx++;
1164 }
1166 np->rx.req_prod_pvt = requeue_idx;
1167 RING_PUSH_REQUESTS(&np->rx);
1169 /*
1170 * Step 3: All public and private state should now be sane. Get
1171 * ready to start sending and receiving packets and give the driver
1172 * domain a kick because we've probably just requeued some
1173 * packets.
1174 */
1175 netif_carrier_on(dev);
1176 notify_remote_via_irq(np->irq);
1177 network_tx_buf_gc(dev);
1178 network_alloc_rx_buffers(dev);
1180 spin_unlock(&np->rx_lock);
1181 spin_unlock_irq(&np->tx_lock);
1182 }
1184 static void netif_uninit(struct net_device *dev)
1185 {
1186 struct netfront_info *np = netdev_priv(dev);
1187 gnttab_free_grant_references(np->gref_tx_head);
1188 gnttab_free_grant_references(np->gref_rx_head);
1189 }
1191 static struct ethtool_ops network_ethtool_ops =
1192 {
1193 .get_tx_csum = ethtool_op_get_tx_csum,
1194 .set_tx_csum = ethtool_op_set_tx_csum,
1195 .get_sg = ethtool_op_get_sg,
1196 .set_sg = xennet_set_sg,
1197 .get_tso = ethtool_op_get_tso,
1198 .set_tso = xennet_set_tso,
1199 };
1201 #ifdef CONFIG_SYSFS
1202 static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
1203 {
1204 struct net_device *netdev = container_of(cd, struct net_device,
1205 class_dev);
1206 struct netfront_info *info = netdev_priv(netdev);
1208 return sprintf(buf, "%u\n", info->rx_min_target);
1209 }
1211 static ssize_t store_rxbuf_min(struct class_device *cd,
1212 const char *buf, size_t len)
1213 {
1214 struct net_device *netdev = container_of(cd, struct net_device,
1215 class_dev);
1216 struct netfront_info *np = netdev_priv(netdev);
1217 char *endp;
1218 unsigned long target;
1220 if (!capable(CAP_NET_ADMIN))
1221 return -EPERM;
1223 target = simple_strtoul(buf, &endp, 0);
1224 if (endp == buf)
1225 return -EBADMSG;
1227 if (target < RX_MIN_TARGET)
1228 target = RX_MIN_TARGET;
1229 if (target > RX_MAX_TARGET)
1230 target = RX_MAX_TARGET;
1232 spin_lock(&np->rx_lock);
1233 if (target > np->rx_max_target)
1234 np->rx_max_target = target;
1235 np->rx_min_target = target;
1236 if (target > np->rx_target)
1237 np->rx_target = target;
1239 network_alloc_rx_buffers(netdev);
1241 spin_unlock(&np->rx_lock);
1242 return len;
1243 }
1245 static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
1246 {
1247 struct net_device *netdev = container_of(cd, struct net_device,
1248 class_dev);
1249 struct netfront_info *info = netdev_priv(netdev);
1251 return sprintf(buf, "%u\n", info->rx_max_target);
1252 }
1254 static ssize_t store_rxbuf_max(struct class_device *cd,
1255 const char *buf, size_t len)
1256 {
1257 struct net_device *netdev = container_of(cd, struct net_device,
1258 class_dev);
1259 struct netfront_info *np = netdev_priv(netdev);
1260 char *endp;
1261 unsigned long target;
1263 if (!capable(CAP_NET_ADMIN))
1264 return -EPERM;
1266 target = simple_strtoul(buf, &endp, 0);
1267 if (endp == buf)
1268 return -EBADMSG;
1270 if (target < RX_MIN_TARGET)
1271 target = RX_MIN_TARGET;
1272 if (target > RX_MAX_TARGET)
1273 target = RX_MAX_TARGET;
1275 spin_lock(&np->rx_lock);
1276 if (target < np->rx_min_target)
1277 np->rx_min_target = target;
1278 np->rx_max_target = target;
1279 if (target < np->rx_target)
1280 np->rx_target = target;
1282 network_alloc_rx_buffers(netdev);
1284 spin_unlock(&np->rx_lock);
1285 return len;
1286 }
1288 static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
1289 {
1290 struct net_device *netdev = container_of(cd, struct net_device,
1291 class_dev);
1292 struct netfront_info *info = netdev_priv(netdev);
1294 return sprintf(buf, "%u\n", info->rx_target);
1295 }
1297 static const struct class_device_attribute xennet_attrs[] = {
1298 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
1299 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
1300 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
1301 };
1303 static int xennet_sysfs_addif(struct net_device *netdev)
1304 {
1305 int i;
1306 int error = 0;
1308 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1309 error = class_device_create_file(&netdev->class_dev,
1310 &xennet_attrs[i]);
1311 if (error)
1312 goto fail;
1313 }
1314 return 0;
1316 fail:
1317 while (--i >= 0)
1318 class_device_remove_file(&netdev->class_dev,
1319 &xennet_attrs[i]);
1320 return error;
1321 }
1323 static void xennet_sysfs_delif(struct net_device *netdev)
1324 {
1325 int i;
1327 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1328 class_device_remove_file(&netdev->class_dev,
1329 &xennet_attrs[i]);
1330 }
1331 }
1333 #endif /* CONFIG_SYSFS */
1336 /*
1337 * Nothing to do here. Virtual interface is point-to-point and the
1338 * physical interface is probably promiscuous anyway.
1339 */
1340 static void network_set_multicast_list(struct net_device *dev)
1341 {
1342 }
1344 /** Create a network device.
1345 * @param handle device handle
1346 * @param val return parameter for created device
1347 * @return 0 on success, error code otherwise
1348 */
1349 static struct net_device * __devinit create_netdev(int handle,
1350 struct xenbus_device *dev)
1351 {
1352 int i, err = 0;
1353 struct net_device *netdev = NULL;
1354 struct netfront_info *np = NULL;
1356 netdev = alloc_etherdev(sizeof(struct netfront_info));
1357 if (!netdev) {
1358 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
1359 __FUNCTION__);
1360 return ERR_PTR(-ENOMEM);
1361 }
1363 np = netdev_priv(netdev);
1364 np->handle = handle;
1365 np->xbdev = dev;
1367 netif_carrier_off(netdev);
1369 spin_lock_init(&np->tx_lock);
1370 spin_lock_init(&np->rx_lock);
1372 skb_queue_head_init(&np->rx_batch);
1373 np->rx_target = RX_DFL_MIN_TARGET;
1374 np->rx_min_target = RX_DFL_MIN_TARGET;
1375 np->rx_max_target = RX_MAX_TARGET;
1377 init_timer(&np->rx_refill_timer);
1378 np->rx_refill_timer.data = (unsigned long)netdev;
1379 np->rx_refill_timer.function = rx_refill_timeout;
1381 /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
1382 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
1383 np->tx_skbs[i] = (void *)((unsigned long) i+1);
1384 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1385 }
1387 for (i = 0; i <= NET_RX_RING_SIZE; i++) {
1388 np->rx_skbs[i] = (void *)((unsigned long) i+1);
1389 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1390 }
1392 /* A grant for every tx ring slot */
1393 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1394 &np->gref_tx_head) < 0) {
1395 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
1396 err = -ENOMEM;
1397 goto exit;
1398 }
1399 /* A grant for every rx ring slot */
1400 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1401 &np->gref_rx_head) < 0) {
1402 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
1403 err = -ENOMEM;
1404 goto exit_free_tx;
1405 }
1407 netdev->open = network_open;
1408 netdev->hard_start_xmit = network_start_xmit;
1409 netdev->stop = network_close;
1410 netdev->get_stats = network_get_stats;
1411 netdev->poll = netif_poll;
1412 netdev->set_multicast_list = network_set_multicast_list;
1413 netdev->uninit = netif_uninit;
1414 netdev->change_mtu = xennet_change_mtu;
1415 netdev->weight = 64;
1416 netdev->features = NETIF_F_IP_CSUM;
1418 SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
1419 SET_MODULE_OWNER(netdev);
1420 SET_NETDEV_DEV(netdev, &dev->dev);
1422 err = register_netdev(netdev);
1423 if (err) {
1424 printk(KERN_WARNING "%s> register_netdev err=%d\n",
1425 __FUNCTION__, err);
1426 goto exit_free_rx;
1427 }
1429 err = xennet_sysfs_addif(netdev);
1430 if (err) {
1431 /* This can be non-fatal: it only means no tuning parameters */
1432 printk(KERN_WARNING "%s> add sysfs failed err=%d\n",
1433 __FUNCTION__, err);
1434 }
1436 np->netdev = netdev;
1438 return netdev;
1441 exit_free_rx:
1442 gnttab_free_grant_references(np->gref_rx_head);
1443 exit_free_tx:
1444 gnttab_free_grant_references(np->gref_tx_head);
1445 exit:
1446 free_netdev(netdev);
1447 return ERR_PTR(err);
1448 }
1450 /*
1451 * We use this notifier to send out a fake ARP reply to reset switches and
1452 * router ARP caches when an IP interface is brought up on a VIF.
1453 */
1454 static int
1455 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
1456 {
1457 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1458 struct net_device *dev = ifa->ifa_dev->dev;
1460 /* UP event and is it one of our devices? */
1461 if (event == NETDEV_UP && dev->open == network_open)
1462 (void)send_fake_arp(dev);
1464 return NOTIFY_DONE;
1465 }
1468 /* ** Close down ** */
1471 /**
1472 * Handle the change of state of the backend to Closing. We must delete our
1473 * device-layer structures now, to ensure that writes are flushed through to
1474 * the backend. Once this is done, we can switch to Closed in
1475 * acknowledgement.
1476 */
1477 static void netfront_closing(struct xenbus_device *dev)
1478 {
1479 struct netfront_info *info = dev->dev.driver_data;
1481 DPRINTK("netfront_closing: %s removed\n", dev->nodename);
1483 close_netdev(info);
1485 xenbus_switch_state(dev, XenbusStateClosed);
1486 }
1489 static int __devexit netfront_remove(struct xenbus_device *dev)
1490 {
1491 struct netfront_info *info = dev->dev.driver_data;
1493 DPRINTK("%s\n", dev->nodename);
1495 netif_disconnect_backend(info);
1496 free_netdev(info->netdev);
1498 return 0;
1499 }
1502 static void close_netdev(struct netfront_info *info)
1503 {
1504 del_timer_sync(&info->rx_refill_timer);
1506 xennet_sysfs_delif(info->netdev);
1507 unregister_netdev(info->netdev);
1508 }
1511 static void netif_disconnect_backend(struct netfront_info *info)
1512 {
1513 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1514 spin_lock_irq(&info->tx_lock);
1515 spin_lock(&info->rx_lock);
1516 netif_carrier_off(info->netdev);
1517 spin_unlock(&info->rx_lock);
1518 spin_unlock_irq(&info->tx_lock);
1520 if (info->irq)
1521 unbind_from_irqhandler(info->irq, info->netdev);
1522 info->evtchn = info->irq = 0;
1524 end_access(info->tx_ring_ref, info->tx.sring);
1525 end_access(info->rx_ring_ref, info->rx.sring);
1526 info->tx_ring_ref = GRANT_INVALID_REF;
1527 info->rx_ring_ref = GRANT_INVALID_REF;
1528 info->tx.sring = NULL;
1529 info->rx.sring = NULL;
1530 }
1533 static void netif_free(struct netfront_info *info)
1534 {
1535 close_netdev(info);
1536 netif_disconnect_backend(info);
1537 free_netdev(info->netdev);
1538 }
1541 static void end_access(int ref, void *page)
1542 {
1543 if (ref != GRANT_INVALID_REF)
1544 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1545 }
1548 /* ** Driver registration ** */
1551 static struct xenbus_device_id netfront_ids[] = {
1552 { "vif" },
1553 { "" }
1554 };
1557 static struct xenbus_driver netfront = {
1558 .name = "vif",
1559 .owner = THIS_MODULE,
1560 .ids = netfront_ids,
1561 .probe = netfront_probe,
1562 .remove = __devexit_p(netfront_remove),
1563 .resume = netfront_resume,
1564 .otherend_changed = backend_changed,
1565 };
1568 static struct notifier_block notifier_inetdev = {
1569 .notifier_call = inetdev_notify,
1570 .next = NULL,
1571 .priority = 0
1572 };
1574 static int __init netif_init(void)
1575 {
1576 if (!is_running_on_xen())
1577 return -ENODEV;
1579 if (xen_start_info->flags & SIF_INITDOMAIN)
1580 return 0;
1582 IPRINTK("Initialising virtual ethernet driver.\n");
1584 (void)register_inetaddr_notifier(&notifier_inetdev);
1586 return xenbus_register_frontend(&netfront);
1587 }
1588 module_init(netif_init);
1591 static void __exit netif_exit(void)
1592 {
1593 unregister_inetaddr_notifier(&notifier_inetdev);
1595 return xenbus_unregister_driver(&netfront);
1596 }
1597 module_exit(netif_exit);
1599 MODULE_LICENSE("Dual BSD/GPL");