ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c @ 14100:e47738923a05

[LINUX] Purge include <linux/config.h>. It has been obsolete for some time now.

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Ian Campbell <ian.campbell@xensource.com>
date Fri Feb 23 16:56:45 2007 +0000 (2007-02-23)
parents 3f63c1825b0d
children 42b29f084c31
1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
30 */
32 #include <linux/module.h>
33 #include <linux/version.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/string.h>
38 #include <linux/errno.h>
39 #include <linux/netdevice.h>
40 #include <linux/inetdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/bitops.h>
45 #include <linux/ethtool.h>
46 #include <linux/in.h>
47 #include <linux/if_ether.h>
48 #include <linux/io.h>
49 #include <linux/moduleparam.h>
50 #include <net/sock.h>
51 #include <net/pkt_sched.h>
52 #include <net/arp.h>
53 #include <net/route.h>
54 #include <asm/uaccess.h>
55 #include <xen/evtchn.h>
56 #include <xen/xenbus.h>
57 #include <xen/interface/io/netif.h>
58 #include <xen/interface/memory.h>
59 #include <xen/balloon.h>
60 #include <asm/page.h>
61 #include <asm/maddr.h>
62 #include <asm/uaccess.h>
63 #include <xen/interface/grant_table.h>
64 #include <xen/gnttab.h>
66 #ifdef HAVE_XEN_PLATFORM_COMPAT_H
67 #include <xen/platform-compat.h>
68 #endif
70 /*
71 * Mutually-exclusive module options to select receive data path:
72 * rx_copy : Packets are copied by network backend into local memory
73 * rx_flip : Page containing packet data is transferred to our ownership
74 * For fully-virtualised guests there is no option - copying must be used.
75 * For paravirtualised guests, flipping is the default.
76 */
77 #ifdef CONFIG_XEN
78 static int MODPARM_rx_copy = 0;
79 module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
80 MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
81 static int MODPARM_rx_flip = 0;
82 module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
83 MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
84 #else
85 static const int MODPARM_rx_copy = 1;
86 static const int MODPARM_rx_flip = 0;
87 #endif
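/* When built as a module, the two options above are ordinary module
 * parameters (for example rx_copy=1 or rx_flip=1 on the modprobe command
 * line); when built into the kernel they can be supplied on the kernel
 * command line instead. Specifying both at once is rejected in netif_init().
 */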
89 #define RX_COPY_THRESHOLD 256
91 /* If we don't have GSO, fake things up so that we never try to use it. */
92 #if defined(NETIF_F_GSO)
93 #define HAVE_GSO 1
94 #define HAVE_TSO 1 /* TSO is a subset of GSO */
95 static inline void dev_disable_gso_features(struct net_device *dev)
96 {
97 /* Turn off all GSO bits except ROBUST. */
98 dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
99 dev->features |= NETIF_F_GSO_ROBUST;
100 }
101 #elif defined(NETIF_F_TSO)
102 #define HAVE_TSO 1
104 /* Some older kernels cannot cope with incorrect checksums,
105 * particularly in netfilter. I'm not sure there is 100% correlation
106 * with the presence of NETIF_F_TSO but it appears to be a good first
107 * approximation.
108 */
109 #define HAVE_NO_CSUM_OFFLOAD 1
111 #define gso_size tso_size
112 #define gso_segs tso_segs
113 static inline void dev_disable_gso_features(struct net_device *dev)
114 {
115 /* Turn off all TSO bits. */
116 dev->features &= ~NETIF_F_TSO;
117 }
118 static inline int skb_is_gso(const struct sk_buff *skb)
119 {
120 return skb_shinfo(skb)->tso_size;
121 }
122 static inline int skb_gso_ok(struct sk_buff *skb, int features)
123 {
124 return (features & NETIF_F_TSO);
125 }
127 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
128 {
129 return skb_is_gso(skb) &&
130 (!skb_gso_ok(skb, dev->features) ||
131 unlikely(skb->ip_summed != CHECKSUM_HW));
132 }
133 #else
134 #define netif_needs_gso(dev, skb) 0
135 #define dev_disable_gso_features(dev) ((void)0)
136 #endif
138 #define GRANT_INVALID_REF 0
140 #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
141 #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
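/* __RING_SIZE() (from xen/interface/io/ring.h) evaluates to the number of
 * slots that fit in one page of shared ring, rounded down to a power of two;
 * that is what lets xennet_rxidx() below turn a ring index into an array
 * index with a simple mask rather than a modulus.
 */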
143 struct netfront_info {
144 struct list_head list;
145 struct net_device *netdev;
147 struct net_device_stats stats;
149 struct netif_tx_front_ring tx;
150 struct netif_rx_front_ring rx;
152 spinlock_t tx_lock;
153 spinlock_t rx_lock;
155 unsigned int irq;
156 unsigned int copying_receiver;
158 /* Receive-ring batched refills. */
159 #define RX_MIN_TARGET 8
160 #define RX_DFL_MIN_TARGET 64
161 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
162 unsigned rx_min_target, rx_max_target, rx_target;
163 struct sk_buff_head rx_batch;
165 struct timer_list rx_refill_timer;
167 /*
168 * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
169 * is an index into a chain of free entries.
170 */
171 struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
172 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
174 #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
175 grant_ref_t gref_tx_head;
176 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
177 grant_ref_t gref_rx_head;
178 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
180 struct xenbus_device *xbdev;
181 int tx_ring_ref;
182 int rx_ring_ref;
183 u8 mac[ETH_ALEN];
185 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
186 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
187 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
188 };
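/* The rx_pfn_array, rx_mcl and rx_mmu members above are scratch space for the
 * page-flipping receive path: rx_pfn_array batches the frames handed back to
 * Xen in network_alloc_rx_buffers(), while rx_mcl and rx_mmu batch the
 * page-table and M2P updates needed when pages are given to, or reclaimed
 * from, the backend.
 */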
190 struct netfront_rx_info {
191 struct netif_rx_response rx;
192 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
193 };
195 /*
196 * Access macros for acquiring/freeing slots in tx_skbs[].
197 */
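/* The free list is threaded through the array itself: tx_skbs[0] holds the
 * index of the first free slot (cast to a pointer) and every free slot holds
 * the index of the next. Slot 0 therefore never carries a real skb, which is
 * why tx_skbs[] and grant_tx_ref[] are sized NET_TX_RING_SIZE + 1.
 */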
199 static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
200 {
201 list[id] = list[0];
202 list[0] = (void *)(unsigned long)id;
203 }
205 static inline unsigned short get_id_from_freelist(struct sk_buff **list)
206 {
207 unsigned int id = (unsigned int)(unsigned long)list[0];
208 list[0] = list[id];
209 return id;
210 }
212 static inline int xennet_rxidx(RING_IDX idx)
213 {
214 return idx & (NET_RX_RING_SIZE - 1);
215 }
217 static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
218 RING_IDX ri)
219 {
220 int i = xennet_rxidx(ri);
221 struct sk_buff *skb = np->rx_skbs[i];
222 np->rx_skbs[i] = NULL;
223 return skb;
224 }
226 static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
227 RING_IDX ri)
228 {
229 int i = xennet_rxidx(ri);
230 grant_ref_t ref = np->grant_rx_ref[i];
231 np->grant_rx_ref[i] = GRANT_INVALID_REF;
232 return ref;
233 }
235 #define DPRINTK(fmt, args...) \
236 pr_debug("netfront (%s:%d) " fmt, \
237 __FUNCTION__, __LINE__, ##args)
238 #define IPRINTK(fmt, args...) \
239 printk(KERN_INFO "netfront: " fmt, ##args)
240 #define WPRINTK(fmt, args...) \
241 printk(KERN_WARNING "netfront: " fmt, ##args)
243 static int setup_device(struct xenbus_device *, struct netfront_info *);
244 static struct net_device *create_netdev(struct xenbus_device *);
246 static void end_access(int, void *);
247 static void netif_disconnect_backend(struct netfront_info *);
249 static int network_connect(struct net_device *);
250 static void network_tx_buf_gc(struct net_device *);
251 static void network_alloc_rx_buffers(struct net_device *);
252 static int send_fake_arp(struct net_device *);
254 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
256 #ifdef CONFIG_SYSFS
257 static int xennet_sysfs_addif(struct net_device *netdev);
258 static void xennet_sysfs_delif(struct net_device *netdev);
259 #else /* !CONFIG_SYSFS */
260 #define xennet_sysfs_addif(dev) (0)
261 #define xennet_sysfs_delif(dev) do { } while(0)
262 #endif
264 static inline int xennet_can_sg(struct net_device *dev)
265 {
266 return dev->features & NETIF_F_SG;
267 }
269 /**
270 * Entry point to this code when a new device is created. Allocate the basic
271 * structures and the ring buffers for communication with the backend, and
272 * inform the backend of the appropriate details for those.
273 */
274 static int __devinit netfront_probe(struct xenbus_device *dev,
275 const struct xenbus_device_id *id)
276 {
277 int err;
278 struct net_device *netdev;
279 struct netfront_info *info;
281 netdev = create_netdev(dev);
282 if (IS_ERR(netdev)) {
283 err = PTR_ERR(netdev);
284 xenbus_dev_fatal(dev, err, "creating netdev");
285 return err;
286 }
288 info = netdev_priv(netdev);
289 dev->dev.driver_data = info;
291 err = register_netdev(info->netdev);
292 if (err) {
293 printk(KERN_WARNING "%s: register_netdev err=%d\n",
294 __FUNCTION__, err);
295 goto fail;
296 }
298 err = xennet_sysfs_addif(info->netdev);
299 if (err) {
300 unregister_netdev(info->netdev);
301 printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
302 __FUNCTION__, err);
303 goto fail;
304 }
306 return 0;
308 fail:
309 free_netdev(netdev);
310 dev->dev.driver_data = NULL;
311 return err;
312 }
314 static int __devexit netfront_remove(struct xenbus_device *dev)
315 {
316 struct netfront_info *info = dev->dev.driver_data;
318 DPRINTK("%s\n", dev->nodename);
320 netif_disconnect_backend(info);
322 del_timer_sync(&info->rx_refill_timer);
324 xennet_sysfs_delif(info->netdev);
326 unregister_netdev(info->netdev);
328 free_netdev(info->netdev);
330 return 0;
331 }
333 /**
334 * We are reconnecting to the backend, due to a suspend/resume, or a backend
335 * driver restart. We tear down our netif structure and recreate it, but
336 * leave the device-layer structures intact so that this is transparent to the
337 * rest of the kernel.
338 */
339 static int netfront_resume(struct xenbus_device *dev)
340 {
341 struct netfront_info *info = dev->dev.driver_data;
343 DPRINTK("%s\n", dev->nodename);
345 netif_disconnect_backend(info);
346 return 0;
347 }
349 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
350 {
351 char *s, *e, *macstr;
352 int i;
354 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
355 if (IS_ERR(macstr))
356 return PTR_ERR(macstr);
358 for (i = 0; i < ETH_ALEN; i++) {
359 mac[i] = simple_strtoul(s, &e, 16);
360 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
361 kfree(macstr);
362 return -ENOENT;
363 }
364 s = e+1;
365 }
367 kfree(macstr);
368 return 0;
369 }
371 /* Common code used when first setting up, and when resuming. */
372 static int talk_to_backend(struct xenbus_device *dev,
373 struct netfront_info *info)
374 {
375 const char *message;
376 struct xenbus_transaction xbt;
377 int err;
379 err = xen_net_read_mac(dev, info->mac);
380 if (err) {
381 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
382 goto out;
383 }
385 /* Create shared ring, alloc event channel. */
386 err = setup_device(dev, info);
387 if (err)
388 goto out;
390 again:
391 err = xenbus_transaction_start(&xbt);
392 if (err) {
393 xenbus_dev_fatal(dev, err, "starting transaction");
394 goto destroy_ring;
395 }
397 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
398 info->tx_ring_ref);
399 if (err) {
400 message = "writing tx ring-ref";
401 goto abort_transaction;
402 }
403 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
404 info->rx_ring_ref);
405 if (err) {
406 message = "writing rx ring-ref";
407 goto abort_transaction;
408 }
409 err = xenbus_printf(xbt, dev->nodename,
410 "event-channel", "%u",
411 irq_to_evtchn_port(info->irq));
412 if (err) {
413 message = "writing event-channel";
414 goto abort_transaction;
415 }
417 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
418 info->copying_receiver);
419 if (err) {
420 message = "writing request-rx-copy";
421 goto abort_transaction;
422 }
424 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
425 if (err) {
426 message = "writing feature-rx-notify";
427 goto abort_transaction;
428 }
430 #ifdef HAVE_NO_CSUM_OFFLOAD
431 err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", 1);
432 if (err) {
433 message = "writing feature-no-csum-offload";
434 goto abort_transaction;
435 }
436 #endif
438 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
439 if (err) {
440 message = "writing feature-sg";
441 goto abort_transaction;
442 }
444 #ifdef HAVE_TSO
445 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
446 if (err) {
447 message = "writing feature-gso-tcpv4";
448 goto abort_transaction;
449 }
450 #endif
452 err = xenbus_transaction_end(xbt, 0);
453 if (err) {
454 if (err == -EAGAIN)
455 goto again;
456 xenbus_dev_fatal(dev, err, "completing transaction");
457 goto destroy_ring;
458 }
460 return 0;
462 abort_transaction:
463 xenbus_transaction_end(xbt, 1);
464 xenbus_dev_fatal(dev, err, "%s", message);
465 destroy_ring:
466 netif_disconnect_backend(info);
467 out:
468 return err;
469 }
471 static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
472 {
473 struct netif_tx_sring *txs;
474 struct netif_rx_sring *rxs;
475 int err;
476 struct net_device *netdev = info->netdev;
478 info->tx_ring_ref = GRANT_INVALID_REF;
479 info->rx_ring_ref = GRANT_INVALID_REF;
480 info->rx.sring = NULL;
481 info->tx.sring = NULL;
482 info->irq = 0;
484 txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
485 if (!txs) {
486 err = -ENOMEM;
487 xenbus_dev_fatal(dev, err, "allocating tx ring page");
488 goto fail;
489 }
490 SHARED_RING_INIT(txs);
491 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
493 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
494 if (err < 0) {
495 free_page((unsigned long)txs);
496 goto fail;
497 }
498 info->tx_ring_ref = err;
500 rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
501 if (!rxs) {
502 err = -ENOMEM;
503 xenbus_dev_fatal(dev, err, "allocating rx ring page");
504 goto fail;
505 }
506 SHARED_RING_INIT(rxs);
507 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
509 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
510 if (err < 0) {
511 free_page((unsigned long)rxs);
512 goto fail;
513 }
514 info->rx_ring_ref = err;
516 memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
518 err = bind_listening_port_to_irqhandler(
519 dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name,
520 netdev);
521 if (err < 0)
522 goto fail;
523 info->irq = err;
525 return 0;
527 fail:
528 return err;
529 }
531 /**
532 * Callback received when the backend's state changes.
533 */
534 static void backend_changed(struct xenbus_device *dev,
535 enum xenbus_state backend_state)
536 {
537 struct netfront_info *np = dev->dev.driver_data;
538 struct net_device *netdev = np->netdev;
540 DPRINTK("%s\n", xenbus_strstate(backend_state));
542 switch (backend_state) {
543 case XenbusStateInitialising:
544 case XenbusStateInitialised:
545 case XenbusStateConnected:
546 case XenbusStateUnknown:
547 case XenbusStateClosed:
548 break;
550 case XenbusStateInitWait:
551 if (dev->state != XenbusStateInitialising)
552 break;
553 if (network_connect(netdev) != 0)
554 break;
555 xenbus_switch_state(dev, XenbusStateConnected);
556 (void)send_fake_arp(netdev);
557 break;
559 case XenbusStateClosing:
560 xenbus_frontend_closed(dev);
561 break;
562 }
563 }
565 /** Send a packet on a net device to encourage switches to learn the
566 * MAC. We send a fake ARP reply.
567 *
568 * @param dev device
569 * @return 0 on success, error code otherwise
570 */
571 static int send_fake_arp(struct net_device *dev)
572 {
573 struct sk_buff *skb;
574 u32 src_ip, dst_ip;
576 dst_ip = INADDR_BROADCAST;
577 src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
579 /* No IP? Then nothing to do. */
580 if (src_ip == 0)
581 return 0;
583 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
584 dst_ip, dev, src_ip,
585 /*dst_hw*/ NULL, /*src_hw*/ NULL,
586 /*target_hw*/ dev->dev_addr);
587 if (skb == NULL)
588 return -ENOMEM;
590 return dev_queue_xmit(skb);
591 }
593 static int network_open(struct net_device *dev)
594 {
595 struct netfront_info *np = netdev_priv(dev);
597 memset(&np->stats, 0, sizeof(np->stats));
599 spin_lock(&np->rx_lock);
600 if (netif_carrier_ok(dev)) {
601 network_alloc_rx_buffers(dev);
602 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
603 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
604 netif_rx_schedule(dev);
605 }
606 spin_unlock(&np->rx_lock);
608 netif_start_queue(dev);
610 return 0;
611 }
613 static inline int netfront_tx_slot_available(struct netfront_info *np)
614 {
615 return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
616 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
617 }
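/* The slack of MAX_SKB_FRAGS + 2 matches the worst case in
 * network_start_xmit(): up to MAX_SKB_FRAGS + 1 slots for the linear area and
 * page fragments (anything larger is dropped there), plus one more slot for a
 * possible extra-info (GSO) request.
 */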
619 static inline void network_maybe_wake_tx(struct net_device *dev)
620 {
621 struct netfront_info *np = netdev_priv(dev);
623 if (unlikely(netif_queue_stopped(dev)) &&
624 netfront_tx_slot_available(np) &&
625 likely(netif_running(dev)))
626 netif_wake_queue(dev);
627 }
629 static void network_tx_buf_gc(struct net_device *dev)
630 {
631 RING_IDX cons, prod;
632 unsigned short id;
633 struct netfront_info *np = netdev_priv(dev);
634 struct sk_buff *skb;
636 BUG_ON(!netif_carrier_ok(dev));
638 do {
639 prod = np->tx.sring->rsp_prod;
640 rmb(); /* Ensure we see responses up to 'rp'. */
642 for (cons = np->tx.rsp_cons; cons != prod; cons++) {
643 struct netif_tx_response *txrsp;
645 txrsp = RING_GET_RESPONSE(&np->tx, cons);
646 if (txrsp->status == NETIF_RSP_NULL)
647 continue;
649 id = txrsp->id;
650 skb = np->tx_skbs[id];
651 if (unlikely(gnttab_query_foreign_access(
652 np->grant_tx_ref[id]) != 0)) {
653 printk(KERN_ALERT "network_tx_buf_gc: warning "
654 "-- grant still in use by backend "
655 "domain.\n");
656 BUG();
657 }
658 gnttab_end_foreign_access_ref(
659 np->grant_tx_ref[id], GNTMAP_readonly);
660 gnttab_release_grant_reference(
661 &np->gref_tx_head, np->grant_tx_ref[id]);
662 np->grant_tx_ref[id] = GRANT_INVALID_REF;
663 add_id_to_freelist(np->tx_skbs, id);
664 dev_kfree_skb_irq(skb);
665 }
667 np->tx.rsp_cons = prod;
669 /*
670 * Set a new event, then check for race with update of tx_cons.
671 * Note that it is essential to schedule a callback, no matter
672 * how few buffers are pending. Even if there is space in the
673 * transmit ring, higher layers may be blocked because too much
674 * data is outstanding: in such cases notification from Xen is
675 * likely to be the only kick that we'll get.
676 */
677 np->tx.sring->rsp_event =
678 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
679 mb();
680 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
682 network_maybe_wake_tx(dev);
683 }
685 static void rx_refill_timeout(unsigned long data)
686 {
687 struct net_device *dev = (struct net_device *)data;
688 netif_rx_schedule(dev);
689 }
691 static void network_alloc_rx_buffers(struct net_device *dev)
692 {
693 unsigned short id;
694 struct netfront_info *np = netdev_priv(dev);
695 struct sk_buff *skb;
696 struct page *page;
697 int i, batch_target, notify;
698 RING_IDX req_prod = np->rx.req_prod_pvt;
699 struct xen_memory_reservation reservation;
700 grant_ref_t ref;
701 unsigned long pfn;
702 void *vaddr;
703 int nr_flips;
704 netif_rx_request_t *req;
706 if (unlikely(!netif_carrier_ok(dev)))
707 return;
709 /*
710 * Allocate skbuffs greedily, even though we batch updates to the
711 * receive ring. This creates a less bursty demand on the memory
712 * allocator, so should reduce the chance of failed allocation requests
713 * both for ourselves and for other kernel subsystems.
714 */
715 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
716 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
717 /*
718 * Allocate an skb and a page. Do not use __dev_alloc_skb as
719 * that will allocate page-sized buffers which is not
720 * necessary here.
721 * 16 bytes added as necessary headroom for netif_receive_skb.
722 */
723 skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
724 GFP_ATOMIC | __GFP_NOWARN);
725 if (unlikely(!skb))
726 goto no_skb;
728 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
729 if (!page) {
730 kfree_skb(skb);
731 no_skb:
732 /* Any skbuffs queued for refill? Force them out. */
733 if (i != 0)
734 goto refill;
735 /* Could not allocate any skbuffs. Try again later. */
736 mod_timer(&np->rx_refill_timer,
737 jiffies + (HZ/10));
738 break;
739 }
741 skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
742 skb_shinfo(skb)->frags[0].page = page;
743 skb_shinfo(skb)->nr_frags = 1;
744 __skb_queue_tail(&np->rx_batch, skb);
745 }
747 /* Is the batch large enough to be worthwhile? */
748 if (i < (np->rx_target/2)) {
749 if (req_prod > np->rx.sring->req_prod)
750 goto push;
751 return;
752 }
754 /* Adjust our fill target if we risked running out of buffers. */
755 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
756 ((np->rx_target *= 2) > np->rx_max_target))
757 np->rx_target = np->rx_max_target;
759 refill:
760 for (nr_flips = i = 0; ; i++) {
761 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
762 break;
764 skb->dev = dev;
766 id = xennet_rxidx(req_prod + i);
768 BUG_ON(np->rx_skbs[id]);
769 np->rx_skbs[id] = skb;
771 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
772 BUG_ON((signed short)ref < 0);
773 np->grant_rx_ref[id] = ref;
775 pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
776 vaddr = page_address(skb_shinfo(skb)->frags[0].page);
778 req = RING_GET_REQUEST(&np->rx, req_prod + i);
779 if (!np->copying_receiver) {
780 gnttab_grant_foreign_transfer_ref(ref,
781 np->xbdev->otherend_id,
782 pfn);
783 np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
784 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
785 /* Remove this page before passing
786 * back to Xen. */
787 set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
788 MULTI_update_va_mapping(np->rx_mcl+i,
789 (unsigned long)vaddr,
790 __pte(0), 0);
791 }
792 nr_flips++;
793 } else {
794 gnttab_grant_foreign_access_ref(ref,
795 np->xbdev->otherend_id,
796 pfn_to_mfn(pfn),
797 0);
798 }
800 req->id = id;
801 req->gref = ref;
802 }
804 if ( nr_flips != 0 ) {
805 /* Tell the balloon driver what is going on. */
806 balloon_update_driver_allowance(i);
808 set_xen_guest_handle(reservation.extent_start,
809 np->rx_pfn_array);
810 reservation.nr_extents = nr_flips;
811 reservation.extent_order = 0;
812 reservation.address_bits = 0;
813 reservation.domid = DOMID_SELF;
815 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
816 /* After all PTEs have been zapped, flush the TLB. */
817 np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
818 UVMF_TLB_FLUSH|UVMF_ALL;
820 /* Give away a batch of pages. */
821 np->rx_mcl[i].op = __HYPERVISOR_memory_op;
822 np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
823 np->rx_mcl[i].args[1] = (unsigned long)&reservation;
825 /* Zap PTEs and give away pages in one big
826 * multicall. */
827 (void)HYPERVISOR_multicall(np->rx_mcl, i+1);
829 /* Check return status of HYPERVISOR_memory_op(). */
830 if (unlikely(np->rx_mcl[i].result != i))
831 panic("Unable to reduce memory reservation\n");
832 } else {
833 if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
834 &reservation) != i)
835 panic("Unable to reduce memory reservation\n");
836 }
837 } else {
838 wmb();
839 }
841 /* Above is a suitable barrier to ensure backend will see requests. */
842 np->rx.req_prod_pvt = req_prod + i;
843 push:
844 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
845 if (notify)
846 notify_remote_via_irq(np->irq);
847 }
849 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
850 struct netif_tx_request *tx)
851 {
852 struct netfront_info *np = netdev_priv(dev);
853 char *data = skb->data;
854 unsigned long mfn;
855 RING_IDX prod = np->tx.req_prod_pvt;
856 int frags = skb_shinfo(skb)->nr_frags;
857 unsigned int offset = offset_in_page(data);
858 unsigned int len = skb_headlen(skb);
859 unsigned int id;
860 grant_ref_t ref;
861 int i;
863 while (len > PAGE_SIZE - offset) {
864 tx->size = PAGE_SIZE - offset;
865 tx->flags |= NETTXF_more_data;
866 len -= tx->size;
867 data += tx->size;
868 offset = 0;
870 id = get_id_from_freelist(np->tx_skbs);
871 np->tx_skbs[id] = skb_get(skb);
872 tx = RING_GET_REQUEST(&np->tx, prod++);
873 tx->id = id;
874 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
875 BUG_ON((signed short)ref < 0);
877 mfn = virt_to_mfn(data);
878 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
879 mfn, GNTMAP_readonly);
881 tx->gref = np->grant_tx_ref[id] = ref;
882 tx->offset = offset;
883 tx->size = len;
884 tx->flags = 0;
885 }
887 for (i = 0; i < frags; i++) {
888 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
890 tx->flags |= NETTXF_more_data;
892 id = get_id_from_freelist(np->tx_skbs);
893 np->tx_skbs[id] = skb_get(skb);
894 tx = RING_GET_REQUEST(&np->tx, prod++);
895 tx->id = id;
896 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
897 BUG_ON((signed short)ref < 0);
899 mfn = pfn_to_mfn(page_to_pfn(frag->page));
900 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
901 mfn, GNTMAP_readonly);
903 tx->gref = np->grant_tx_ref[id] = ref;
904 tx->offset = frag->page_offset;
905 tx->size = frag->size;
906 tx->flags = 0;
907 }
909 np->tx.req_prod_pvt = prod;
910 }
912 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
913 {
914 unsigned short id;
915 struct netfront_info *np = netdev_priv(dev);
916 struct netif_tx_request *tx;
917 struct netif_extra_info *extra;
918 char *data = skb->data;
919 RING_IDX i;
920 grant_ref_t ref;
921 unsigned long mfn;
922 int notify;
923 int frags = skb_shinfo(skb)->nr_frags;
924 unsigned int offset = offset_in_page(data);
925 unsigned int len = skb_headlen(skb);
927 frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
928 if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
929 printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
930 frags);
931 dump_stack();
932 goto drop;
933 }
935 spin_lock_irq(&np->tx_lock);
937 if (unlikely(!netif_carrier_ok(dev) ||
938 (frags > 1 && !xennet_can_sg(dev)) ||
939 netif_needs_gso(dev, skb))) {
940 spin_unlock_irq(&np->tx_lock);
941 goto drop;
942 }
944 i = np->tx.req_prod_pvt;
946 id = get_id_from_freelist(np->tx_skbs);
947 np->tx_skbs[id] = skb;
949 tx = RING_GET_REQUEST(&np->tx, i);
951 tx->id = id;
952 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
953 BUG_ON((signed short)ref < 0);
954 mfn = virt_to_mfn(data);
955 gnttab_grant_foreign_access_ref(
956 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
957 tx->gref = np->grant_tx_ref[id] = ref;
958 tx->offset = offset;
959 tx->size = len;
961 tx->flags = 0;
962 extra = NULL;
964 if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
965 tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
966 #ifdef CONFIG_XEN
967 if (skb->proto_data_valid) /* remote but checksummed? */
968 tx->flags |= NETTXF_data_validated;
969 #endif
971 #ifdef HAVE_TSO
972 if (skb_shinfo(skb)->gso_size) {
973 struct netif_extra_info *gso = (struct netif_extra_info *)
974 RING_GET_REQUEST(&np->tx, ++i);
976 if (extra)
977 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
978 else
979 tx->flags |= NETTXF_extra_info;
981 gso->u.gso.size = skb_shinfo(skb)->gso_size;
982 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
983 gso->u.gso.pad = 0;
984 gso->u.gso.features = 0;
986 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
987 gso->flags = 0;
988 extra = gso;
989 }
990 #endif
992 np->tx.req_prod_pvt = i + 1;
994 xennet_make_frags(skb, dev, tx);
995 tx->size = skb->len;
997 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
998 if (notify)
999 notify_remote_via_irq(np->irq);
1001 network_tx_buf_gc(dev);
1003 if (!netfront_tx_slot_available(np))
1004 netif_stop_queue(dev);
1006 spin_unlock_irq(&np->tx_lock);
1008 np->stats.tx_bytes += skb->len;
1009 np->stats.tx_packets++;
1011 return 0;
1013 drop:
1014 np->stats.tx_dropped++;
1015 dev_kfree_skb(skb);
1016 return 0;
1019 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
1021 struct net_device *dev = dev_id;
1022 struct netfront_info *np = netdev_priv(dev);
1023 unsigned long flags;
1025 spin_lock_irqsave(&np->tx_lock, flags);
1027 if (likely(netif_carrier_ok(dev))) {
1028 network_tx_buf_gc(dev);
1029 /* Under tx_lock: protects access to rx shared-ring indexes. */
1030 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1031 netif_rx_schedule(dev);
1034 spin_unlock_irqrestore(&np->tx_lock, flags);
1036 return IRQ_HANDLED;
1039 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
1040 grant_ref_t ref)
1042 int new = xennet_rxidx(np->rx.req_prod_pvt);
1044 BUG_ON(np->rx_skbs[new]);
1045 np->rx_skbs[new] = skb;
1046 np->grant_rx_ref[new] = ref;
1047 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
1048 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
1049 np->rx.req_prod_pvt++;
1052 int xennet_get_extras(struct netfront_info *np,
1053 struct netif_extra_info *extras, RING_IDX rp)
1056 struct netif_extra_info *extra;
1057 RING_IDX cons = np->rx.rsp_cons;
1058 int err = 0;
1060 do {
1061 struct sk_buff *skb;
1062 grant_ref_t ref;
1064 if (unlikely(cons + 1 == rp)) {
1065 if (net_ratelimit())
1066 WPRINTK("Missing extra info\n");
1067 err = -EBADR;
1068 break;
1071 extra = (struct netif_extra_info *)
1072 RING_GET_RESPONSE(&np->rx, ++cons);
1074 if (unlikely(!extra->type ||
1075 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1076 if (net_ratelimit())
1077 WPRINTK("Invalid extra type: %d\n",
1078 extra->type);
1079 err = -EINVAL;
1080 } else {
1081 memcpy(&extras[extra->type - 1], extra,
1082 sizeof(*extra));
1085 skb = xennet_get_rx_skb(np, cons);
1086 ref = xennet_get_rx_ref(np, cons);
1087 xennet_move_rx_slot(np, skb, ref);
1088 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1090 np->rx.rsp_cons = cons;
1091 return err;
1094 static int xennet_get_responses(struct netfront_info *np,
1095 struct netfront_rx_info *rinfo, RING_IDX rp,
1096 struct sk_buff_head *list,
1097 int *pages_flipped_p)
1099 int pages_flipped = *pages_flipped_p;
1100 struct mmu_update *mmu;
1101 struct multicall_entry *mcl;
1102 struct netif_rx_response *rx = &rinfo->rx;
1103 struct netif_extra_info *extras = rinfo->extras;
1104 RING_IDX cons = np->rx.rsp_cons;
1105 struct sk_buff *skb = xennet_get_rx_skb(np, cons);
1106 grant_ref_t ref = xennet_get_rx_ref(np, cons);
1107 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
1108 int frags = 1;
1109 int err = 0;
1110 unsigned long ret;
1112 if (rx->flags & NETRXF_extra_info) {
1113 err = xennet_get_extras(np, extras, rp);
1114 cons = np->rx.rsp_cons;
1117 for (;;) {
1118 unsigned long mfn;
1120 if (unlikely(rx->status < 0 ||
1121 rx->offset + rx->status > PAGE_SIZE)) {
1122 if (net_ratelimit())
1123 WPRINTK("rx->offset: %x, size: %u\n",
1124 rx->offset, rx->status);
1125 xennet_move_rx_slot(np, skb, ref);
1126 err = -EINVAL;
1127 goto next;
1130 /*
1131 * This definitely indicates a bug, either in this driver or in
1132 * the backend driver. In future this should flag the bad
1133 * situation to the system controller to reboot the backend.
1134 */
1135 if (ref == GRANT_INVALID_REF) {
1136 if (net_ratelimit())
1137 WPRINTK("Bad rx response id %d.\n", rx->id);
1138 err = -EINVAL;
1139 goto next;
1142 if (!np->copying_receiver) {
1143 /* Memory pressure, insufficient buffer
1144 * headroom, ... */
1145 if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
1146 if (net_ratelimit())
1147 WPRINTK("Unfulfilled rx req "
1148 "(id=%d, st=%d).\n",
1149 rx->id, rx->status);
1150 xennet_move_rx_slot(np, skb, ref);
1151 err = -ENOMEM;
1152 goto next;
1155 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1156 /* Remap the page. */
1157 struct page *page =
1158 skb_shinfo(skb)->frags[0].page;
1159 unsigned long pfn = page_to_pfn(page);
1160 void *vaddr = page_address(page);
1162 mcl = np->rx_mcl + pages_flipped;
1163 mmu = np->rx_mmu + pages_flipped;
1165 MULTI_update_va_mapping(mcl,
1166 (unsigned long)vaddr,
1167 pfn_pte_ma(mfn,
1168 PAGE_KERNEL),
1169 0);
1170 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
1171 | MMU_MACHPHYS_UPDATE;
1172 mmu->val = pfn;
1174 set_phys_to_machine(pfn, mfn);
1176 pages_flipped++;
1177 } else {
1178 ret = gnttab_end_foreign_access_ref(ref, 0);
1179 BUG_ON(!ret);
1182 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1184 __skb_queue_tail(list, skb);
1186 next:
1187 if (!(rx->flags & NETRXF_more_data))
1188 break;
1190 if (cons + frags == rp) {
1191 if (net_ratelimit())
1192 WPRINTK("Need more frags\n");
1193 err = -ENOENT;
1194 break;
1197 rx = RING_GET_RESPONSE(&np->rx, cons + frags);
1198 skb = xennet_get_rx_skb(np, cons + frags);
1199 ref = xennet_get_rx_ref(np, cons + frags);
1200 frags++;
1203 if (unlikely(frags > max)) {
1204 if (net_ratelimit())
1205 WPRINTK("Too many frags\n");
1206 err = -E2BIG;
1209 if (unlikely(err))
1210 np->rx.rsp_cons = cons + frags;
1212 *pages_flipped_p = pages_flipped;
1214 return err;
1217 static RING_IDX xennet_fill_frags(struct netfront_info *np,
1218 struct sk_buff *skb,
1219 struct sk_buff_head *list)
1221 struct skb_shared_info *shinfo = skb_shinfo(skb);
1222 int nr_frags = shinfo->nr_frags;
1223 RING_IDX cons = np->rx.rsp_cons;
1224 skb_frag_t *frag = shinfo->frags + nr_frags;
1225 struct sk_buff *nskb;
1227 while ((nskb = __skb_dequeue(list))) {
1228 struct netif_rx_response *rx =
1229 RING_GET_RESPONSE(&np->rx, ++cons);
1231 frag->page = skb_shinfo(nskb)->frags[0].page;
1232 frag->page_offset = rx->offset;
1233 frag->size = rx->status;
1235 skb->data_len += rx->status;
1237 skb_shinfo(nskb)->nr_frags = 0;
1238 kfree_skb(nskb);
1240 frag++;
1241 nr_frags++;
1244 shinfo->nr_frags = nr_frags;
1245 return cons;
1248 static int xennet_set_skb_gso(struct sk_buff *skb,
1249 struct netif_extra_info *gso)
1251 if (!gso->u.gso.size) {
1252 if (net_ratelimit())
1253 WPRINTK("GSO size must not be zero.\n");
1254 return -EINVAL;
1257 /* Currently only TCPv4 S.O. is supported. */
1258 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1259 if (net_ratelimit())
1260 WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
1261 return -EINVAL;
1264 #ifdef HAVE_TSO
1265 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1266 #ifdef HAVE_GSO
1267 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1269 /* Header must be checked, and gso_segs computed. */
1270 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1271 #endif
1272 skb_shinfo(skb)->gso_segs = 0;
1274 return 0;
1275 #else
1276 if (net_ratelimit())
1277 WPRINTK("GSO unsupported by this kernel.\n");
1278 return -EINVAL;
1279 #endif
1282 static int netif_poll(struct net_device *dev, int *pbudget)
1284 struct netfront_info *np = netdev_priv(dev);
1285 struct sk_buff *skb;
1286 struct netfront_rx_info rinfo;
1287 struct netif_rx_response *rx = &rinfo.rx;
1288 struct netif_extra_info *extras = rinfo.extras;
1289 RING_IDX i, rp;
1290 struct multicall_entry *mcl;
1291 int work_done, budget, more_to_do = 1;
1292 struct sk_buff_head rxq;
1293 struct sk_buff_head errq;
1294 struct sk_buff_head tmpq;
1295 unsigned long flags;
1296 unsigned int len;
1297 int pages_flipped = 0;
1298 int err;
1300 spin_lock(&np->rx_lock);
1302 if (unlikely(!netif_carrier_ok(dev))) {
1303 spin_unlock(&np->rx_lock);
1304 return 0;
1307 skb_queue_head_init(&rxq);
1308 skb_queue_head_init(&errq);
1309 skb_queue_head_init(&tmpq);
1311 if ((budget = *pbudget) > dev->quota)
1312 budget = dev->quota;
1313 rp = np->rx.sring->rsp_prod;
1314 rmb(); /* Ensure we see queued responses up to 'rp'. */
1316 i = np->rx.rsp_cons;
1317 work_done = 0;
1318 while ((i != rp) && (work_done < budget)) {
1319 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
1320 memset(extras, 0, sizeof(extras));
1322 err = xennet_get_responses(np, &rinfo, rp, &tmpq,
1323 &pages_flipped);
1325 if (unlikely(err)) {
1326 err:
1327 while ((skb = __skb_dequeue(&tmpq)))
1328 __skb_queue_tail(&errq, skb);
1329 np->stats.rx_errors++;
1330 i = np->rx.rsp_cons;
1331 continue;
1334 skb = __skb_dequeue(&tmpq);
1336 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1337 struct netif_extra_info *gso;
1338 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1340 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1341 __skb_queue_head(&tmpq, skb);
1342 np->rx.rsp_cons += skb_queue_len(&tmpq);
1343 goto err;
1347 skb->nh.raw = (void *)skb_shinfo(skb)->frags[0].page;
1348 skb->h.raw = skb->nh.raw + rx->offset;
1350 len = rx->status;
1351 if (len > RX_COPY_THRESHOLD)
1352 len = RX_COPY_THRESHOLD;
1353 skb_put(skb, len);
1355 if (rx->status > len) {
1356 skb_shinfo(skb)->frags[0].page_offset =
1357 rx->offset + len;
1358 skb_shinfo(skb)->frags[0].size = rx->status - len;
1359 skb->data_len = rx->status - len;
1360 } else {
1361 skb_shinfo(skb)->frags[0].page = NULL;
1362 skb_shinfo(skb)->nr_frags = 0;
1365 i = xennet_fill_frags(np, skb, &tmpq);
1367 /*
1368 * Truesize must approximate the size of true data plus
1369 * any supervisor overheads. Adding hypervisor overheads
1370 * has been shown to significantly reduce achievable
1371 * bandwidth with the default receive buffer size. It is
1372 * therefore not wise to account for it here.
1374 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
1375 * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
1376 * add the size of the data pulled in xennet_fill_frags().
1378 * We also adjust for any unused space in the main data
1379 * area by subtracting (RX_COPY_THRESHOLD - len). This is
1380 * especially important with drivers which split incoming
1381 * packets into header and data, using only 66 bytes of
1382 * the main data area (see the e1000 driver for example.)
1383 * On such systems, without this last adjustment, our
1384 * achievable receive throughput using the standard receive
1385 * buffer size was cut by 25%(!!!).
1386 */
1387 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
1388 skb->len += skb->data_len;
1390 /*
1391 * Old backends do not assert data_validated but we
1392 * can infer it from csum_blank so test both flags.
1393 */
1394 if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
1395 skb->ip_summed = CHECKSUM_UNNECESSARY;
1396 else
1397 skb->ip_summed = CHECKSUM_NONE;
1398 #ifdef CONFIG_XEN
1399 skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
1400 skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
1401 #endif
1402 np->stats.rx_packets++;
1403 np->stats.rx_bytes += skb->len;
1405 __skb_queue_tail(&rxq, skb);
1407 np->rx.rsp_cons = ++i;
1408 work_done++;
1411 if (pages_flipped) {
1412 /* Some pages are no longer absent... */
1413 balloon_update_driver_allowance(-pages_flipped);
1415 /* Do all the remapping work and M2P updates. */
1416 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1417 mcl = np->rx_mcl + pages_flipped;
1418 mcl->op = __HYPERVISOR_mmu_update;
1419 mcl->args[0] = (unsigned long)np->rx_mmu;
1420 mcl->args[1] = pages_flipped;
1421 mcl->args[2] = 0;
1422 mcl->args[3] = DOMID_SELF;
1423 (void)HYPERVISOR_multicall(np->rx_mcl,
1424 pages_flipped + 1);
1428 while ((skb = __skb_dequeue(&errq)))
1429 kfree_skb(skb);
1431 while ((skb = __skb_dequeue(&rxq)) != NULL) {
1432 struct page *page = (struct page *)skb->nh.raw;
1433 void *vaddr = page_address(page);
1435 memcpy(skb->data, vaddr + (skb->h.raw - skb->nh.raw),
1436 skb_headlen(skb));
1438 if (page != skb_shinfo(skb)->frags[0].page)
1439 __free_page(page);
1441 /* Ethernet work: Delayed to here as it peeks the header. */
1442 skb->protocol = eth_type_trans(skb, dev);
1444 /* Pass it up. */
1445 netif_receive_skb(skb);
1446 dev->last_rx = jiffies;
1449 /* If we get a callback with very few responses, reduce fill target. */
1450 /* NB. Note exponential increase, linear decrease. */
1451 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1452 ((3*np->rx_target) / 4)) &&
1453 (--np->rx_target < np->rx_min_target))
1454 np->rx_target = np->rx_min_target;
1456 network_alloc_rx_buffers(dev);
1458 *pbudget -= work_done;
1459 dev->quota -= work_done;
1461 if (work_done < budget) {
1462 local_irq_save(flags);
1464 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1465 if (!more_to_do)
1466 __netif_rx_complete(dev);
1468 local_irq_restore(flags);
1471 spin_unlock(&np->rx_lock);
1473 return more_to_do;
1476 static void netif_release_tx_bufs(struct netfront_info *np)
1478 struct sk_buff *skb;
1479 int i;
1481 for (i = 1; i <= NET_TX_RING_SIZE; i++) {
1482 if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
1483 continue;
1485 skb = np->tx_skbs[i];
1486 gnttab_end_foreign_access_ref(
1487 np->grant_tx_ref[i], GNTMAP_readonly);
1488 gnttab_release_grant_reference(
1489 &np->gref_tx_head, np->grant_tx_ref[i]);
1490 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1491 add_id_to_freelist(np->tx_skbs, i);
1492 dev_kfree_skb_irq(skb);
1496 static void netif_release_rx_bufs(struct netfront_info *np)
1498 struct mmu_update *mmu = np->rx_mmu;
1499 struct multicall_entry *mcl = np->rx_mcl;
1500 struct sk_buff_head free_list;
1501 struct sk_buff *skb;
1502 unsigned long mfn;
1503 int xfer = 0, noxfer = 0, unused = 0;
1504 int id, ref;
1506 if (np->copying_receiver) {
1507 WPRINTK("%s: fix me for copying receiver.\n", __FUNCTION__);
1508 return;
1511 skb_queue_head_init(&free_list);
1513 spin_lock(&np->rx_lock);
1515 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1516 if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
1517 unused++;
1518 continue;
1521 skb = np->rx_skbs[id];
1522 mfn = gnttab_end_foreign_transfer_ref(ref);
1523 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1524 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1525 add_id_to_freelist(np->rx_skbs, id);
1527 if (0 == mfn) {
1528 struct page *page = skb_shinfo(skb)->frags[0].page;
1529 balloon_release_driver_page(page);
1530 skb_shinfo(skb)->nr_frags = 0;
1531 dev_kfree_skb(skb);
1532 noxfer++;
1533 continue;
1536 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1537 /* Remap the page. */
1538 struct page *page = skb_shinfo(skb)->frags[0].page;
1539 unsigned long pfn = page_to_pfn(page);
1540 void *vaddr = page_address(page);
1542 MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1543 pfn_pte_ma(mfn, PAGE_KERNEL),
1544 0);
1545 mcl++;
1546 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
1547 | MMU_MACHPHYS_UPDATE;
1548 mmu->val = pfn;
1549 mmu++;
1551 set_phys_to_machine(pfn, mfn);
1553 __skb_queue_tail(&free_list, skb);
1554 xfer++;
1557 IPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
1558 __FUNCTION__, xfer, noxfer, unused);
1560 if (xfer) {
1561 /* Some pages are no longer absent... */
1562 balloon_update_driver_allowance(-xfer);
1564 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1565 /* Do all the remapping work and M2P updates. */
1566 mcl->op = __HYPERVISOR_mmu_update;
1567 mcl->args[0] = (unsigned long)np->rx_mmu;
1568 mcl->args[1] = mmu - np->rx_mmu;
1569 mcl->args[2] = 0;
1570 mcl->args[3] = DOMID_SELF;
1571 mcl++;
1572 HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1576 while ((skb = __skb_dequeue(&free_list)) != NULL)
1577 dev_kfree_skb(skb);
1579 spin_unlock(&np->rx_lock);
1582 static int network_close(struct net_device *dev)
1584 struct netfront_info *np = netdev_priv(dev);
1585 netif_stop_queue(np->netdev);
1586 return 0;
1590 static struct net_device_stats *network_get_stats(struct net_device *dev)
1592 struct netfront_info *np = netdev_priv(dev);
1593 return &np->stats;
1596 static int xennet_change_mtu(struct net_device *dev, int mtu)
1598 int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
1600 if (mtu > max)
1601 return -EINVAL;
1602 dev->mtu = mtu;
1603 return 0;
1606 static int xennet_set_sg(struct net_device *dev, u32 data)
1608 if (data) {
1609 struct netfront_info *np = netdev_priv(dev);
1610 int val;
1612 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1613 "%d", &val) < 0)
1614 val = 0;
1615 if (!val)
1616 return -ENOSYS;
1617 } else if (dev->mtu > ETH_DATA_LEN)
1618 dev->mtu = ETH_DATA_LEN;
1620 return ethtool_op_set_sg(dev, data);
1623 static int xennet_set_tso(struct net_device *dev, u32 data)
1625 #ifdef HAVE_TSO
1626 if (data) {
1627 struct netfront_info *np = netdev_priv(dev);
1628 int val;
1630 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1631 "feature-gso-tcpv4", "%d", &val) < 0)
1632 val = 0;
1633 if (!val)
1634 return -ENOSYS;
1637 return ethtool_op_set_tso(dev, data);
1638 #else
1639 return -ENOSYS;
1640 #endif
1643 static void xennet_set_features(struct net_device *dev)
1645 dev_disable_gso_features(dev);
1646 xennet_set_sg(dev, 0);
1648 /* We need checksum offload to enable scatter/gather and TSO. */
1649 if (!(dev->features & NETIF_F_IP_CSUM))
1650 return;
1652 if (xennet_set_sg(dev, 1))
1653 return;
1655 /* Before 2.6.9 TSO seems to be unreliable so do not enable it
1656 * on older kernels.
1657 */
1658 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
1659 xennet_set_tso(dev, 1);
1660 #endif
1664 static int network_connect(struct net_device *dev)
1666 struct netfront_info *np = netdev_priv(dev);
1667 int i, requeue_idx, err;
1668 struct sk_buff *skb;
1669 grant_ref_t ref;
1670 netif_rx_request_t *req;
1671 unsigned int feature_rx_copy, feature_rx_flip;
1673 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1674 "feature-rx-copy", "%u", &feature_rx_copy);
1675 if (err != 1)
1676 feature_rx_copy = 0;
1677 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1678 "feature-rx-flip", "%u", &feature_rx_flip);
1679 if (err != 1)
1680 feature_rx_flip = 1;
1682 /*
1683 * Copy packets on receive path if:
1684 * (a) This was requested by user, and the backend supports it; or
1685 * (b) Flipping was requested, but this is unsupported by the backend.
1686 */
1687 np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
1688 (MODPARM_rx_flip && !feature_rx_flip));
1690 err = talk_to_backend(np->xbdev, np);
1691 if (err)
1692 return err;
1694 xennet_set_features(dev);
1696 IPRINTK("device %s has %sing receive path.\n",
1697 dev->name, np->copying_receiver ? "copy" : "flipp");
1699 spin_lock_irq(&np->tx_lock);
1700 spin_lock(&np->rx_lock);
1702 /*
1703 * Recovery procedure:
1704 * NB. Freelist index entries are always going to be less than
1705 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
1706 * greater than PAGE_OFFSET: we use this property to distinguish
1707 * them.
1708 */
1710 /* Step 1: Discard all pending TX packet fragments. */
1711 netif_release_tx_bufs(np);
1713 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1714 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1715 if (!np->rx_skbs[i])
1716 continue;
1718 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1719 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1720 req = RING_GET_REQUEST(&np->rx, requeue_idx);
1722 if (!np->copying_receiver) {
1723 gnttab_grant_foreign_transfer_ref(
1724 ref, np->xbdev->otherend_id,
1725 page_to_pfn(skb_shinfo(skb)->frags->page));
1726 } else {
1727 gnttab_grant_foreign_access_ref(
1728 ref, np->xbdev->otherend_id,
1729 pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
1730 frags->page)),
1731 0);
1733 req->gref = ref;
1734 req->id = requeue_idx;
1736 requeue_idx++;
1739 np->rx.req_prod_pvt = requeue_idx;
1741 /*
1742 * Step 3: All public and private state should now be sane. Get
1743 * ready to start sending and receiving packets and give the driver
1744 * domain a kick because we've probably just requeued some
1745 * packets.
1746 */
1747 netif_carrier_on(dev);
1748 notify_remote_via_irq(np->irq);
1749 network_tx_buf_gc(dev);
1750 network_alloc_rx_buffers(dev);
1752 spin_unlock(&np->rx_lock);
1753 spin_unlock_irq(&np->tx_lock);
1755 return 0;
1758 static void netif_uninit(struct net_device *dev)
1760 struct netfront_info *np = netdev_priv(dev);
1761 netif_release_tx_bufs(np);
1762 netif_release_rx_bufs(np);
1763 gnttab_free_grant_references(np->gref_tx_head);
1764 gnttab_free_grant_references(np->gref_rx_head);
1767 static struct ethtool_ops network_ethtool_ops =
1769 .get_tx_csum = ethtool_op_get_tx_csum,
1770 .set_tx_csum = ethtool_op_set_tx_csum,
1771 .get_sg = ethtool_op_get_sg,
1772 .set_sg = xennet_set_sg,
1773 .get_tso = ethtool_op_get_tso,
1774 .set_tso = xennet_set_tso,
1775 .get_link = ethtool_op_get_link,
1776 };
1778 #ifdef CONFIG_SYSFS
1779 static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
1781 struct net_device *netdev = container_of(cd, struct net_device,
1782 class_dev);
1783 struct netfront_info *info = netdev_priv(netdev);
1785 return sprintf(buf, "%u\n", info->rx_min_target);
1788 static ssize_t store_rxbuf_min(struct class_device *cd,
1789 const char *buf, size_t len)
1791 struct net_device *netdev = container_of(cd, struct net_device,
1792 class_dev);
1793 struct netfront_info *np = netdev_priv(netdev);
1794 char *endp;
1795 unsigned long target;
1797 if (!capable(CAP_NET_ADMIN))
1798 return -EPERM;
1800 target = simple_strtoul(buf, &endp, 0);
1801 if (endp == buf)
1802 return -EBADMSG;
1804 if (target < RX_MIN_TARGET)
1805 target = RX_MIN_TARGET;
1806 if (target > RX_MAX_TARGET)
1807 target = RX_MAX_TARGET;
1809 spin_lock(&np->rx_lock);
1810 if (target > np->rx_max_target)
1811 np->rx_max_target = target;
1812 np->rx_min_target = target;
1813 if (target > np->rx_target)
1814 np->rx_target = target;
1816 network_alloc_rx_buffers(netdev);
1818 spin_unlock(&np->rx_lock);
1819 return len;
1822 static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
1824 struct net_device *netdev = container_of(cd, struct net_device,
1825 class_dev);
1826 struct netfront_info *info = netdev_priv(netdev);
1828 return sprintf(buf, "%u\n", info->rx_max_target);
1831 static ssize_t store_rxbuf_max(struct class_device *cd,
1832 const char *buf, size_t len)
1834 struct net_device *netdev = container_of(cd, struct net_device,
1835 class_dev);
1836 struct netfront_info *np = netdev_priv(netdev);
1837 char *endp;
1838 unsigned long target;
1840 if (!capable(CAP_NET_ADMIN))
1841 return -EPERM;
1843 target = simple_strtoul(buf, &endp, 0);
1844 if (endp == buf)
1845 return -EBADMSG;
1847 if (target < RX_MIN_TARGET)
1848 target = RX_MIN_TARGET;
1849 if (target > RX_MAX_TARGET)
1850 target = RX_MAX_TARGET;
1852 spin_lock(&np->rx_lock);
1853 if (target < np->rx_min_target)
1854 np->rx_min_target = target;
1855 np->rx_max_target = target;
1856 if (target < np->rx_target)
1857 np->rx_target = target;
1859 network_alloc_rx_buffers(netdev);
1861 spin_unlock(&np->rx_lock);
1862 return len;
1865 static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
1867 struct net_device *netdev = container_of(cd, struct net_device,
1868 class_dev);
1869 struct netfront_info *info = netdev_priv(netdev);
1871 return sprintf(buf, "%u\n", info->rx_target);
1874 static const struct class_device_attribute xennet_attrs[] = {
1875 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
1876 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
1877 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
1878 };
1880 static int xennet_sysfs_addif(struct net_device *netdev)
1882 int i;
1883 int error = 0;
1885 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1886 error = class_device_create_file(&netdev->class_dev,
1887 &xennet_attrs[i]);
1888 if (error)
1889 goto fail;
1891 return 0;
1893 fail:
1894 while (--i >= 0)
1895 class_device_remove_file(&netdev->class_dev,
1896 &xennet_attrs[i]);
1897 return error;
1900 static void xennet_sysfs_delif(struct net_device *netdev)
1902 int i;
1904 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1905 class_device_remove_file(&netdev->class_dev,
1906 &xennet_attrs[i]);
1910 #endif /* CONFIG_SYSFS */
1913 /*
1914 * Nothing to do here. Virtual interface is point-to-point and the
1915 * physical interface is probably promiscuous anyway.
1916 */
1917 static void network_set_multicast_list(struct net_device *dev)
1921 static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
1923 int i, err = 0;
1924 struct net_device *netdev = NULL;
1925 struct netfront_info *np = NULL;
1927 netdev = alloc_etherdev(sizeof(struct netfront_info));
1928 if (!netdev) {
1929 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
1930 __FUNCTION__);
1931 return ERR_PTR(-ENOMEM);
1934 np = netdev_priv(netdev);
1935 np->xbdev = dev;
1937 spin_lock_init(&np->tx_lock);
1938 spin_lock_init(&np->rx_lock);
1940 skb_queue_head_init(&np->rx_batch);
1941 np->rx_target = RX_DFL_MIN_TARGET;
1942 np->rx_min_target = RX_DFL_MIN_TARGET;
1943 np->rx_max_target = RX_MAX_TARGET;
1945 init_timer(&np->rx_refill_timer);
1946 np->rx_refill_timer.data = (unsigned long)netdev;
1947 np->rx_refill_timer.function = rx_refill_timeout;
1949 /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
1950 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
1951 np->tx_skbs[i] = (void *)((unsigned long) i+1);
1952 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1955 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1956 np->rx_skbs[i] = NULL;
1957 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1960 /* A grant for every tx ring slot */
1961 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1962 &np->gref_tx_head) < 0) {
1963 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
1964 err = -ENOMEM;
1965 goto exit;
1967 /* A grant for every rx ring slot */
1968 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1969 &np->gref_rx_head) < 0) {
1970 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
1971 err = -ENOMEM;
1972 goto exit_free_tx;
1975 netdev->open = network_open;
1976 netdev->hard_start_xmit = network_start_xmit;
1977 netdev->stop = network_close;
1978 netdev->get_stats = network_get_stats;
1979 netdev->poll = netif_poll;
1980 netdev->set_multicast_list = network_set_multicast_list;
1981 netdev->uninit = netif_uninit;
1982 netdev->change_mtu = xennet_change_mtu;
1983 netdev->weight = 64;
1984 netdev->features = NETIF_F_IP_CSUM;
1986 SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
1987 SET_MODULE_OWNER(netdev);
1988 SET_NETDEV_DEV(netdev, &dev->dev);
1990 np->netdev = netdev;
1992 netif_carrier_off(netdev);
1994 return netdev;
1996 exit_free_tx:
1997 gnttab_free_grant_references(np->gref_tx_head);
1998 exit:
1999 free_netdev(netdev);
2000 return ERR_PTR(err);
2003 /*
2004 * We use this notifier to send out a fake ARP reply to reset switches and
2005 * router ARP caches when an IP interface is brought up on a VIF.
2006 */
2007 static int
2008 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
2010 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2011 struct net_device *dev = ifa->ifa_dev->dev;
2013 /* UP event and is it one of our devices? */
2014 if (event == NETDEV_UP && dev->open == network_open)
2015 (void)send_fake_arp(dev);
2017 return NOTIFY_DONE;
2021 static void netif_disconnect_backend(struct netfront_info *info)
2023 /* Stop old i/f to prevent errors whilst we rebuild the state. */
2024 spin_lock_irq(&info->tx_lock);
2025 spin_lock(&info->rx_lock);
2026 netif_carrier_off(info->netdev);
2027 spin_unlock(&info->rx_lock);
2028 spin_unlock_irq(&info->tx_lock);
2030 if (info->irq)
2031 unbind_from_irqhandler(info->irq, info->netdev);
2032 info->irq = 0;
2034 end_access(info->tx_ring_ref, info->tx.sring);
2035 end_access(info->rx_ring_ref, info->rx.sring);
2036 info->tx_ring_ref = GRANT_INVALID_REF;
2037 info->rx_ring_ref = GRANT_INVALID_REF;
2038 info->tx.sring = NULL;
2039 info->rx.sring = NULL;
2043 static void end_access(int ref, void *page)
2045 if (ref != GRANT_INVALID_REF)
2046 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
2050 /* ** Driver registration ** */
2053 static struct xenbus_device_id netfront_ids[] = {
2054 { "vif" },
2055 { "" }
2056 };
2059 static struct xenbus_driver netfront = {
2060 .name = "vif",
2061 .owner = THIS_MODULE,
2062 .ids = netfront_ids,
2063 .probe = netfront_probe,
2064 .remove = __devexit_p(netfront_remove),
2065 .resume = netfront_resume,
2066 .otherend_changed = backend_changed,
2067 };
2070 static struct notifier_block notifier_inetdev = {
2071 .notifier_call = inetdev_notify,
2072 .next = NULL,
2073 .priority = 0
2074 };
2076 static int __init netif_init(void)
2078 if (!is_running_on_xen())
2079 return -ENODEV;
2081 #ifdef CONFIG_XEN
2082 if (MODPARM_rx_flip && MODPARM_rx_copy) {
2083 WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
2084 return -EINVAL;
2087 if (!MODPARM_rx_flip && !MODPARM_rx_copy)
2088 MODPARM_rx_flip = 1; /* Default is to flip. */
2089 #endif
2091 if (is_initial_xendomain())
2092 return 0;
2094 IPRINTK("Initialising virtual ethernet driver.\n");
2096 (void)register_inetaddr_notifier(&notifier_inetdev);
2098 return xenbus_register_frontend(&netfront);
2100 module_init(netif_init);
2103 static void __exit netif_exit(void)
2105 if (is_initial_xendomain())
2106 return;
2108 unregister_inetaddr_notifier(&notifier_inetdev);
2110 return xenbus_unregister_driver(&netfront);
2112 module_exit(netif_exit);
2114 MODULE_LICENSE("Dual BSD/GPL");