ia64/xen-unstable

view linux-2.6-xen-sparse/net/core/dev.c @ 10714:a4041ac6f152

[NET] net-gso.patch: Fix up GSO packets with broken checksums

Here is the original changelog:

[NET] gso: Fix up GSO packets with broken checksums

Certain subsystems in the stack (e.g., netfilter) can break the
partial
checksum on GSO packets. Until they're fixed, this patch allows
this to
work by recomputing the partial checksums through the GSO
mechanism.

Once they've all been converted to update the partial checksum
instead of
clearing it, this workaround can be removed.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
author kfraser@localhost.localdomain
date Mon Jul 10 15:36:04 2006 +0100 (2006-07-10)
parents 6e7027a2abca
children 9519445d9e9d
line source
1 /*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/config.h>
80 #include <linux/cpu.h>
81 #include <linux/types.h>
82 #include <linux/kernel.h>
83 #include <linux/sched.h>
84 #include <linux/string.h>
85 #include <linux/mm.h>
86 #include <linux/socket.h>
87 #include <linux/sockios.h>
88 #include <linux/errno.h>
89 #include <linux/interrupt.h>
90 #include <linux/if_ether.h>
91 #include <linux/netdevice.h>
92 #include <linux/etherdevice.h>
93 #include <linux/notifier.h>
94 #include <linux/skbuff.h>
95 #include <net/sock.h>
96 #include <linux/rtnetlink.h>
97 #include <linux/proc_fs.h>
98 #include <linux/seq_file.h>
99 #include <linux/stat.h>
100 #include <linux/if_bridge.h>
101 #include <linux/divert.h>
102 #include <net/dst.h>
103 #include <net/pkt_sched.h>
104 #include <net/checksum.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/kmod.h>
108 #include <linux/module.h>
109 #include <linux/kallsyms.h>
110 #include <linux/netpoll.h>
111 #include <linux/rcupdate.h>
112 #include <linux/delay.h>
113 #ifdef CONFIG_NET_RADIO
114 #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
115 #include <net/iw_handler.h>
116 #endif /* CONFIG_NET_RADIO */
117 #include <asm/current.h>
118 #include <linux/err.h>
120 #ifdef CONFIG_XEN
121 #include <net/ip.h>
122 #include <linux/tcp.h>
123 #include <linux/udp.h>
124 #endif
126 /*
127 * The list of packet types we will receive (as opposed to discard)
128 * and the routines to invoke.
129 *
130 * Why 16. Because with 16 the only overlap we get on a hash of the
131 * low nibble of the protocol value is RARP/SNAP/X.25.
132 *
133 * NOTE: That is no longer true with the addition of VLAN tags. Not
134 * sure which should go first, but I bet it won't make much
135 * difference if we are running VLANs. The good news is that
136 * this protocol won't be in the list unless compiled in, so
137 * the average user (w/out VLANs) will not be adversly affected.
138 * --BLG
139 *
140 * 0800 IP
141 * 8100 802.1Q VLAN
142 * 0001 802.3
143 * 0002 AX.25
144 * 0004 802.2
145 * 8035 RARP
146 * 0005 SNAP
147 * 0805 X.25
148 * 0806 ARP
149 * 8137 IPX
150 * 0009 Localtalk
151 * 86DD IPv6
152 */
154 static DEFINE_SPINLOCK(ptype_lock);
155 static struct list_head ptype_base[16]; /* 16 way hashed list */
156 static struct list_head ptype_all; /* Taps */
158 /*
159 * The @dev_base list is protected by @dev_base_lock and the rtln
160 * semaphore.
161 *
162 * Pure readers hold dev_base_lock for reading.
163 *
164 * Writers must hold the rtnl semaphore while they loop through the
165 * dev_base list, and hold dev_base_lock for writing when they do the
166 * actual updates. This allows pure readers to access the list even
167 * while a writer is preparing to update it.
168 *
169 * To put it another way, dev_base_lock is held for writing only to
170 * protect against pure readers; the rtnl semaphore provides the
171 * protection against other writers.
172 *
173 * See, for example usages, register_netdevice() and
174 * unregister_netdevice(), which must be called with the rtnl
175 * semaphore held.
176 */
177 struct net_device *dev_base;
178 static struct net_device **dev_tail = &dev_base;
179 DEFINE_RWLOCK(dev_base_lock);
181 EXPORT_SYMBOL(dev_base);
182 EXPORT_SYMBOL(dev_base_lock);
184 #define NETDEV_HASHBITS 8
185 static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
186 static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];
188 static inline struct hlist_head *dev_name_hash(const char *name)
189 {
190 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
191 return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
192 }
194 static inline struct hlist_head *dev_index_hash(int ifindex)
195 {
196 return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
197 }
199 /*
200 * Our notifier list
201 */
203 static struct notifier_block *netdev_chain;
205 /*
206 * Device drivers call our routines to queue packets here. We empty the
207 * queue in the local softnet handler.
208 */
209 DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };
211 #ifdef CONFIG_SYSFS
212 extern int netdev_sysfs_init(void);
213 extern int netdev_register_sysfs(struct net_device *);
214 extern void netdev_unregister_sysfs(struct net_device *);
215 #else
216 #define netdev_sysfs_init() (0)
217 #define netdev_register_sysfs(dev) (0)
218 #define netdev_unregister_sysfs(dev) do { } while(0)
219 #endif
222 /*******************************************************************************
224 Protocol management and registration routines
226 *******************************************************************************/
228 /*
229 * For efficiency
230 */
232 int netdev_nit;
234 /*
235 * Add a protocol ID to the list. Now that the input handler is
236 * smarter we can dispense with all the messy stuff that used to be
237 * here.
238 *
239 * BEWARE!!! Protocol handlers, mangling input packets,
240 * MUST BE last in hash buckets and checking protocol handlers
241 * MUST start from promiscuous ptype_all chain in net_bh.
242 * It is true now, do not change it.
243 * Explanation follows: if protocol handler, mangling packet, will
244 * be the first on list, it is not able to sense, that packet
245 * is cloned and should be copied-on-write, so that it will
246 * change it and subsequent readers will get broken packet.
247 * --ANK (980803)
248 */
250 /**
251 * dev_add_pack - add packet handler
252 * @pt: packet type declaration
253 *
254 * Add a protocol handler to the networking stack. The passed &packet_type
255 * is linked into kernel lists and may not be freed until it has been
256 * removed from the kernel lists.
257 *
258 * This call does not sleep therefore it can not
259 * guarantee all CPU's that are in middle of receiving packets
260 * will see the new packet type (until the next received packet).
261 */
263 void dev_add_pack(struct packet_type *pt)
264 {
265 int hash;
267 spin_lock_bh(&ptype_lock);
268 if (pt->type == htons(ETH_P_ALL)) {
269 netdev_nit++;
270 list_add_rcu(&pt->list, &ptype_all);
271 } else {
272 hash = ntohs(pt->type) & 15;
273 list_add_rcu(&pt->list, &ptype_base[hash]);
274 }
275 spin_unlock_bh(&ptype_lock);
276 }
278 /**
279 * __dev_remove_pack - remove packet handler
280 * @pt: packet type declaration
281 *
282 * Remove a protocol handler that was previously added to the kernel
283 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
284 * from the kernel lists and can be freed or reused once this function
285 * returns.
286 *
287 * The packet type might still be in use by receivers
288 * and must not be freed until after all the CPU's have gone
289 * through a quiescent state.
290 */
291 void __dev_remove_pack(struct packet_type *pt)
292 {
293 struct list_head *head;
294 struct packet_type *pt1;
296 spin_lock_bh(&ptype_lock);
298 if (pt->type == htons(ETH_P_ALL)) {
299 netdev_nit--;
300 head = &ptype_all;
301 } else
302 head = &ptype_base[ntohs(pt->type) & 15];
304 list_for_each_entry(pt1, head, list) {
305 if (pt == pt1) {
306 list_del_rcu(&pt->list);
307 goto out;
308 }
309 }
311 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
312 out:
313 spin_unlock_bh(&ptype_lock);
314 }
315 /**
316 * dev_remove_pack - remove packet handler
317 * @pt: packet type declaration
318 *
319 * Remove a protocol handler that was previously added to the kernel
320 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
321 * from the kernel lists and can be freed or reused once this function
322 * returns.
323 *
324 * This call sleeps to guarantee that no CPU is looking at the packet
325 * type after return.
326 */
327 void dev_remove_pack(struct packet_type *pt)
328 {
329 __dev_remove_pack(pt);
331 synchronize_net();
332 }
334 /******************************************************************************
336 Device Boot-time Settings Routines
338 *******************************************************************************/
340 /* Boot time configuration table */
341 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
343 /**
344 * netdev_boot_setup_add - add new setup entry
345 * @name: name of the device
346 * @map: configured settings for the device
347 *
348 * Adds new setup entry to the dev_boot_setup list. The function
349 * returns 0 on error and 1 on success. This is a generic routine to
350 * all netdevices.
351 */
352 static int netdev_boot_setup_add(char *name, struct ifmap *map)
353 {
354 struct netdev_boot_setup *s;
355 int i;
357 s = dev_boot_setup;
358 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
359 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
360 memset(s[i].name, 0, sizeof(s[i].name));
361 strcpy(s[i].name, name);
362 memcpy(&s[i].map, map, sizeof(s[i].map));
363 break;
364 }
365 }
367 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
368 }
370 /**
371 * netdev_boot_setup_check - check boot time settings
372 * @dev: the netdevice
373 *
374 * Check boot time settings for the device.
375 * The found settings are set for the device to be used
376 * later in the device probing.
377 * Returns 0 if no settings found, 1 if they are.
378 */
379 int netdev_boot_setup_check(struct net_device *dev)
380 {
381 struct netdev_boot_setup *s = dev_boot_setup;
382 int i;
384 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
385 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
386 !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
387 dev->irq = s[i].map.irq;
388 dev->base_addr = s[i].map.base_addr;
389 dev->mem_start = s[i].map.mem_start;
390 dev->mem_end = s[i].map.mem_end;
391 return 1;
392 }
393 }
394 return 0;
395 }
398 /**
399 * netdev_boot_base - get address from boot time settings
400 * @prefix: prefix for network device
401 * @unit: id for network device
402 *
403 * Check boot time settings for the base address of device.
404 * The found settings are set for the device to be used
405 * later in the device probing.
406 * Returns 0 if no settings found.
407 */
408 unsigned long netdev_boot_base(const char *prefix, int unit)
409 {
410 const struct netdev_boot_setup *s = dev_boot_setup;
411 char name[IFNAMSIZ];
412 int i;
414 sprintf(name, "%s%d", prefix, unit);
416 /*
417 * If device already registered then return base of 1
418 * to indicate not to probe for this interface
419 */
420 if (__dev_get_by_name(name))
421 return 1;
423 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
424 if (!strcmp(name, s[i].name))
425 return s[i].map.base_addr;
426 return 0;
427 }
429 /*
430 * Saves at boot time configured settings for any netdevice.
431 */
432 int __init netdev_boot_setup(char *str)
433 {
434 int ints[5];
435 struct ifmap map;
437 str = get_options(str, ARRAY_SIZE(ints), ints);
438 if (!str || !*str)
439 return 0;
441 /* Save settings */
442 memset(&map, 0, sizeof(map));
443 if (ints[0] > 0)
444 map.irq = ints[1];
445 if (ints[0] > 1)
446 map.base_addr = ints[2];
447 if (ints[0] > 2)
448 map.mem_start = ints[3];
449 if (ints[0] > 3)
450 map.mem_end = ints[4];
452 /* Add new entry to the list */
453 return netdev_boot_setup_add(str, &map);
454 }
456 __setup("netdev=", netdev_boot_setup);
458 /*******************************************************************************
460 Device Interface Subroutines
462 *******************************************************************************/
464 /**
465 * __dev_get_by_name - find a device by its name
466 * @name: name to find
467 *
468 * Find an interface by name. Must be called under RTNL semaphore
469 * or @dev_base_lock. If the name is found a pointer to the device
470 * is returned. If the name is not found then %NULL is returned. The
471 * reference counters are not incremented so the caller must be
472 * careful with locks.
473 */
475 struct net_device *__dev_get_by_name(const char *name)
476 {
477 struct hlist_node *p;
479 hlist_for_each(p, dev_name_hash(name)) {
480 struct net_device *dev
481 = hlist_entry(p, struct net_device, name_hlist);
482 if (!strncmp(dev->name, name, IFNAMSIZ))
483 return dev;
484 }
485 return NULL;
486 }
488 /**
489 * dev_get_by_name - find a device by its name
490 * @name: name to find
491 *
492 * Find an interface by name. This can be called from any
493 * context and does its own locking. The returned handle has
494 * the usage count incremented and the caller must use dev_put() to
495 * release it when it is no longer needed. %NULL is returned if no
496 * matching device is found.
497 */
499 struct net_device *dev_get_by_name(const char *name)
500 {
501 struct net_device *dev;
503 read_lock(&dev_base_lock);
504 dev = __dev_get_by_name(name);
505 if (dev)
506 dev_hold(dev);
507 read_unlock(&dev_base_lock);
508 return dev;
509 }
511 /**
512 * __dev_get_by_index - find a device by its ifindex
513 * @ifindex: index of device
514 *
515 * Search for an interface by index. Returns %NULL if the device
516 * is not found or a pointer to the device. The device has not
517 * had its reference counter increased so the caller must be careful
518 * about locking. The caller must hold either the RTNL semaphore
519 * or @dev_base_lock.
520 */
522 struct net_device *__dev_get_by_index(int ifindex)
523 {
524 struct hlist_node *p;
526 hlist_for_each(p, dev_index_hash(ifindex)) {
527 struct net_device *dev
528 = hlist_entry(p, struct net_device, index_hlist);
529 if (dev->ifindex == ifindex)
530 return dev;
531 }
532 return NULL;
533 }
536 /**
537 * dev_get_by_index - find a device by its ifindex
538 * @ifindex: index of device
539 *
540 * Search for an interface by index. Returns NULL if the device
541 * is not found or a pointer to the device. The device returned has
542 * had a reference added and the pointer is safe until the user calls
543 * dev_put to indicate they have finished with it.
544 */
546 struct net_device *dev_get_by_index(int ifindex)
547 {
548 struct net_device *dev;
550 read_lock(&dev_base_lock);
551 dev = __dev_get_by_index(ifindex);
552 if (dev)
553 dev_hold(dev);
554 read_unlock(&dev_base_lock);
555 return dev;
556 }
558 /**
559 * dev_getbyhwaddr - find a device by its hardware address
560 * @type: media type of device
561 * @ha: hardware address
562 *
563 * Search for an interface by MAC address. Returns NULL if the device
564 * is not found or a pointer to the device. The caller must hold the
565 * rtnl semaphore. The returned device has not had its ref count increased
566 * and the caller must therefore be careful about locking
567 *
568 * BUGS:
569 * If the API was consistent this would be __dev_get_by_hwaddr
570 */
572 struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
573 {
574 struct net_device *dev;
576 ASSERT_RTNL();
578 for (dev = dev_base; dev; dev = dev->next)
579 if (dev->type == type &&
580 !memcmp(dev->dev_addr, ha, dev->addr_len))
581 break;
582 return dev;
583 }
585 EXPORT_SYMBOL(dev_getbyhwaddr);
587 struct net_device *dev_getfirstbyhwtype(unsigned short type)
588 {
589 struct net_device *dev;
591 rtnl_lock();
592 for (dev = dev_base; dev; dev = dev->next) {
593 if (dev->type == type) {
594 dev_hold(dev);
595 break;
596 }
597 }
598 rtnl_unlock();
599 return dev;
600 }
602 EXPORT_SYMBOL(dev_getfirstbyhwtype);
604 /**
605 * dev_get_by_flags - find any device with given flags
606 * @if_flags: IFF_* values
607 * @mask: bitmask of bits in if_flags to check
608 *
609 * Search for any interface with the given flags. Returns NULL if a device
610 * is not found or a pointer to the device. The device returned has
611 * had a reference added and the pointer is safe until the user calls
612 * dev_put to indicate they have finished with it.
613 */
615 struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
616 {
617 struct net_device *dev;
619 read_lock(&dev_base_lock);
620 for (dev = dev_base; dev != NULL; dev = dev->next) {
621 if (((dev->flags ^ if_flags) & mask) == 0) {
622 dev_hold(dev);
623 break;
624 }
625 }
626 read_unlock(&dev_base_lock);
627 return dev;
628 }
630 /**
631 * dev_valid_name - check if name is okay for network device
632 * @name: name string
633 *
634 * Network device names need to be valid file names to
635 * to allow sysfs to work
636 */
637 int dev_valid_name(const char *name)
638 {
639 return !(*name == '\0'
640 || !strcmp(name, ".")
641 || !strcmp(name, "..")
642 || strchr(name, '/'));
643 }
645 /**
646 * dev_alloc_name - allocate a name for a device
647 * @dev: device
648 * @name: name format string
649 *
650 * Passed a format string - eg "lt%d" it will try and find a suitable
651 * id. Not efficient for many devices, not called a lot. The caller
652 * must hold the dev_base or rtnl lock while allocating the name and
653 * adding the device in order to avoid duplicates. Returns the number
654 * of the unit assigned or a negative errno code.
655 */
657 int dev_alloc_name(struct net_device *dev, const char *name)
658 {
659 int i = 0;
660 char buf[IFNAMSIZ];
661 const char *p;
662 const int max_netdevices = 8*PAGE_SIZE;
663 long *inuse;
664 struct net_device *d;
666 p = strnchr(name, IFNAMSIZ-1, '%');
667 if (p) {
668 /*
669 * Verify the string as this thing may have come from
670 * the user. There must be either one "%d" and no other "%"
671 * characters.
672 */
673 if (p[1] != 'd' || strchr(p + 2, '%'))
674 return -EINVAL;
676 /* Use one page as a bit array of possible slots */
677 inuse = (long *) get_zeroed_page(GFP_ATOMIC);
678 if (!inuse)
679 return -ENOMEM;
681 for (d = dev_base; d; d = d->next) {
682 if (!sscanf(d->name, name, &i))
683 continue;
684 if (i < 0 || i >= max_netdevices)
685 continue;
687 /* avoid cases where sscanf is not exact inverse of printf */
688 snprintf(buf, sizeof(buf), name, i);
689 if (!strncmp(buf, d->name, IFNAMSIZ))
690 set_bit(i, inuse);
691 }
693 i = find_first_zero_bit(inuse, max_netdevices);
694 free_page((unsigned long) inuse);
695 }
697 snprintf(buf, sizeof(buf), name, i);
698 if (!__dev_get_by_name(buf)) {
699 strlcpy(dev->name, buf, IFNAMSIZ);
700 return i;
701 }
703 /* It is possible to run out of possible slots
704 * when the name is long and there isn't enough space left
705 * for the digits, or if all bits are used.
706 */
707 return -ENFILE;
708 }
711 /**
712 * dev_change_name - change name of a device
713 * @dev: device
714 * @newname: name (or format string) must be at least IFNAMSIZ
715 *
716 * Change name of a device, can pass format strings "eth%d".
717 * for wildcarding.
718 */
719 int dev_change_name(struct net_device *dev, char *newname)
720 {
721 int err = 0;
723 ASSERT_RTNL();
725 if (dev->flags & IFF_UP)
726 return -EBUSY;
728 if (!dev_valid_name(newname))
729 return -EINVAL;
731 if (strchr(newname, '%')) {
732 err = dev_alloc_name(dev, newname);
733 if (err < 0)
734 return err;
735 strcpy(newname, dev->name);
736 }
737 else if (__dev_get_by_name(newname))
738 return -EEXIST;
739 else
740 strlcpy(dev->name, newname, IFNAMSIZ);
742 err = class_device_rename(&dev->class_dev, dev->name);
743 if (!err) {
744 hlist_del(&dev->name_hlist);
745 hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
746 notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
747 }
749 return err;
750 }
752 /**
753 * netdev_features_change - device changes fatures
754 * @dev: device to cause notification
755 *
756 * Called to indicate a device has changed features.
757 */
758 void netdev_features_change(struct net_device *dev)
759 {
760 notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
761 }
762 EXPORT_SYMBOL(netdev_features_change);
764 /**
765 * netdev_state_change - device changes state
766 * @dev: device to cause notification
767 *
768 * Called to indicate a device has changed state. This function calls
769 * the notifier chains for netdev_chain and sends a NEWLINK message
770 * to the routing socket.
771 */
772 void netdev_state_change(struct net_device *dev)
773 {
774 if (dev->flags & IFF_UP) {
775 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
776 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
777 }
778 }
780 /**
781 * dev_load - load a network module
782 * @name: name of interface
783 *
784 * If a network interface is not present and the process has suitable
785 * privileges this function loads the module. If module loading is not
786 * available in this kernel then it becomes a nop.
787 */
789 void dev_load(const char *name)
790 {
791 struct net_device *dev;
793 read_lock(&dev_base_lock);
794 dev = __dev_get_by_name(name);
795 read_unlock(&dev_base_lock);
797 if (!dev && capable(CAP_SYS_MODULE))
798 request_module("%s", name);
799 }
801 static int default_rebuild_header(struct sk_buff *skb)
802 {
803 printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
804 skb->dev ? skb->dev->name : "NULL!!!");
805 kfree_skb(skb);
806 return 1;
807 }
810 /**
811 * dev_open - prepare an interface for use.
812 * @dev: device to open
813 *
814 * Takes a device from down to up state. The device's private open
815 * function is invoked and then the multicast lists are loaded. Finally
816 * the device is moved into the up state and a %NETDEV_UP message is
817 * sent to the netdev notifier chain.
818 *
819 * Calling this function on an active interface is a nop. On a failure
820 * a negative errno code is returned.
821 */
822 int dev_open(struct net_device *dev)
823 {
824 int ret = 0;
826 /*
827 * Is it already up?
828 */
830 if (dev->flags & IFF_UP)
831 return 0;
833 /*
834 * Is it even present?
835 */
836 if (!netif_device_present(dev))
837 return -ENODEV;
839 /*
840 * Call device private open method
841 */
842 set_bit(__LINK_STATE_START, &dev->state);
843 if (dev->open) {
844 ret = dev->open(dev);
845 if (ret)
846 clear_bit(__LINK_STATE_START, &dev->state);
847 }
849 /*
850 * If it went open OK then:
851 */
853 if (!ret) {
854 /*
855 * Set the flags.
856 */
857 dev->flags |= IFF_UP;
859 /*
860 * Initialize multicasting status
861 */
862 dev_mc_upload(dev);
864 /*
865 * Wakeup transmit queue engine
866 */
867 dev_activate(dev);
869 /*
870 * ... and announce new interface.
871 */
872 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
873 }
874 return ret;
875 }
877 /**
878 * dev_close - shutdown an interface.
879 * @dev: device to shutdown
880 *
881 * This function moves an active device into down state. A
882 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
883 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
884 * chain.
885 */
886 int dev_close(struct net_device *dev)
887 {
888 if (!(dev->flags & IFF_UP))
889 return 0;
891 /*
892 * Tell people we are going down, so that they can
893 * prepare to death, when device is still operating.
894 */
895 notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
897 dev_deactivate(dev);
899 clear_bit(__LINK_STATE_START, &dev->state);
901 /* Synchronize to scheduled poll. We cannot touch poll list,
902 * it can be even on different cpu. So just clear netif_running(),
903 * and wait when poll really will happen. Actually, the best place
904 * for this is inside dev->stop() after device stopped its irq
905 * engine, but this requires more changes in devices. */
907 smp_mb__after_clear_bit(); /* Commit netif_running(). */
908 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
909 /* No hurry. */
910 msleep(1);
911 }
913 /*
914 * Call the device specific close. This cannot fail.
915 * Only if device is UP
916 *
917 * We allow it to be called even after a DETACH hot-plug
918 * event.
919 */
920 if (dev->stop)
921 dev->stop(dev);
923 /*
924 * Device is now down.
925 */
927 dev->flags &= ~IFF_UP;
929 /*
930 * Tell people we are down
931 */
932 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
934 return 0;
935 }
938 /*
939 * Device change register/unregister. These are not inline or static
940 * as we export them to the world.
941 */
943 /**
944 * register_netdevice_notifier - register a network notifier block
945 * @nb: notifier
946 *
947 * Register a notifier to be called when network device events occur.
948 * The notifier passed is linked into the kernel structures and must
949 * not be reused until it has been unregistered. A negative errno code
950 * is returned on a failure.
951 *
952 * When registered all registration and up events are replayed
953 * to the new notifier to allow device to have a race free
954 * view of the network device list.
955 */
957 int register_netdevice_notifier(struct notifier_block *nb)
958 {
959 struct net_device *dev;
960 int err;
962 rtnl_lock();
963 err = notifier_chain_register(&netdev_chain, nb);
964 if (!err) {
965 for (dev = dev_base; dev; dev = dev->next) {
966 nb->notifier_call(nb, NETDEV_REGISTER, dev);
968 if (dev->flags & IFF_UP)
969 nb->notifier_call(nb, NETDEV_UP, dev);
970 }
971 }
972 rtnl_unlock();
973 return err;
974 }
976 /**
977 * unregister_netdevice_notifier - unregister a network notifier block
978 * @nb: notifier
979 *
980 * Unregister a notifier previously registered by
981 * register_netdevice_notifier(). The notifier is unlinked into the
982 * kernel structures and may then be reused. A negative errno code
983 * is returned on a failure.
984 */
986 int unregister_netdevice_notifier(struct notifier_block *nb)
987 {
988 return notifier_chain_unregister(&netdev_chain, nb);
989 }
991 /**
992 * call_netdevice_notifiers - call all network notifier blocks
993 * @val: value passed unmodified to notifier function
994 * @v: pointer passed unmodified to notifier function
995 *
996 * Call all network notifier blocks. Parameters and return value
997 * are as for notifier_call_chain().
998 */
1000 int call_netdevice_notifiers(unsigned long val, void *v)
1002 return notifier_call_chain(&netdev_chain, val, v);
1005 /* When > 0 there are consumers of rx skb time stamps */
1006 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1008 void net_enable_timestamp(void)
1010 atomic_inc(&netstamp_needed);
1013 void net_disable_timestamp(void)
1015 atomic_dec(&netstamp_needed);
1018 void __net_timestamp(struct sk_buff *skb)
1020 struct timeval tv;
1022 do_gettimeofday(&tv);
1023 skb_set_timestamp(skb, &tv);
1025 EXPORT_SYMBOL(__net_timestamp);
1027 static inline void net_timestamp(struct sk_buff *skb)
1029 if (atomic_read(&netstamp_needed))
1030 __net_timestamp(skb);
1031 else {
1032 skb->tstamp.off_sec = 0;
1033 skb->tstamp.off_usec = 0;
1037 /*
1038 * Support routine. Sends outgoing frames to any network
1039 * taps currently in use.
1040 */
1042 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1044 struct packet_type *ptype;
1046 net_timestamp(skb);
1048 rcu_read_lock();
1049 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1050 /* Never send packets back to the socket
1051 * they originated from - MvS (miquels@drinkel.ow.org)
1052 */
1053 if ((ptype->dev == dev || !ptype->dev) &&
1054 (ptype->af_packet_priv == NULL ||
1055 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1056 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1057 if (!skb2)
1058 break;
1060 /* skb->nh should be correctly
1061 set by sender, so that the second statement is
1062 just protection against buggy protocols.
1063 */
1064 skb2->mac.raw = skb2->data;
1066 if (skb2->nh.raw < skb2->data ||
1067 skb2->nh.raw > skb2->tail) {
1068 if (net_ratelimit())
1069 printk(KERN_CRIT "protocol %04x is "
1070 "buggy, dev %s\n",
1071 skb2->protocol, dev->name);
1072 skb2->nh.raw = skb2->data;
1075 skb2->h.raw = skb2->nh.raw;
1076 skb2->pkt_type = PACKET_OUTGOING;
1077 ptype->func(skb2, skb->dev, ptype, skb->dev);
1080 rcu_read_unlock();
1083 /*
1084 * Invalidate hardware checksum when packet is to be mangled, and
1085 * complete checksum manually on outgoing path.
1086 */
1087 int skb_checksum_help(struct sk_buff *skb, int inward)
1089 unsigned int csum;
1090 int ret = 0, offset = skb->h.raw - skb->data;
1092 if (inward)
1093 goto out_set_summed;
1095 if (unlikely(skb_shinfo(skb)->gso_size)) {
1096 static int warned;
1098 WARN_ON(!warned);
1099 warned = 1;
1101 /* Let GSO fix up the checksum. */
1102 goto out_set_summed;
1105 if (skb_cloned(skb)) {
1106 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1107 if (ret)
1108 goto out;
1111 BUG_ON(offset > (int)skb->len);
1112 csum = skb_checksum(skb, offset, skb->len-offset, 0);
1114 offset = skb->tail - skb->h.raw;
1115 BUG_ON(offset <= 0);
1116 BUG_ON(skb->csum + 2 > offset);
1118 *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
1120 out_set_summed:
1121 skb->ip_summed = CHECKSUM_NONE;
1122 out:
1123 return ret;
1126 /**
1127 * skb_gso_segment - Perform segmentation on skb.
1128 * @skb: buffer to segment
1129 * @features: features for the output path (see dev->features)
1131 * This function segments the given skb and returns a list of segments.
1133 * It may return NULL if the skb requires no segmentation. This is
1134 * only possible when GSO is used for verifying header integrity.
1135 */
1136 struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1138 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1139 struct packet_type *ptype;
1140 int type = skb->protocol;
1141 int err;
1143 BUG_ON(skb_shinfo(skb)->frag_list);
1145 skb->mac.raw = skb->data;
1146 skb->mac_len = skb->nh.raw - skb->data;
1147 __skb_pull(skb, skb->mac_len);
1149 if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
1150 static int warned;
1152 WARN_ON(!warned);
1153 warned = 1;
1155 if (skb_header_cloned(skb) &&
1156 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1157 return ERR_PTR(err);
1160 rcu_read_lock();
1161 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
1162 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1163 if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
1164 err = ptype->gso_send_check(skb);
1165 segs = ERR_PTR(err);
1166 if (err || skb_gso_ok(skb, features))
1167 break;
1168 __skb_push(skb, skb->data - skb->nh.raw);
1170 segs = ptype->gso_segment(skb, features);
1171 break;
1174 rcu_read_unlock();
1176 __skb_push(skb, skb->data - skb->mac.raw);
1178 return segs;
1181 EXPORT_SYMBOL(skb_gso_segment);
1183 /* Take action when hardware reception checksum errors are detected. */
1184 #ifdef CONFIG_BUG
1185 void netdev_rx_csum_fault(struct net_device *dev)
1187 if (net_ratelimit()) {
1188 printk(KERN_ERR "%s: hw csum failure.\n",
1189 dev ? dev->name : "<unknown>");
1190 dump_stack();
1193 EXPORT_SYMBOL(netdev_rx_csum_fault);
1194 #endif
1196 #ifdef CONFIG_HIGHMEM
1197 /* Actually, we should eliminate this check as soon as we know, that:
1198 * 1. IOMMU is present and allows to map all the memory.
1199 * 2. No high memory really exists on this machine.
1200 */
1202 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1204 int i;
1206 if (dev->features & NETIF_F_HIGHDMA)
1207 return 0;
1209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1210 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1211 return 1;
1213 return 0;
1215 #else
1216 #define illegal_highdma(dev, skb) (0)
1217 #endif
1219 struct dev_gso_cb {
1220 void (*destructor)(struct sk_buff *skb);
1221 };
1223 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1225 static void dev_gso_skb_destructor(struct sk_buff *skb)
1227 struct dev_gso_cb *cb;
1229 do {
1230 struct sk_buff *nskb = skb->next;
1232 skb->next = nskb->next;
1233 nskb->next = NULL;
1234 kfree_skb(nskb);
1235 } while (skb->next);
1237 cb = DEV_GSO_CB(skb);
1238 if (cb->destructor)
1239 cb->destructor(skb);
1242 /**
1243 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1244 * @skb: buffer to segment
1246 * This function segments the given skb and stores the list of segments
1247 * in skb->next.
1248 */
1249 static int dev_gso_segment(struct sk_buff *skb)
1251 struct net_device *dev = skb->dev;
1252 struct sk_buff *segs;
1253 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1254 NETIF_F_SG : 0);
1256 segs = skb_gso_segment(skb, features);
1258 /* Verifying header integrity only. */
1259 if (!segs)
1260 return 0;
1262 if (unlikely(IS_ERR(segs)))
1263 return PTR_ERR(segs);
1265 skb->next = segs;
1266 DEV_GSO_CB(skb)->destructor = skb->destructor;
1267 skb->destructor = dev_gso_skb_destructor;
1269 return 0;
1272 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1274 if (likely(!skb->next)) {
1275 if (netdev_nit)
1276 dev_queue_xmit_nit(skb, dev);
1278 if (netif_needs_gso(dev, skb)) {
1279 if (unlikely(dev_gso_segment(skb)))
1280 goto out_kfree_skb;
1281 if (skb->next)
1282 goto gso;
1285 return dev->hard_start_xmit(skb, dev);
1288 gso:
1289 do {
1290 struct sk_buff *nskb = skb->next;
1291 int rc;
1293 skb->next = nskb->next;
1294 nskb->next = NULL;
1295 rc = dev->hard_start_xmit(nskb, dev);
1296 if (unlikely(rc)) {
1297 nskb->next = skb->next;
1298 skb->next = nskb;
1299 return rc;
1301 if (unlikely(netif_queue_stopped(dev) && skb->next))
1302 return NETDEV_TX_BUSY;
1303 } while (skb->next);
1305 skb->destructor = DEV_GSO_CB(skb)->destructor;
1307 out_kfree_skb:
1308 kfree_skb(skb);
1309 return 0;
1312 #define HARD_TX_LOCK(dev, cpu) { \
1313 if ((dev->features & NETIF_F_LLTX) == 0) { \
1314 netif_tx_lock(dev); \
1315 } \
1318 #define HARD_TX_UNLOCK(dev) { \
1319 if ((dev->features & NETIF_F_LLTX) == 0) { \
1320 netif_tx_unlock(dev); \
1321 } \
1324 #ifdef CONFIG_XEN
1325 inline int skb_checksum_setup(struct sk_buff *skb)
1327 if (skb->proto_csum_blank) {
1328 if (skb->protocol != htons(ETH_P_IP))
1329 goto out;
1330 skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
1331 if (skb->h.raw >= skb->tail)
1332 goto out;
1333 switch (skb->nh.iph->protocol) {
1334 case IPPROTO_TCP:
1335 skb->csum = offsetof(struct tcphdr, check);
1336 break;
1337 case IPPROTO_UDP:
1338 skb->csum = offsetof(struct udphdr, check);
1339 break;
1340 default:
1341 if (net_ratelimit())
1342 printk(KERN_ERR "Attempting to checksum a non-"
1343 "TCP/UDP packet, dropping a protocol"
1344 " %d packet", skb->nh.iph->protocol);
1345 goto out;
1347 if ((skb->h.raw + skb->csum + 2) > skb->tail)
1348 goto out;
1349 skb->ip_summed = CHECKSUM_HW;
1350 skb->proto_csum_blank = 0;
1352 return 0;
1353 out:
1354 return -EPROTO;
1356 #else
1357 inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
1358 #endif
1361 /**
1362 * dev_queue_xmit - transmit a buffer
1363 * @skb: buffer to transmit
1365 * Queue a buffer for transmission to a network device. The caller must
1366 * have set the device and priority and built the buffer before calling
1367 * this function. The function can be called from an interrupt.
1369 * A negative errno code is returned on a failure. A success does not
1370 * guarantee the frame will be transmitted as it may be dropped due
1371 * to congestion or traffic shaping.
1373 * -----------------------------------------------------------------------------------
1374 * I notice this method can also return errors from the queue disciplines,
1375 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1376 * be positive.
1378 * Regardless of the return value, the skb is consumed, so it is currently
1379 * difficult to retry a send to this method. (You can bump the ref count
1380 * before sending to hold a reference for retry if you are careful.)
1382 * When calling this method, interrupts MUST be enabled. This is because
1383 * the BH enable code must have IRQs enabled so that it will not deadlock.
1384 * --BLG
1385 */
1387 int dev_queue_xmit(struct sk_buff *skb)
1389 struct net_device *dev = skb->dev;
1390 struct Qdisc *q;
1391 int rc = -ENOMEM;
1393 /* If a checksum-deferred packet is forwarded to a device that needs a
1394 * checksum, correct the pointers and force checksumming.
1395 */
1396 if (skb_checksum_setup(skb))
1397 goto out_kfree_skb;
1399 /* GSO will handle the following emulations directly. */
1400 if (netif_needs_gso(dev, skb))
1401 goto gso;
1403 if (skb_shinfo(skb)->frag_list &&
1404 !(dev->features & NETIF_F_FRAGLIST) &&
1405 __skb_linearize(skb))
1406 goto out_kfree_skb;
1408 /* Fragmented skb is linearized if device does not support SG,
1409 * or if at least one of fragments is in highmem and device
1410 * does not support DMA from it.
1411 */
1412 if (skb_shinfo(skb)->nr_frags &&
1413 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1414 __skb_linearize(skb))
1415 goto out_kfree_skb;
1417 /* If packet is not checksummed and device does not support
1418 * checksumming for this protocol, complete checksumming here.
1419 */
1420 if (skb->ip_summed == CHECKSUM_HW &&
1421 (!(dev->features & NETIF_F_GEN_CSUM) &&
1422 (!(dev->features & NETIF_F_IP_CSUM) ||
1423 skb->protocol != htons(ETH_P_IP))))
1424 if (skb_checksum_help(skb, 0))
1425 goto out_kfree_skb;
1427 gso:
1428 spin_lock_prefetch(&dev->queue_lock);
1430 /* Disable soft irqs for various locks below. Also
1431 * stops preemption for RCU.
1432 */
1433 rcu_read_lock_bh();
1435 /* Updates of qdisc are serialized by queue_lock.
1436 * The struct Qdisc which is pointed to by qdisc is now a
1437 * rcu structure - it may be accessed without acquiring
1438 * a lock (but the structure may be stale.) The freeing of the
1439 * qdisc will be deferred until it's known that there are no
1440 * more references to it.
1442 * If the qdisc has an enqueue function, we still need to
1443 * hold the queue_lock before calling it, since queue_lock
1444 * also serializes access to the device queue.
1445 */
1447 q = rcu_dereference(dev->qdisc);
1448 #ifdef CONFIG_NET_CLS_ACT
1449 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1450 #endif
1451 if (q->enqueue) {
1452 /* Grab device queue */
1453 spin_lock(&dev->queue_lock);
1455 rc = q->enqueue(skb, q);
1457 qdisc_run(dev);
1459 spin_unlock(&dev->queue_lock);
1460 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1461 goto out;
1464 /* The device has no queue. Common case for software devices:
1465 loopback, all the sorts of tunnels...
1467 Really, it is unlikely that netif_tx_lock protection is necessary
1468 here. (f.e. loopback and IP tunnels are clean ignoring statistics
1469 counters.)
1470 However, it is possible, that they rely on protection
1471 made by us here.
1473 Check this and shot the lock. It is not prone from deadlocks.
1474 Either shot noqueue qdisc, it is even simpler 8)
1475 */
1476 if (dev->flags & IFF_UP) {
1477 int cpu = smp_processor_id(); /* ok because BHs are off */
1479 if (dev->xmit_lock_owner != cpu) {
1481 HARD_TX_LOCK(dev, cpu);
1483 if (!netif_queue_stopped(dev)) {
1484 rc = 0;
1485 if (!dev_hard_start_xmit(skb, dev)) {
1486 HARD_TX_UNLOCK(dev);
1487 goto out;
1490 HARD_TX_UNLOCK(dev);
1491 if (net_ratelimit())
1492 printk(KERN_CRIT "Virtual device %s asks to "
1493 "queue packet!\n", dev->name);
1494 } else {
1495 /* Recursion is detected! It is possible,
1496 * unfortunately */
1497 if (net_ratelimit())
1498 printk(KERN_CRIT "Dead loop on virtual device "
1499 "%s, fix it urgently!\n", dev->name);
1503 rc = -ENETDOWN;
1504 rcu_read_unlock_bh();
1506 out_kfree_skb:
1507 kfree_skb(skb);
1508 return rc;
1509 out:
1510 rcu_read_unlock_bh();
1511 return rc;
1515 /*=======================================================================
1516 Receiver routines
1517 =======================================================================*/
1519 int netdev_max_backlog = 1000;
1520 int netdev_budget = 300;
1521 int weight_p = 64; /* old backlog weight */
1523 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1526 /**
1527 * netif_rx - post buffer to the network code
1528 * @skb: buffer to post
1530 * This function receives a packet from a device driver and queues it for
1531 * the upper (protocol) levels to process. It always succeeds. The buffer
1532 * may be dropped during processing for congestion control or by the
1533 * protocol layers.
1535 * return values:
1536 * NET_RX_SUCCESS (no congestion)
1537 * NET_RX_CN_LOW (low congestion)
1538 * NET_RX_CN_MOD (moderate congestion)
1539 * NET_RX_CN_HIGH (high congestion)
1540 * NET_RX_DROP (packet was dropped)
1542 */
1544 int netif_rx(struct sk_buff *skb)
1546 struct softnet_data *queue;
1547 unsigned long flags;
1549 /* if netpoll wants it, pretend we never saw it */
1550 if (netpoll_rx(skb))
1551 return NET_RX_DROP;
1553 if (!skb->tstamp.off_sec)
1554 net_timestamp(skb);
1556 /*
1557 * The code is rearranged so that the path is the most
1558 * short when CPU is congested, but is still operating.
1559 */
1560 local_irq_save(flags);
1561 queue = &__get_cpu_var(softnet_data);
1563 __get_cpu_var(netdev_rx_stat).total++;
1564 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1565 if (queue->input_pkt_queue.qlen) {
1566 enqueue:
1567 dev_hold(skb->dev);
1568 __skb_queue_tail(&queue->input_pkt_queue, skb);
1569 local_irq_restore(flags);
1570 return NET_RX_SUCCESS;
1573 netif_rx_schedule(&queue->backlog_dev);
1574 goto enqueue;
1577 __get_cpu_var(netdev_rx_stat).dropped++;
1578 local_irq_restore(flags);
1580 kfree_skb(skb);
1581 return NET_RX_DROP;
1584 int netif_rx_ni(struct sk_buff *skb)
1586 int err;
1588 preempt_disable();
1589 err = netif_rx(skb);
1590 if (local_softirq_pending())
1591 do_softirq();
1592 preempt_enable();
1594 return err;
1597 EXPORT_SYMBOL(netif_rx_ni);
1599 static inline struct net_device *skb_bond(struct sk_buff *skb)
1601 struct net_device *dev = skb->dev;
1603 if (dev->master)
1604 skb->dev = dev->master;
1606 return dev;
1609 static void net_tx_action(struct softirq_action *h)
1611 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1613 if (sd->completion_queue) {
1614 struct sk_buff *clist;
1616 local_irq_disable();
1617 clist = sd->completion_queue;
1618 sd->completion_queue = NULL;
1619 local_irq_enable();
1621 while (clist) {
1622 struct sk_buff *skb = clist;
1623 clist = clist->next;
1625 BUG_TRAP(!atomic_read(&skb->users));
1626 __kfree_skb(skb);
1630 if (sd->output_queue) {
1631 struct net_device *head;
1633 local_irq_disable();
1634 head = sd->output_queue;
1635 sd->output_queue = NULL;
1636 local_irq_enable();
1638 while (head) {
1639 struct net_device *dev = head;
1640 head = head->next_sched;
1642 smp_mb__before_clear_bit();
1643 clear_bit(__LINK_STATE_SCHED, &dev->state);
1645 if (spin_trylock(&dev->queue_lock)) {
1646 qdisc_run(dev);
1647 spin_unlock(&dev->queue_lock);
1648 } else {
1649 netif_schedule(dev);
1655 static __inline__ int deliver_skb(struct sk_buff *skb,
1656 struct packet_type *pt_prev,
1657 struct net_device *orig_dev)
1659 atomic_inc(&skb->users);
1660 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1663 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
1664 int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
1665 struct net_bridge;
1666 struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1667 unsigned char *addr);
1668 void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
1670 static __inline__ int handle_bridge(struct sk_buff **pskb,
1671 struct packet_type **pt_prev, int *ret,
1672 struct net_device *orig_dev)
1674 struct net_bridge_port *port;
1676 if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
1677 (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
1678 return 0;
1680 if (*pt_prev) {
1681 *ret = deliver_skb(*pskb, *pt_prev, orig_dev);
1682 *pt_prev = NULL;
1685 return br_handle_frame_hook(port, pskb);
1687 #else
1688 #define handle_bridge(skb, pt_prev, ret, orig_dev) (0)
1689 #endif
1691 #ifdef CONFIG_NET_CLS_ACT
1692 /* TODO: Maybe we should just force sch_ingress to be compiled in
1693 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
1694 * a compare and 2 stores extra right now if we dont have it on
1695 * but have CONFIG_NET_CLS_ACT
1696 * NOTE: This doesnt stop any functionality; if you dont have
1697 * the ingress scheduler, you just cant add policies on ingress.
1699 */
1700 static int ing_filter(struct sk_buff *skb)
1702 struct Qdisc *q;
1703 struct net_device *dev = skb->dev;
1704 int result = TC_ACT_OK;
1706 if (dev->qdisc_ingress) {
1707 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
1708 if (MAX_RED_LOOP < ttl++) {
1709 printk("Redir loop detected Dropping packet (%s->%s)\n",
1710 skb->input_dev->name, skb->dev->name);
1711 return TC_ACT_SHOT;
1714 skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
1716 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
1718 spin_lock(&dev->ingress_lock);
1719 if ((q = dev->qdisc_ingress) != NULL)
1720 result = q->enqueue(skb, q);
1721 spin_unlock(&dev->ingress_lock);
1725 return result;
1727 #endif
1729 int netif_receive_skb(struct sk_buff *skb)
1731 struct packet_type *ptype, *pt_prev;
1732 struct net_device *orig_dev;
1733 int ret = NET_RX_DROP;
1734 unsigned short type;
1736 /* if we've gotten here through NAPI, check netpoll */
1737 if (skb->dev->poll && netpoll_rx(skb))
1738 return NET_RX_DROP;
1740 if (!skb->tstamp.off_sec)
1741 net_timestamp(skb);
1743 if (!skb->input_dev)
1744 skb->input_dev = skb->dev;
1746 orig_dev = skb_bond(skb);
1748 __get_cpu_var(netdev_rx_stat).total++;
1750 skb->h.raw = skb->nh.raw = skb->data;
1751 skb->mac_len = skb->nh.raw - skb->mac.raw;
1753 pt_prev = NULL;
1755 rcu_read_lock();
1757 #ifdef CONFIG_NET_CLS_ACT
1758 if (skb->tc_verd & TC_NCLS) {
1759 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
1760 goto ncls;
1762 #endif
1764 #ifdef CONFIG_XEN
1765 switch (skb->ip_summed) {
1766 case CHECKSUM_UNNECESSARY:
1767 skb->proto_data_valid = 1;
1768 break;
1769 case CHECKSUM_HW:
1770 /* XXX Implement me. */
1771 default:
1772 skb->proto_data_valid = 0;
1773 break;
1775 #endif
1777 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1778 if (!ptype->dev || ptype->dev == skb->dev) {
1779 if (pt_prev)
1780 ret = deliver_skb(skb, pt_prev, orig_dev);
1781 pt_prev = ptype;
1785 #ifdef CONFIG_NET_CLS_ACT
1786 if (pt_prev) {
1787 ret = deliver_skb(skb, pt_prev, orig_dev);
1788 pt_prev = NULL; /* noone else should process this after*/
1789 } else {
1790 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
1793 ret = ing_filter(skb);
1795 if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
1796 kfree_skb(skb);
1797 goto out;
1800 skb->tc_verd = 0;
1801 ncls:
1802 #endif
1804 handle_diverter(skb);
1806 if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
1807 goto out;
1809 type = skb->protocol;
1810 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
1811 if (ptype->type == type &&
1812 (!ptype->dev || ptype->dev == skb->dev)) {
1813 if (pt_prev)
1814 ret = deliver_skb(skb, pt_prev, orig_dev);
1815 pt_prev = ptype;
1819 if (pt_prev) {
1820 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1821 } else {
1822 kfree_skb(skb);
1823 /* Jamal, now you will not able to escape explaining
1824 * me how you were going to use this. :-)
1825 */
1826 ret = NET_RX_DROP;
1829 out:
1830 rcu_read_unlock();
1831 return ret;
1834 static int process_backlog(struct net_device *backlog_dev, int *budget)
1836 int work = 0;
1837 int quota = min(backlog_dev->quota, *budget);
1838 struct softnet_data *queue = &__get_cpu_var(softnet_data);
1839 unsigned long start_time = jiffies;
1841 backlog_dev->weight = weight_p;
1842 for (;;) {
1843 struct sk_buff *skb;
1844 struct net_device *dev;
1846 local_irq_disable();
1847 skb = __skb_dequeue(&queue->input_pkt_queue);
1848 if (!skb)
1849 goto job_done;
1850 local_irq_enable();
1852 dev = skb->dev;
1854 netif_receive_skb(skb);
1856 dev_put(dev);
1858 work++;
1860 if (work >= quota || jiffies - start_time > 1)
1861 break;
1865 backlog_dev->quota -= work;
1866 *budget -= work;
1867 return -1;
1869 job_done:
1870 backlog_dev->quota -= work;
1871 *budget -= work;
1873 list_del(&backlog_dev->poll_list);
1874 smp_mb__before_clear_bit();
1875 netif_poll_enable(backlog_dev);
1877 local_irq_enable();
1878 return 0;
1881 static void net_rx_action(struct softirq_action *h)
1883 struct softnet_data *queue = &__get_cpu_var(softnet_data);
1884 unsigned long start_time = jiffies;
1885 int budget = netdev_budget;
1886 void *have;
1888 local_irq_disable();
1890 while (!list_empty(&queue->poll_list)) {
1891 struct net_device *dev;
1893 if (budget <= 0 || jiffies - start_time > 1)
1894 goto softnet_break;
1896 local_irq_enable();
1898 dev = list_entry(queue->poll_list.next,
1899 struct net_device, poll_list);
1900 have = netpoll_poll_lock(dev);
1902 if (dev->quota <= 0 || dev->poll(dev, &budget)) {
1903 netpoll_poll_unlock(have);
1904 local_irq_disable();
1905 list_del(&dev->poll_list);
1906 list_add_tail(&dev->poll_list, &queue->poll_list);
1907 if (dev->quota < 0)
1908 dev->quota += dev->weight;
1909 else
1910 dev->quota = dev->weight;
1911 } else {
1912 netpoll_poll_unlock(have);
1913 dev_put(dev);
1914 local_irq_disable();
1917 out:
1918 local_irq_enable();
1919 return;
1921 softnet_break:
1922 __get_cpu_var(netdev_rx_stat).time_squeeze++;
1923 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
1924 goto out;
1927 static gifconf_func_t * gifconf_list [NPROTO];
1929 /**
1930 * register_gifconf - register a SIOCGIF handler
1931 * @family: Address family
1932 * @gifconf: Function handler
1934 * Register protocol dependent address dumping routines. The handler
1935 * that is passed must not be freed or reused until it has been replaced
1936 * by another handler.
1937 */
1938 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
1940 if (family >= NPROTO)
1941 return -EINVAL;
1942 gifconf_list[family] = gifconf;
1943 return 0;
1947 /*
1948 * Map an interface index to its name (SIOCGIFNAME)
1949 */
1951 /*
1952 * We need this ioctl for efficient implementation of the
1953 * if_indextoname() function required by the IPv6 API. Without
1954 * it, we would have to search all the interfaces to find a
1955 * match. --pb
1956 */
1958 static int dev_ifname(struct ifreq __user *arg)
1960 struct net_device *dev;
1961 struct ifreq ifr;
1963 /*
1964 * Fetch the caller's info block.
1965 */
1967 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
1968 return -EFAULT;
1970 read_lock(&dev_base_lock);
1971 dev = __dev_get_by_index(ifr.ifr_ifindex);
1972 if (!dev) {
1973 read_unlock(&dev_base_lock);
1974 return -ENODEV;
1977 strcpy(ifr.ifr_name, dev->name);
1978 read_unlock(&dev_base_lock);
1980 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1981 return -EFAULT;
1982 return 0;
1985 /*
1986 * Perform a SIOCGIFCONF call. This structure will change
1987 * size eventually, and there is nothing I can do about it.
1988 * Thus we will need a 'compatibility mode'.
1989 */
1991 static int dev_ifconf(char __user *arg)
1993 struct ifconf ifc;
1994 struct net_device *dev;
1995 char __user *pos;
1996 int len;
1997 int total;
1998 int i;
2000 /*
2001 * Fetch the caller's info block.
2002 */
2004 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2005 return -EFAULT;
2007 pos = ifc.ifc_buf;
2008 len = ifc.ifc_len;
2010 /*
2011 * Loop over the interfaces, and write an info block for each.
2012 */
2014 total = 0;
2015 for (dev = dev_base; dev; dev = dev->next) {
2016 for (i = 0; i < NPROTO; i++) {
2017 if (gifconf_list[i]) {
2018 int done;
2019 if (!pos)
2020 done = gifconf_list[i](dev, NULL, 0);
2021 else
2022 done = gifconf_list[i](dev, pos + total,
2023 len - total);
2024 if (done < 0)
2025 return -EFAULT;
2026 total += done;
2031 /*
2032 * All done. Write the updated control block back to the caller.
2033 */
2034 ifc.ifc_len = total;
2036 /*
2037 * Both BSD and Solaris return 0 here, so we do too.
2038 */
2039 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2042 #ifdef CONFIG_PROC_FS
2043 /*
2044 * This is invoked by the /proc filesystem handler to display a device
2045 * in detail.
2046 */
2047 static __inline__ struct net_device *dev_get_idx(loff_t pos)
2049 struct net_device *dev;
2050 loff_t i;
2052 for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
2054 return i == pos ? dev : NULL;
2057 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2059 read_lock(&dev_base_lock);
2060 return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
2063 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2065 ++*pos;
2066 return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
2069 void dev_seq_stop(struct seq_file *seq, void *v)
2071 read_unlock(&dev_base_lock);
2074 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2076 if (dev->get_stats) {
2077 struct net_device_stats *stats = dev->get_stats(dev);
2079 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2080 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2081 dev->name, stats->rx_bytes, stats->rx_packets,
2082 stats->rx_errors,
2083 stats->rx_dropped + stats->rx_missed_errors,
2084 stats->rx_fifo_errors,
2085 stats->rx_length_errors + stats->rx_over_errors +
2086 stats->rx_crc_errors + stats->rx_frame_errors,
2087 stats->rx_compressed, stats->multicast,
2088 stats->tx_bytes, stats->tx_packets,
2089 stats->tx_errors, stats->tx_dropped,
2090 stats->tx_fifo_errors, stats->collisions,
2091 stats->tx_carrier_errors +
2092 stats->tx_aborted_errors +
2093 stats->tx_window_errors +
2094 stats->tx_heartbeat_errors,
2095 stats->tx_compressed);
2096 } else
2097 seq_printf(seq, "%6s: No statistics available.\n", dev->name);
2100 /*
2101 * Called from the PROCfs module. This now uses the new arbitrary sized
2102 * /proc/net interface to create /proc/net/dev
2103 */
2104 static int dev_seq_show(struct seq_file *seq, void *v)
2106 if (v == SEQ_START_TOKEN)
2107 seq_puts(seq, "Inter-| Receive "
2108 " | Transmit\n"
2109 " face |bytes packets errs drop fifo frame "
2110 "compressed multicast|bytes packets errs "
2111 "drop fifo colls carrier compressed\n");
2112 else
2113 dev_seq_printf_stats(seq, v);
2114 return 0;
2117 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2119 struct netif_rx_stats *rc = NULL;
2121 while (*pos < NR_CPUS)
2122 if (cpu_online(*pos)) {
2123 rc = &per_cpu(netdev_rx_stat, *pos);
2124 break;
2125 } else
2126 ++*pos;
2127 return rc;
2130 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2132 return softnet_get_online(pos);
2135 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2137 ++*pos;
2138 return softnet_get_online(pos);
2141 static void softnet_seq_stop(struct seq_file *seq, void *v)
2145 static int softnet_seq_show(struct seq_file *seq, void *v)
2147 struct netif_rx_stats *s = v;
2149 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2150 s->total, s->dropped, s->time_squeeze, 0,
2151 0, 0, 0, 0, /* was fastroute */
2152 s->cpu_collision);
2153 return 0;
2156 static struct seq_operations dev_seq_ops = {
2157 .start = dev_seq_start,
2158 .next = dev_seq_next,
2159 .stop = dev_seq_stop,
2160 .show = dev_seq_show,
2161 };
2163 static int dev_seq_open(struct inode *inode, struct file *file)
2165 return seq_open(file, &dev_seq_ops);
2168 static struct file_operations dev_seq_fops = {
2169 .owner = THIS_MODULE,
2170 .open = dev_seq_open,
2171 .read = seq_read,
2172 .llseek = seq_lseek,
2173 .release = seq_release,
2174 };
2176 static struct seq_operations softnet_seq_ops = {
2177 .start = softnet_seq_start,
2178 .next = softnet_seq_next,
2179 .stop = softnet_seq_stop,
2180 .show = softnet_seq_show,
2181 };
2183 static int softnet_seq_open(struct inode *inode, struct file *file)
2185 return seq_open(file, &softnet_seq_ops);
2188 static struct file_operations softnet_seq_fops = {
2189 .owner = THIS_MODULE,
2190 .open = softnet_seq_open,
2191 .read = seq_read,
2192 .llseek = seq_lseek,
2193 .release = seq_release,
2194 };
2196 #ifdef WIRELESS_EXT
2197 extern int wireless_proc_init(void);
2198 #else
2199 #define wireless_proc_init() 0
2200 #endif
2202 static int __init dev_proc_init(void)
2204 int rc = -ENOMEM;
2206 if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
2207 goto out;
2208 if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
2209 goto out_dev;
2210 if (wireless_proc_init())
2211 goto out_softnet;
2212 rc = 0;
2213 out:
2214 return rc;
2215 out_softnet:
2216 proc_net_remove("softnet_stat");
2217 out_dev:
2218 proc_net_remove("dev");
2219 goto out;
2221 #else
2222 #define dev_proc_init() 0
2223 #endif /* CONFIG_PROC_FS */
2226 /**
2227 * netdev_set_master - set up master/slave pair
2228 * @slave: slave device
2229 * @master: new master device
2231 * Changes the master device of the slave. Pass %NULL to break the
2232 * bonding. The caller must hold the RTNL semaphore. On a failure
2233 * a negative errno code is returned. On success the reference counts
2234 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2235 * function returns zero.
2236 */
2237 int netdev_set_master(struct net_device *slave, struct net_device *master)
2239 struct net_device *old = slave->master;
2241 ASSERT_RTNL();
2243 if (master) {
2244 if (old)
2245 return -EBUSY;
2246 dev_hold(master);
2249 slave->master = master;
2251 synchronize_net();
2253 if (old)
2254 dev_put(old);
2256 if (master)
2257 slave->flags |= IFF_SLAVE;
2258 else
2259 slave->flags &= ~IFF_SLAVE;
2261 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2262 return 0;
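/*
 * Sketch of the expected calling pattern, loosely modelled on what a
 * bonding-style driver does; the function names here are illustrative, not
 * from this file. RTNL must be held, and pairing an already-paired slave
 * fails with -EBUSY (see the check above).
 */
#if 0
static int example_enslave(struct net_device *bond, struct net_device *slave)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave, bond);
	rtnl_unlock();
	return err;
}

static void example_release(struct net_device *slave)
{
	rtnl_lock();
	netdev_set_master(slave, NULL);	/* break the pairing; drops the old ref */
	rtnl_unlock();
}
#endif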
2265 /**
2266 * dev_set_promiscuity - update promiscuity count on a device
2267 * @dev: device
2268 * @inc: modifier
2270 * Add or remove promiscuity from a device. While the count in the device
2271 * remains above zero the interface remains promiscuous. Once it hits zero
2272 * the device reverts to normal filtering operation. A negative @inc
2273 * value is used to drop promiscuity on the device.
2274 */
2275 void dev_set_promiscuity(struct net_device *dev, int inc)
2277 unsigned short old_flags = dev->flags;
2279 if ((dev->promiscuity += inc) == 0)
2280 dev->flags &= ~IFF_PROMISC;
2281 else
2282 dev->flags |= IFF_PROMISC;
2283 if (dev->flags != old_flags) {
2284 dev_mc_upload(dev);
2285 printk(KERN_INFO "device %s %s promiscuous mode\n",
2286 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2287 "left");
2291 /**
2292 * dev_set_allmulti - update allmulti count on a device
2293 * @dev: device
2294 * @inc: modifier
2296 * Add or remove reception of all multicast frames to a device. While the
2297 * count in the device remains above zero the interface stays subscribed
2298 * to all multicast frames. Once it hits zero the device reverts to normal
2299 * filtering operation. A negative @inc value is used to drop the counter
2300 * when releasing a resource needing all multicasts.
2301 */
2303 void dev_set_allmulti(struct net_device *dev, int inc)
2305 unsigned short old_flags = dev->flags;
2307 dev->flags |= IFF_ALLMULTI;
2308 if ((dev->allmulti += inc) == 0)
2309 dev->flags &= ~IFF_ALLMULTI;
2310 if (dev->flags ^ old_flags)
2311 dev_mc_upload(dev);
2314 unsigned dev_get_flags(const struct net_device *dev)
2316 unsigned flags;
2318 flags = (dev->flags & ~(IFF_PROMISC |
2319 IFF_ALLMULTI |
2320 IFF_RUNNING)) |
2321 (dev->gflags & (IFF_PROMISC |
2322 IFF_ALLMULTI));
2324 if (netif_running(dev) && netif_carrier_ok(dev))
2325 flags |= IFF_RUNNING;
2327 return flags;
2330 int dev_change_flags(struct net_device *dev, unsigned flags)
2332 int ret;
2333 int old_flags = dev->flags;
2335 /*
2336 * Set the flags on our device.
2337 */
2339 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2340 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2341 IFF_AUTOMEDIA)) |
2342 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2343 IFF_ALLMULTI));
2345 /*
2346 * Load in the correct multicast list now the flags have changed.
2347 */
2349 dev_mc_upload(dev);
2351 /*
2352 * Have we downed the interface? We handle IFF_UP ourselves
2353 * according to user attempts to set it, rather than blindly
2354 * setting it.
2355 */
2357 ret = 0;
2358 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
2359 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2361 if (!ret)
2362 dev_mc_upload(dev);
2365 if (dev->flags & IFF_UP &&
2366 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2367 IFF_VOLATILE)))
2368 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
2370 if ((flags ^ dev->gflags) & IFF_PROMISC) {
2371 int inc = (flags & IFF_PROMISC) ? +1 : -1;
2372 dev->gflags ^= IFF_PROMISC;
2373 dev_set_promiscuity(dev, inc);
2376 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
2377 is important. Some (broken) drivers set IFF_PROMISC when
2378 IFF_ALLMULTI is requested, without asking us and without reporting it.
2379 */
2380 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
2381 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
2382 dev->gflags ^= IFF_ALLMULTI;
2383 dev_set_allmulti(dev, inc);
2386 if (old_flags ^ dev->flags)
2387 rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
2389 return ret;
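/*
 * A minimal userspace sketch of driving the path above through SIOCSIFFLAGS:
 * a read-modify-write of ifr_flags with IFF_PROMISC lands in the
 * (flags ^ dev->gflags) & IFF_PROMISC branch, which in turn calls
 * dev_set_promiscuity(). Illustrative only.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int set_promisc(const char *name, int on)
{
	struct ifreq ifr;
	int ret = -1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {
		if (on)
			ifr.ifr_flags |= IFF_PROMISC;
		else
			ifr.ifr_flags &= ~IFF_PROMISC;
		ret = ioctl(fd, SIOCSIFFLAGS, &ifr);
	}
	close(fd);
	return ret;
}
#endif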
2392 int dev_set_mtu(struct net_device *dev, int new_mtu)
2394 int err;
2396 if (new_mtu == dev->mtu)
2397 return 0;
2399 /* MTU must be positive. */
2400 if (new_mtu < 0)
2401 return -EINVAL;
2403 if (!netif_device_present(dev))
2404 return -ENODEV;
2406 err = 0;
2407 if (dev->change_mtu)
2408 err = dev->change_mtu(dev, new_mtu);
2409 else
2410 dev->mtu = new_mtu;
2411 if (!err && dev->flags & IFF_UP)
2412 notifier_call_chain(&netdev_chain,
2413 NETDEV_CHANGEMTU, dev);
2414 return err;
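/*
 * The userspace request that reaches this function via dev_ifsioc()'s
 * SIOCSIFMTU case; note the driver's change_mtu hook may still reject a
 * value the generic check accepts. Fragment only, fd as in the sketches
 * above.
 */
#if 0
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_mtu = 1400;
	ioctl(fd, SIOCSIFMTU, &ifr);
#endif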
2417 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2419 int err;
2421 if (!dev->set_mac_address)
2422 return -EOPNOTSUPP;
2423 if (sa->sa_family != dev->type)
2424 return -EINVAL;
2425 if (!netif_device_present(dev))
2426 return -ENODEV;
2427 err = dev->set_mac_address(dev, sa);
2428 if (!err)
2429 notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
2430 return err;
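/*
 * The matching userspace request; sa_family must equal dev->type
 * (ARPHRD_ETHER, from <net/if_arp.h>, for ethernet) or the check above
 * returns -EINVAL. Fragment only, fd as in the sketches above.
 */
#if 0
	struct ifreq ifr;
	unsigned char mac[6] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
	memcpy(ifr.ifr_hwaddr.sa_data, mac, sizeof(mac));
	ioctl(fd, SIOCSIFHWADDR, &ifr);
#endif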
2433 /*
2434 * Perform the SIOCxIFxxx calls.
2435 */
2436 static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2438 int err;
2439 struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
2441 if (!dev)
2442 return -ENODEV;
2444 switch (cmd) {
2445 case SIOCGIFFLAGS: /* Get interface flags */
2446 ifr->ifr_flags = dev_get_flags(dev);
2447 return 0;
2449 case SIOCSIFFLAGS: /* Set interface flags */
2450 return dev_change_flags(dev, ifr->ifr_flags);
2452 case SIOCGIFMETRIC: /* Get the metric on the interface
2453 (currently unused) */
2454 ifr->ifr_metric = 0;
2455 return 0;
2457 case SIOCSIFMETRIC: /* Set the metric on the interface
2458 (currently unused) */
2459 return -EOPNOTSUPP;
2461 case SIOCGIFMTU: /* Get the MTU of a device */
2462 ifr->ifr_mtu = dev->mtu;
2463 return 0;
2465 case SIOCSIFMTU: /* Set the MTU of a device */
2466 return dev_set_mtu(dev, ifr->ifr_mtu);
2468 case SIOCGIFHWADDR:
2469 if (!dev->addr_len)
2470 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
2471 else
2472 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
2473 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2474 ifr->ifr_hwaddr.sa_family = dev->type;
2475 return 0;
2477 case SIOCSIFHWADDR:
2478 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
2480 case SIOCSIFHWBROADCAST:
2481 if (ifr->ifr_hwaddr.sa_family != dev->type)
2482 return -EINVAL;
2483 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2484 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2485 notifier_call_chain(&netdev_chain,
2486 NETDEV_CHANGEADDR, dev);
2487 return 0;
2489 case SIOCGIFMAP:
2490 ifr->ifr_map.mem_start = dev->mem_start;
2491 ifr->ifr_map.mem_end = dev->mem_end;
2492 ifr->ifr_map.base_addr = dev->base_addr;
2493 ifr->ifr_map.irq = dev->irq;
2494 ifr->ifr_map.dma = dev->dma;
2495 ifr->ifr_map.port = dev->if_port;
2496 return 0;
2498 case SIOCSIFMAP:
2499 if (dev->set_config) {
2500 if (!netif_device_present(dev))
2501 return -ENODEV;
2502 return dev->set_config(dev, &ifr->ifr_map);
2504 return -EOPNOTSUPP;
2506 case SIOCADDMULTI:
2507 if (!dev->set_multicast_list ||
2508 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2509 return -EINVAL;
2510 if (!netif_device_present(dev))
2511 return -ENODEV;
2512 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
2513 dev->addr_len, 1);
2515 case SIOCDELMULTI:
2516 if (!dev->set_multicast_list ||
2517 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2518 return -EINVAL;
2519 if (!netif_device_present(dev))
2520 return -ENODEV;
2521 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
2522 dev->addr_len, 1);
2524 case SIOCGIFINDEX:
2525 ifr->ifr_ifindex = dev->ifindex;
2526 return 0;
2528 case SIOCGIFTXQLEN:
2529 ifr->ifr_qlen = dev->tx_queue_len;
2530 return 0;
2532 case SIOCSIFTXQLEN:
2533 if (ifr->ifr_qlen < 0)
2534 return -EINVAL;
2535 dev->tx_queue_len = ifr->ifr_qlen;
2536 return 0;
2538 case SIOCSIFNAME:
2539 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
2540 return dev_change_name(dev, ifr->ifr_newname);
2542 /*
2543 * Unknown or private ioctl
2544 */
2546 default:
2547 if ((cmd >= SIOCDEVPRIVATE &&
2548 cmd <= SIOCDEVPRIVATE + 15) ||
2549 cmd == SIOCBONDENSLAVE ||
2550 cmd == SIOCBONDRELEASE ||
2551 cmd == SIOCBONDSETHWADDR ||
2552 cmd == SIOCBONDSLAVEINFOQUERY ||
2553 cmd == SIOCBONDINFOQUERY ||
2554 cmd == SIOCBONDCHANGEACTIVE ||
2555 cmd == SIOCGMIIPHY ||
2556 cmd == SIOCGMIIREG ||
2557 cmd == SIOCSMIIREG ||
2558 cmd == SIOCBRADDIF ||
2559 cmd == SIOCBRDELIF ||
2560 cmd == SIOCWANDEV) {
2561 err = -EOPNOTSUPP;
2562 if (dev->do_ioctl) {
2563 if (netif_device_present(dev))
2564 err = dev->do_ioctl(dev, ifr,
2565 cmd);
2566 else
2567 err = -ENODEV;
2569 } else
2570 err = -EINVAL;
2573 return err;
2576 /*
2577 * This function handles all "interface"-type I/O control requests. The actual
2578 * 'doing' part of this is dev_ifsioc above.
2579 */
2581 /**
2582 * dev_ioctl - network device ioctl
2583 * @cmd: command to issue
2584 * @arg: pointer to a struct ifreq in user space
2586 * Issue ioctl functions to devices. This is normally called by the
2587 * user space syscall interfaces but can sometimes be useful for
2588 * other purposes. The return value is the return from the syscall if
2589 * positive or a negative errno code on error.
2590 */
2592 int dev_ioctl(unsigned int cmd, void __user *arg)
2594 struct ifreq ifr;
2595 int ret;
2596 char *colon;
2598 /* One special case: SIOCGIFCONF takes ifconf argument
2599 and requires shared lock, because it sleeps writing
2600 to user space.
2601 */
2603 if (cmd == SIOCGIFCONF) {
2604 rtnl_shlock();
2605 ret = dev_ifconf((char __user *) arg);
2606 rtnl_shunlock();
2607 return ret;
2609 if (cmd == SIOCGIFNAME)
2610 return dev_ifname((struct ifreq __user *)arg);
2612 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2613 return -EFAULT;
2615 ifr.ifr_name[IFNAMSIZ-1] = 0;
2617 colon = strchr(ifr.ifr_name, ':');
2618 if (colon)
2619 *colon = 0;
2621 /*
2622 * See which interface the caller is talking about.
2623 */
2625 switch (cmd) {
2626 /*
2627 * These ioctl calls:
2628 * - can be done by all.
2629 * - atomic and do not require locking.
2630 * - return a value
2631 */
2632 case SIOCGIFFLAGS:
2633 case SIOCGIFMETRIC:
2634 case SIOCGIFMTU:
2635 case SIOCGIFHWADDR:
2636 case SIOCGIFSLAVE:
2637 case SIOCGIFMAP:
2638 case SIOCGIFINDEX:
2639 case SIOCGIFTXQLEN:
2640 dev_load(ifr.ifr_name);
2641 read_lock(&dev_base_lock);
2642 ret = dev_ifsioc(&ifr, cmd);
2643 read_unlock(&dev_base_lock);
2644 if (!ret) {
2645 if (colon)
2646 *colon = ':';
2647 if (copy_to_user(arg, &ifr,
2648 sizeof(struct ifreq)))
2649 ret = -EFAULT;
2651 return ret;
2653 case SIOCETHTOOL:
2654 dev_load(ifr.ifr_name);
2655 rtnl_lock();
2656 ret = dev_ethtool(&ifr);
2657 rtnl_unlock();
2658 if (!ret) {
2659 if (colon)
2660 *colon = ':';
2661 if (copy_to_user(arg, &ifr,
2662 sizeof(struct ifreq)))
2663 ret = -EFAULT;
2665 return ret;
2667 /*
2668 * These ioctl calls:
2669 * - require superuser power.
2670 * - require strict serialization.
2671 * - return a value
2672 */
2673 case SIOCGMIIPHY:
2674 case SIOCGMIIREG:
2675 case SIOCSIFNAME:
2676 if (!capable(CAP_NET_ADMIN))
2677 return -EPERM;
2678 dev_load(ifr.ifr_name);
2679 rtnl_lock();
2680 ret = dev_ifsioc(&ifr, cmd);
2681 rtnl_unlock();
2682 if (!ret) {
2683 if (colon)
2684 *colon = ':';
2685 if (copy_to_user(arg, &ifr,
2686 sizeof(struct ifreq)))
2687 ret = -EFAULT;
2689 return ret;
2691 /*
2692 * These ioctl calls:
2693 * - require superuser power.
2694 * - require strict serialization.
2695 * - do not return a value
2696 */
2697 case SIOCSIFFLAGS:
2698 case SIOCSIFMETRIC:
2699 case SIOCSIFMTU:
2700 case SIOCSIFMAP:
2701 case SIOCSIFHWADDR:
2702 case SIOCSIFSLAVE:
2703 case SIOCADDMULTI:
2704 case SIOCDELMULTI:
2705 case SIOCSIFHWBROADCAST:
2706 case SIOCSIFTXQLEN:
2707 case SIOCSMIIREG:
2708 case SIOCBONDENSLAVE:
2709 case SIOCBONDRELEASE:
2710 case SIOCBONDSETHWADDR:
2711 case SIOCBONDCHANGEACTIVE:
2712 case SIOCBRADDIF:
2713 case SIOCBRDELIF:
2714 if (!capable(CAP_NET_ADMIN))
2715 return -EPERM;
2716 /* fall through */
2717 case SIOCBONDSLAVEINFOQUERY:
2718 case SIOCBONDINFOQUERY:
2719 dev_load(ifr.ifr_name);
2720 rtnl_lock();
2721 ret = dev_ifsioc(&ifr, cmd);
2722 rtnl_unlock();
2723 return ret;
2725 case SIOCGIFMEM:
2726 /* Get the per device memory space. We can add this but
2727 * currently do not support it */
2728 case SIOCSIFMEM:
2729 /* Set the per device memory buffer space.
2730 * Not applicable in our case */
2731 case SIOCSIFLINK:
2732 return -EINVAL;
2734 /*
2735 * Unknown or private ioctl.
2736 */
2737 default:
2738 if (cmd == SIOCWANDEV ||
2739 (cmd >= SIOCDEVPRIVATE &&
2740 cmd <= SIOCDEVPRIVATE + 15)) {
2741 dev_load(ifr.ifr_name);
2742 rtnl_lock();
2743 ret = dev_ifsioc(&ifr, cmd);
2744 rtnl_unlock();
2745 if (!ret && copy_to_user(arg, &ifr,
2746 sizeof(struct ifreq)))
2747 ret = -EFAULT;
2748 return ret;
2750 #ifdef WIRELESS_EXT
2751 /* Take care of Wireless Extensions */
2752 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2753 /* If command is `set a parameter', or
2754 * `get the encoding parameters', check if
2755 * the user has the right to do it */
2756 if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
2757 if (!capable(CAP_NET_ADMIN))
2758 return -EPERM;
2760 dev_load(ifr.ifr_name);
2761 rtnl_lock();
2762 /* Follow me in net/core/wireless.c */
2763 ret = wireless_process_ioctl(&ifr, cmd);
2764 rtnl_unlock();
2765 if (IW_IS_GET(cmd) &&
2766 copy_to_user(arg, &ifr,
2767 sizeof(struct ifreq)))
2768 ret = -EFAULT;
2769 return ret;
2771 #endif /* WIRELESS_EXT */
2772 return -EINVAL;
2777 /**
2778 * dev_new_index - allocate an ifindex
2780 * Returns a suitable unique value for a new device interface
2781 * number. The caller must hold the rtnl semaphore or the
2782 * dev_base_lock to be sure it remains unique.
2783 */
2784 static int dev_new_index(void)
2786 static int ifindex;
2787 for (;;) {
2788 if (++ifindex <= 0)
2789 ifindex = 1;
2790 if (!__dev_get_by_index(ifindex))
2791 return ifindex;
2795 static int dev_boot_phase = 1;
2797 /* Delayed registration/unregistration */
2798 static DEFINE_SPINLOCK(net_todo_list_lock);
2799 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
2801 static inline void net_set_todo(struct net_device *dev)
2803 spin_lock(&net_todo_list_lock);
2804 list_add_tail(&dev->todo_list, &net_todo_list);
2805 spin_unlock(&net_todo_list_lock);
2808 /**
2809 * register_netdevice - register a network device
2810 * @dev: device to register
2812 * Take a completed network device structure and add it to the kernel
2813 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2814 * chain. 0 is returned on success. A negative errno code is returned
2815 * on a failure to set up the device, or if the name is a duplicate.
2817 * Callers must hold the rtnl semaphore. You may want
2818 * register_netdev() instead of this.
2820 * BUGS:
2821 * The locking appears insufficient to guarantee two parallel registers
2822 * will not get the same name.
2823 */
2825 int register_netdevice(struct net_device *dev)
2827 struct hlist_head *head;
2828 struct hlist_node *p;
2829 int ret;
2831 BUG_ON(dev_boot_phase);
2832 ASSERT_RTNL();
2834 /* When net_devices are persistent, this will be fatal. */
2835 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2837 spin_lock_init(&dev->queue_lock);
2838 spin_lock_init(&dev->_xmit_lock);
2839 dev->xmit_lock_owner = -1;
2840 #ifdef CONFIG_NET_CLS_ACT
2841 spin_lock_init(&dev->ingress_lock);
2842 #endif
2844 ret = alloc_divert_blk(dev);
2845 if (ret)
2846 goto out;
2848 dev->iflink = -1;
2850 /* Init, if this function is available */
2851 if (dev->init) {
2852 ret = dev->init(dev);
2853 if (ret) {
2854 if (ret > 0)
2855 ret = -EIO;
2856 goto out_err;
2860 if (!dev_valid_name(dev->name)) {
2861 ret = -EINVAL;
2862 goto out_err;
2865 dev->ifindex = dev_new_index();
2866 if (dev->iflink == -1)
2867 dev->iflink = dev->ifindex;
2869 /* Check for existence of name */
2870 head = dev_name_hash(dev->name);
2871 hlist_for_each(p, head) {
2872 struct net_device *d
2873 = hlist_entry(p, struct net_device, name_hlist);
2874 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
2875 ret = -EEXIST;
2876 goto out_err;
2880 /* Fix illegal SG+CSUM combinations. */
2881 if ((dev->features & NETIF_F_SG) &&
2882 !(dev->features & NETIF_F_ALL_CSUM)) {
2883 printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
2884 dev->name);
2885 dev->features &= ~NETIF_F_SG;
2888 /* TSO requires that SG is present as well. */
2889 if ((dev->features & NETIF_F_TSO) &&
2890 !(dev->features & NETIF_F_SG)) {
2891 printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
2892 dev->name);
2893 dev->features &= ~NETIF_F_TSO;
2895 if (dev->features & NETIF_F_UFO) {
2896 if (!(dev->features & NETIF_F_HW_CSUM)) {
2897 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2898 "NETIF_F_HW_CSUM feature.\n",
2899 dev->name);
2900 dev->features &= ~NETIF_F_UFO;
2902 if (!(dev->features & NETIF_F_SG)) {
2903 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2904 "NETIF_F_SG feature.\n",
2905 dev->name);
2906 dev->features &= ~NETIF_F_UFO;
2910 /*
2911 * Install a nil rebuild_header routine; it should never be
2912 * called and serves only as a bug trap.
2913 */
2915 if (!dev->rebuild_header)
2916 dev->rebuild_header = default_rebuild_header;
2918 /*
2919 * Default initial state at registration is that the
2920 * device is present.
2921 */
2923 set_bit(__LINK_STATE_PRESENT, &dev->state);
2925 dev->next = NULL;
2926 dev_init_scheduler(dev);
2927 write_lock_bh(&dev_base_lock);
2928 *dev_tail = dev;
2929 dev_tail = &dev->next;
2930 hlist_add_head(&dev->name_hlist, head);
2931 hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2932 dev_hold(dev);
2933 dev->reg_state = NETREG_REGISTERING;
2934 write_unlock_bh(&dev_base_lock);
2936 /* Notify protocols that a new device appeared. */
2937 notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2939 /* Finish registration after unlock */
2940 net_set_todo(dev);
2941 ret = 0;
2943 out:
2944 return ret;
2945 out_err:
2946 free_divert_blk(dev);
2947 goto out;
2950 /**
2951 * register_netdev - register a network device
2952 * @dev: device to register
2954 * Take a completed network device structure and add it to the kernel
2955 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2956 * chain. 0 is returned on success. A negative errno code is returned
2957 * on a failure to set up the device, or if the name is a duplicate.
2959 * This is a wrapper around register_netdevice that takes the rtnl semaphore
2960 * and expands the device name if you passed a format string to
2961 * alloc_netdev.
2962 */
2963 int register_netdev(struct net_device *dev)
2965 int err;
2967 rtnl_lock();
2969 /*
2970 * If the name is a format string the caller wants us to do a
2971 * name allocation.
2972 */
2973 if (strchr(dev->name, '%')) {
2974 err = dev_alloc_name(dev, dev->name);
2975 if (err < 0)
2976 goto out;
2979 /*
2980 * Back compatibility hook. Kill this one in 2.5
2981 */
2982 if (dev->name[0] == 0 || dev->name[0] == ' ') {
2983 err = dev_alloc_name(dev, "eth%d");
2984 if (err < 0)
2985 goto out;
2988 err = register_netdevice(dev);
2989 out:
2990 rtnl_unlock();
2991 return err;
2993 EXPORT_SYMBOL(register_netdev);
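/*
 * The usual driver-side pattern built on these helpers; the names below are
 * illustrative, not from this file. A '%d' in the name asks register_netdev()
 * to allocate the unit number.
 */
#if 0
struct example_priv {
	int unit;
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);	/* assuming an ethernet-style device */
}

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct example_priv), "exm%d", example_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);	/* takes RTNL, expands "exm%d" */
	if (err)
		free_netdev(dev);	/* NETREG_UNINITIALIZED path in free_netdev() */
	return err;
}
#endif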
2995 /*
2996 * netdev_wait_allrefs - wait until all references are gone.
2998 * This is called when unregistering network devices.
3000 * Any protocol or device that holds a reference should register
3001 * for netdevice notification, and clean up and drop the
3002 * reference when it receives an UNREGISTER event.
3003 * We can get stuck here if buggy protocols don't correctly
3004 * call dev_put.
3005 */
3006 static void netdev_wait_allrefs(struct net_device *dev)
3008 unsigned long rebroadcast_time, warning_time;
3010 rebroadcast_time = warning_time = jiffies;
3011 while (atomic_read(&dev->refcnt) != 0) {
3012 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
3013 rtnl_shlock();
3015 /* Rebroadcast unregister notification */
3016 notifier_call_chain(&netdev_chain,
3017 NETDEV_UNREGISTER, dev);
3019 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
3020 &dev->state)) {
3021 /* We must not have linkwatch events
3022 * pending on unregister. If this
3023 * happens, we simply run the queue
3024 * unscheduled, resulting in a noop
3025 * for this device.
3026 */
3027 linkwatch_run_queue();
3030 rtnl_shunlock();
3032 rebroadcast_time = jiffies;
3035 msleep(250);
3037 if (time_after(jiffies, warning_time + 10 * HZ)) {
3038 printk(KERN_EMERG "unregister_netdevice: "
3039 "waiting for %s to become free. Usage "
3040 "count = %d\n",
3041 dev->name, atomic_read(&dev->refcnt));
3042 warning_time = jiffies;
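/*
 * What this loop is waiting for, seen from a reference holder's side: anyone
 * who cached a device with dev_hold() should listen for the rebroadcast
 * NETDEV_UNREGISTER events and drop its reference. A hypothetical holder:
 */
#if 0
static struct net_device *example_cached_dev;

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_cached_dev) {
		example_cached_dev = NULL;
		dev_put(dev);	/* lets the refcnt above reach zero */
	}
	return NOTIFY_DONE;
}
#endif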
3047 /* The sequence is:
3049 * rtnl_lock();
3050 * ...
3051 * register_netdevice(x1);
3052 * register_netdevice(x2);
3053 * ...
3054 * unregister_netdevice(y1);
3055 * unregister_netdevice(y2);
3056 * ...
3057 * rtnl_unlock();
3058 * free_netdev(y1);
3059 * free_netdev(y2);
3061 * We are invoked by rtnl_unlock() after it drops the semaphore.
3062 * This allows us to deal with problems:
3063 * 1) We can create/delete sysfs objects which invoke hotplug
3064 * without deadlocking with linkwatch via keventd.
3065 * 2) Since we run with the RTNL semaphore not held, we can sleep
3066 * safely in order to wait for the netdev refcnt to drop to zero.
3067 */
3068 static DECLARE_MUTEX(net_todo_run_mutex);
3069 void netdev_run_todo(void)
3071 struct list_head list = LIST_HEAD_INIT(list);
3072 int err;
3075 /* Need to guard against multiple CPUs getting out of order. */
3076 down(&net_todo_run_mutex);
3078 /* Not safe to do outside the semaphore. We must not return
3079 * until all unregister events invoked by the local processor
3080 * have been completed (either by this todo run, or one on
3081 * another cpu).
3082 */
3083 if (list_empty(&net_todo_list))
3084 goto out;
3086 /* Snapshot list, allow later requests */
3087 spin_lock(&net_todo_list_lock);
3088 list_splice_init(&net_todo_list, &list);
3089 spin_unlock(&net_todo_list_lock);
3091 while (!list_empty(&list)) {
3092 struct net_device *dev
3093 = list_entry(list.next, struct net_device, todo_list);
3094 list_del(&dev->todo_list);
3096 switch(dev->reg_state) {
3097 case NETREG_REGISTERING:
3098 dev->reg_state = NETREG_REGISTERED;
3099 err = netdev_register_sysfs(dev);
3100 if (err)
3101 printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
3102 dev->name, err);
3103 break;
3105 case NETREG_UNREGISTERING:
3106 netdev_unregister_sysfs(dev);
3107 dev->reg_state = NETREG_UNREGISTERED;
3109 netdev_wait_allrefs(dev);
3111 /* paranoia */
3112 BUG_ON(atomic_read(&dev->refcnt));
3113 BUG_TRAP(!dev->ip_ptr);
3114 BUG_TRAP(!dev->ip6_ptr);
3115 BUG_TRAP(!dev->dn_ptr);
3118 /* This must be the very last action;
3119 * after it, 'dev' may point to freed memory.
3120 */
3121 if (dev->destructor)
3122 dev->destructor(dev);
3123 break;
3125 default:
3126 printk(KERN_ERR "network todo '%s' but state %d\n",
3127 dev->name, dev->reg_state);
3128 break;
3132 out:
3133 up(&net_todo_run_mutex);
3136 /**
3137 * alloc_netdev - allocate network device
3138 * @sizeof_priv: size of private data to allocate space for
3139 * @name: device name format string
3140 * @setup: callback to initialize device
3142 * Allocates a struct net_device with private data area for driver use
3143 * and performs basic initialization.
3144 */
3145 struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3146 void (*setup)(struct net_device *))
3148 void *p;
3149 struct net_device *dev;
3150 int alloc_size;
3152 /* ensure 32-byte alignment of both the device and private area */
3153 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
3154 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
3156 p = kmalloc(alloc_size, GFP_KERNEL);
3157 if (!p) {
3158 printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
3159 return NULL;
3161 memset(p, 0, alloc_size);
3163 dev = (struct net_device *)
3164 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3165 dev->padded = (char *)dev - (char *)p;
3167 if (sizeof_priv)
3168 dev->priv = netdev_priv(dev);
3170 setup(dev);
3171 strcpy(dev->name, name);
3172 return dev;
3174 EXPORT_SYMBOL(alloc_netdev);
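/*
 * How the private area laid out above is reached from driver code: the
 * padded allocation places it right after the aligned struct net_device,
 * and netdev_priv() recovers the pointer. Names are illustrative; the lock
 * is assumed to be initialized in the device setup hook.
 */
#if 0
struct example_priv {
	spinlock_t lock;
	unsigned long rx_dropped;
};

static void example_count_drop(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* == dev->priv here */

	spin_lock(&priv->lock);
	priv->rx_dropped++;
	spin_unlock(&priv->lock);
}
#endif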
3176 /**
3177 * free_netdev - free network device
3178 * @dev: device
3180 * This function does the last stage of destroying an allocated device
3181 * interface. The reference to the device object is released.
3182 * If this is the last reference then it will be freed.
3183 */
3184 void free_netdev(struct net_device *dev)
3186 #ifdef CONFIG_SYSFS
3187 /* Compatibility with error handling in drivers */
3188 if (dev->reg_state == NETREG_UNINITIALIZED) {
3189 kfree((char *)dev - dev->padded);
3190 return;
3193 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3194 dev->reg_state = NETREG_RELEASED;
3196 /* will free via class release */
3197 class_device_put(&dev->class_dev);
3198 #else
3199 kfree((char *)dev - dev->padded);
3200 #endif
3203 /* Synchronize with packet receive processing. */
3204 void synchronize_net(void)
3206 might_sleep();
3207 synchronize_rcu();
3210 /**
3211 * unregister_netdevice - remove device from the kernel
3212 * @dev: device
3214 * This function shuts down a device interface and removes it
3215 * from the kernel tables. On success 0 is returned, on a failure
3216 * a negative errno code is returned.
3218 * Callers must hold the rtnl semaphore. You may want
3219 * unregister_netdev() instead of this.
3220 */
3222 int unregister_netdevice(struct net_device *dev)
3224 struct net_device *d, **dp;
3226 BUG_ON(dev_boot_phase);
3227 ASSERT_RTNL();
3229 /* Some devices call this without ever registering, to unwind a failed initialization. */
3230 if (dev->reg_state == NETREG_UNINITIALIZED) {
3231 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3232 "was registered\n", dev->name, dev);
3233 return -ENODEV;
3236 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3238 /* If device is running, close it first. */
3239 if (dev->flags & IFF_UP)
3240 dev_close(dev);
3242 /* And unlink it from device chain. */
3243 for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
3244 if (d == dev) {
3245 write_lock_bh(&dev_base_lock);
3246 hlist_del(&dev->name_hlist);
3247 hlist_del(&dev->index_hlist);
3248 if (dev_tail == &dev->next)
3249 dev_tail = dp;
3250 *dp = d->next;
3251 write_unlock_bh(&dev_base_lock);
3252 break;
3255 if (!d) {
3256 printk(KERN_ERR "unregister net_device: '%s' not found\n",
3257 dev->name);
3258 return -ENODEV;
3261 dev->reg_state = NETREG_UNREGISTERING;
3263 synchronize_net();
3265 /* Shutdown queueing discipline. */
3266 dev_shutdown(dev);
3269 /* Notify protocols that we are about to destroy
3270 this device. They should clean up all their state.
3271 */
3272 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3274 /*
3275 * Flush the multicast chain
3276 */
3277 dev_mc_discard(dev);
3279 if (dev->uninit)
3280 dev->uninit(dev);
3282 /* Notifier chain MUST detach us from master device. */
3283 BUG_TRAP(!dev->master);
3285 free_divert_blk(dev);
3287 /* Finish processing unregister after unlock */
3288 net_set_todo(dev);
3290 synchronize_net();
3292 dev_put(dev);
3293 return 0;
3296 /**
3297 * unregister_netdev - remove device from the kernel
3298 * @dev: device
3300 * This function shuts down a device interface and removes it
3301 * from the kernel tables. On success 0 is returned, on a failure
3302 * a negative errno code is returned.
3304 * This is just a wrapper for unregister_netdevice that takes
3305 * the rtnl semaphore. In general you want to use this and not
3306 * unregister_netdevice.
3307 */
3308 void unregister_netdev(struct net_device *dev)
3310 rtnl_lock();
3311 unregister_netdevice(dev);
3312 rtnl_unlock();
3315 EXPORT_SYMBOL(unregister_netdev);
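/*
 * The matching teardown for the registration sketch earlier: unregister
 * first, then free. By the time unregister_netdev() returns, rtnl_unlock()
 * has run the todo list, so the device is back in NETREG_UNREGISTERED and
 * free_netdev() is safe. The name is illustrative.
 */
#if 0
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes RTNL, waits for references to drop */
	free_netdev(dev);
}
#endif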
3317 #ifdef CONFIG_HOTPLUG_CPU
3318 static int dev_cpu_callback(struct notifier_block *nfb,
3319 unsigned long action,
3320 void *ocpu)
3322 struct sk_buff **list_skb;
3323 struct net_device **list_net;
3324 struct sk_buff *skb;
3325 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3326 struct softnet_data *sd, *oldsd;
3328 if (action != CPU_DEAD)
3329 return NOTIFY_OK;
3331 local_irq_disable();
3332 cpu = smp_processor_id();
3333 sd = &per_cpu(softnet_data, cpu);
3334 oldsd = &per_cpu(softnet_data, oldcpu);
3336 /* Find end of our completion_queue. */
3337 list_skb = &sd->completion_queue;
3338 while (*list_skb)
3339 list_skb = &(*list_skb)->next;
3340 /* Append completion queue from offline CPU. */
3341 *list_skb = oldsd->completion_queue;
3342 oldsd->completion_queue = NULL;
3344 /* Find end of our output_queue. */
3345 list_net = &sd->output_queue;
3346 while (*list_net)
3347 list_net = &(*list_net)->next_sched;
3348 /* Append output queue from offline CPU. */
3349 *list_net = oldsd->output_queue;
3350 oldsd->output_queue = NULL;
3352 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3353 local_irq_enable();
3355 /* Process offline CPU's input_pkt_queue */
3356 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3357 netif_rx(skb);
3359 return NOTIFY_OK;
3361 #endif /* CONFIG_HOTPLUG_CPU */
3364 /*
3365 * Initialize the DEV module. At boot time this walks the device list and
3366 * unhooks any devices that fail to initialise (normally hardware not
3367 * present) and leaves us with a valid list of present and active devices.
3369 */
3371 /*
3372 * This is called single threaded during boot, so no need
3373 * to take the rtnl semaphore.
3374 */
3375 static int __init net_dev_init(void)
3377 int i, rc = -ENOMEM;
3379 BUG_ON(!dev_boot_phase);
3381 net_random_init();
3383 if (dev_proc_init())
3384 goto out;
3386 if (netdev_sysfs_init())
3387 goto out;
3389 INIT_LIST_HEAD(&ptype_all);
3390 for (i = 0; i < 16; i++)
3391 INIT_LIST_HEAD(&ptype_base[i]);
3393 for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
3394 INIT_HLIST_HEAD(&dev_name_head[i]);
3396 for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
3397 INIT_HLIST_HEAD(&dev_index_head[i]);
3399 /*
3400 * Initialise the packet receive queues.
3401 */
3403 for_each_cpu(i) {
3404 struct softnet_data *queue;
3406 queue = &per_cpu(softnet_data, i);
3407 skb_queue_head_init(&queue->input_pkt_queue);
3408 queue->completion_queue = NULL;
3409 INIT_LIST_HEAD(&queue->poll_list);
3410 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
3411 queue->backlog_dev.weight = weight_p;
3412 queue->backlog_dev.poll = process_backlog;
3413 atomic_set(&queue->backlog_dev.refcnt, 1);
3416 dev_boot_phase = 0;
3418 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
3419 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
3421 hotcpu_notifier(dev_cpu_callback, 0);
3422 dst_init();
3423 dev_mcast_init();
3424 rc = 0;
3425 out:
3426 return rc;
3429 subsys_initcall(net_dev_init);
3431 EXPORT_SYMBOL(__dev_get_by_index);
3432 EXPORT_SYMBOL(__dev_get_by_name);
3433 EXPORT_SYMBOL(__dev_remove_pack);
3434 EXPORT_SYMBOL(dev_valid_name);
3435 EXPORT_SYMBOL(dev_add_pack);
3436 EXPORT_SYMBOL(dev_alloc_name);
3437 EXPORT_SYMBOL(dev_close);
3438 EXPORT_SYMBOL(dev_get_by_flags);
3439 EXPORT_SYMBOL(dev_get_by_index);
3440 EXPORT_SYMBOL(dev_get_by_name);
3441 EXPORT_SYMBOL(dev_open);
3442 EXPORT_SYMBOL(dev_queue_xmit);
3443 EXPORT_SYMBOL(dev_remove_pack);
3444 EXPORT_SYMBOL(dev_set_allmulti);
3445 EXPORT_SYMBOL(dev_set_promiscuity);
3446 EXPORT_SYMBOL(dev_change_flags);
3447 EXPORT_SYMBOL(dev_set_mtu);
3448 EXPORT_SYMBOL(dev_set_mac_address);
3449 EXPORT_SYMBOL(free_netdev);
3450 EXPORT_SYMBOL(netdev_boot_setup_check);
3451 EXPORT_SYMBOL(netdev_set_master);
3452 EXPORT_SYMBOL(netdev_state_change);
3453 EXPORT_SYMBOL(netif_receive_skb);
3454 EXPORT_SYMBOL(netif_rx);
3455 EXPORT_SYMBOL(register_gifconf);
3456 EXPORT_SYMBOL(register_netdevice);
3457 EXPORT_SYMBOL(register_netdevice_notifier);
3458 EXPORT_SYMBOL(skb_checksum_help);
3459 EXPORT_SYMBOL(synchronize_net);
3460 EXPORT_SYMBOL(unregister_netdevice);
3461 EXPORT_SYMBOL(unregister_netdevice_notifier);
3462 EXPORT_SYMBOL(net_enable_timestamp);
3463 EXPORT_SYMBOL(net_disable_timestamp);
3464 EXPORT_SYMBOL(dev_get_flags);
3465 EXPORT_SYMBOL(skb_checksum_setup);
3467 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
3468 EXPORT_SYMBOL(br_handle_frame_hook);
3469 EXPORT_SYMBOL(br_fdb_get_hook);
3470 EXPORT_SYMBOL(br_fdb_put_hook);
3471 #endif
3473 #ifdef CONFIG_KMOD
3474 EXPORT_SYMBOL(dev_load);
3475 #endif
3477 EXPORT_PER_CPU_SYMBOL(softnet_data);