ia64/xen-unstable: linux-2.6-xen-sparse/net/core/dev.c @ 10672:ac110157c19d

changeset: [IA64] Move asmmacro.h to linux-xen

Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
Signed-off-by: Alex Williamson <alex.williamson@hp.com>

author:   awilliam@xenbuild.aw
date:     Thu Jul 06 10:02:28 2006 -0600
parents:  6e7027a2abca
children: a4041ac6f152

/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain:       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/divert.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#ifdef CONFIG_NET_RADIO
#include <linux/wireless.h>             /* Note : will define WIRELESS_EXT */
#include <net/iw_handler.h>
#endif  /* CONFIG_NET_RADIO */
#include <asm/current.h>
#include <linux/err.h>

#ifdef CONFIG_XEN
#include <net/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#endif

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16. Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *              0800    IP
 *              8100    802.1Q VLAN
 *              0001    802.3
 *              0002    AX.25
 *              0004    802.2
 *              8035    RARP
 *              0005    SNAP
 *              0805    X.25
 *              0806    ARP
 *              8137    IPX
 *              0009    Localtalk
 *              86DD    IPv6
 */

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[16];         /* 16 way hashed list */
static struct list_head ptype_all;              /* Taps */

/*
 * The @dev_base list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
struct net_device *dev_base;
static struct net_device **dev_tail = &dev_base;
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base);
EXPORT_SYMBOL(dev_base_lock);
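
/*
 * Usage sketch for the locking rules above (hypothetical reader code,
 * not part of this file): a pure reader walks dev_base under
 * dev_base_lock for reading, while writers must also hold the rtnl
 * semaphore.
 *
 *      struct net_device *dev;
 *
 *      read_lock(&dev_base_lock);
 *      for (dev = dev_base; dev; dev = dev->next)
 *              printk(KERN_DEBUG "%s\n", dev->name);
 *      read_unlock(&dev_base_lock);
 */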

#define NETDEV_HASHBITS 8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];

static inline struct hlist_head *dev_name_hash(const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
        return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
}

static inline struct hlist_head *dev_index_hash(int ifindex)
{
        return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
}

/*
 *      Our notifier list
 */

static struct notifier_block *netdev_chain;

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()             (0)
#define netdev_register_sysfs(dev)      (0)
#define netdev_unregister_sysfs(dev)    do { } while(0)
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      For efficiency
 */

int netdev_nit;

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if protocol handler, mangling packet, will
 *      be the first on list, it is not able to sense, that packet
 *      is cloned and should be copied-on-write, so that it will
 *      change it and subsequent readers will get broken packet.
 *                                                      --ANK (980803)
 */

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep therefore it can not
 *      guarantee all CPU's that are in middle of receiving packets
 *      will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        int hash;

        spin_lock_bh(&ptype_lock);
        if (pt->type == htons(ETH_P_ALL)) {
                netdev_nit++;
                list_add_rcu(&pt->list, &ptype_all);
        } else {
                hash = ntohs(pt->type) & 15;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
        spin_unlock_bh(&ptype_lock);
}
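
/*
 * Usage sketch (hypothetical names, not from this file): registering a
 * handler for one protocol number. The packet_type must stay allocated
 * until dev_remove_pack() has returned.
 *
 *      static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *                        struct packet_type *pt, struct net_device *orig_dev);
 *
 *      static struct packet_type my_packet_type = {
 *              .type = __constant_htons(ETH_P_IP),
 *              .func = my_rcv,
 *      };
 *
 *      dev_add_pack(&my_packet_type);
 */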

/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPU's have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head;
        struct packet_type *pt1;

        spin_lock_bh(&ptype_lock);

        if (pt->type == htons(ETH_P_ALL)) {
                netdev_nit--;
                head = &ptype_all;
        } else
                head = &ptype_base[ntohs(pt->type) & 15];

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock_bh(&ptype_lock);
}

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}

/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine to
 *      all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strcpy(s[i].name, name);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}


/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
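
/*
 * Example of the boot-time syntax parsed above (hypothetical values):
 * up to four integers (irq, base_addr, mem_start, mem_end) followed by
 * the device name, given on the kernel command line, e.g.
 *
 *      netdev=5,0x300,0,0,eth0
 */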

/*******************************************************************************

                            Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_name_hash(name)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, name_hlist);
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;
        }
        return NULL;
}

/**
 *      dev_get_by_name - find a device by its name
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(name);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}
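
/*
 * Usage sketch (hypothetical): the reference taken by dev_get_by_name()
 * must be dropped with dev_put() once the caller is done with the device.
 *
 *      struct net_device *dev = dev_get_by_name("eth0");
 *      if (dev) {
 *              printk(KERN_DEBUG "%s has ifindex %d\n",
 *                     dev->name, dev->ifindex);
 *              dev_put(dev);
 *      }
 */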

/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_index_hash(ifindex)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, index_hlist);
                if (dev->ifindex == ifindex)
                        return dev;
        }
        return NULL;
}


/**
 *      dev_get_by_index - find a device by its ifindex
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(ifindex);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}

/**
 *      dev_getbyhwaddr - find a device by its hardware address
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device. The caller must hold the
 *      rtnl semaphore. The returned device has not had its ref count increased
 *      and the caller must therefore be careful about locking
 *
 *      BUGS:
 *      If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for (dev = dev_base; dev; dev = dev->next)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        break;
        return dev;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *dev_getfirstbyhwtype(unsigned short type)
{
        struct net_device *dev;

        rtnl_lock();
        for (dev = dev_base; dev; dev = dev->next) {
                if (dev->type == type) {
                        dev_hold(dev);
                        break;
                }
        }
        rtnl_unlock();
        return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      dev_get_by_flags - find any device with given flags
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        for (dev = dev_base; dev != NULL; dev = dev->next) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        dev_hold(dev);
                        break;
                }
        }
        read_unlock(&dev_base_lock);
        return dev;
}

/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names
 *      to allow sysfs to work.
 */
int dev_valid_name(const char *name)
{
        return !(*name == '\0'
                 || !strcmp(name, ".")
                 || !strcmp(name, "..")
                 || strchr(name, '/'));
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. Not efficient for many devices, not called a lot. The caller
 *      must hold the dev_base or rtnl lock while allocating the name and
 *      adding the device in order to avoid duplicates. Returns the number
 *      of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        int i = 0;
        char buf[IFNAMSIZ];
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for (d = dev_base; d; d = d->next) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, sizeof(buf), name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, sizeof(buf), name, i);
        if (!__dev_get_by_name(buf)) {
                strlcpy(dev->name, buf, IFNAMSIZ);
                return i;
        }

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}
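
/*
 * Usage sketch (hypothetical): asking for the first free "eth%d" slot.
 * On success the chosen name has already been copied into dev->name and
 * the unit number is returned; a negative errno means EINVAL, ENOMEM or
 * ENFILE as described above.
 *
 *      int unit = dev_alloc_name(dev, "eth%d");
 *      if (unit < 0)
 *              return unit;
 */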

/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change name of a device, can pass format strings "eth%d"
 *      for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
        int err = 0;

        ASSERT_RTNL();

        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (!dev_valid_name(newname))
                return -EINVAL;

        if (strchr(newname, '%')) {
                err = dev_alloc_name(dev, newname);
                if (err < 0)
                        return err;
                strcpy(newname, dev->name);
        }
        else if (__dev_get_by_name(newname))
                return -EEXIST;
        else
                strlcpy(dev->name, newname, IFNAMSIZ);

        err = class_device_rename(&dev->class_dev, dev->name);
        if (!err) {
                hlist_del(&dev->name_hlist);
                hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
                notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
        }

        return err;
}

/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}

/**
 *      dev_load - load a network module
 *      @name: name of interface
 *
 *      If a network interface is not present and the process has suitable
 *      privileges this function loads the module. If module loading is not
 *      available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(name);
        read_unlock(&dev_base_lock);

        if (!dev && capable(CAP_SYS_MODULE))
                request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
        printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
               skb->dev ? skb->dev->name : "NULL!!!");
        kfree_skb(skb);
        return 1;
}

/**
 *      dev_open - prepare an interface for use.
 *      @dev: device to open
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        int ret = 0;

        /*
         *      Is it already up?
         */

        if (dev->flags & IFF_UP)
                return 0;

        /*
         *      Is it even present?
         */
        if (!netif_device_present(dev))
                return -ENODEV;

        /*
         *      Call device private open method
         */
        set_bit(__LINK_STATE_START, &dev->state);
        if (dev->open) {
                ret = dev->open(dev);
                if (ret)
                        clear_bit(__LINK_STATE_START, &dev->state);
        }

        /*
         *      If it went open OK then:
         */

        if (!ret) {
                /*
                 *      Set the flags.
                 */
                dev->flags |= IFF_UP;

                /*
                 *      Initialize multicasting status
                 */
                dev_mc_upload(dev);

                /*
                 *      Wakeup transmit queue engine
                 */
                dev_activate(dev);

                /*
                 *      ... and announce new interface.
                 */
                notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
        }
        return ret;
}

/**
 *      dev_close - shutdown an interface.
 *      @dev: device to shutdown
 *
 *      This function moves an active device into down state. A
 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *      chain.
 */
int dev_close(struct net_device *dev)
{
        if (!(dev->flags & IFF_UP))
                return 0;

        /*
         *      Tell people we are going down, so that they can
         *      prepare to death, when device is still operating.
         */
        notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

        dev_deactivate(dev);

        clear_bit(__LINK_STATE_START, &dev->state);

        /* Synchronize to scheduled poll. We cannot touch poll list,
         * it can be even on different cpu. So just clear netif_running(),
         * and wait when poll really will happen. Actually, the best place
         * for this is inside dev->stop() after device stopped its irq
         * engine, but this requires more changes in devices. */

        smp_mb__after_clear_bit(); /* Commit netif_running(). */
        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
                /* No hurry. */
                msleep(1);
        }

        /*
         *      Call the device specific close. This cannot fail.
         *      Only if device is UP
         *
         *      We allow it to be called even after a DETACH hot-plug
         *      event.
         */
        if (dev->stop)
                dev->stop(dev);

        /*
         *      Device is now down.
         */

        dev->flags &= ~IFF_UP;

        /*
         *      Tell people we are down
         */
        notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

        return 0;
}

/*
 *      Device change register/unregister. These are not inline or static
 *      as we export them to the world.
 */

/**
 *      register_netdevice_notifier - register a network notifier block
 *      @nb: notifier
 *
 *      Register a notifier to be called when network device events occur.
 *      The notifier passed is linked into the kernel structures and must
 *      not be reused until it has been unregistered. A negative errno code
 *      is returned on a failure.
 *
 *      When registered all registration and up events are replayed
 *      to the new notifier to allow device to have a race free
 *      view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        int err;

        rtnl_lock();
        err = notifier_chain_register(&netdev_chain, nb);
        if (!err) {
                for (dev = dev_base; dev; dev = dev->next) {
                        nb->notifier_call(nb, NETDEV_REGISTER, dev);

                        if (dev->flags & IFF_UP)
                                nb->notifier_call(nb, NETDEV_UP, dev);
                }
        }
        rtnl_unlock();
        return err;
}
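
/*
 * Usage sketch (hypothetical names): a notifier that logs devices coming
 * up. Per the comment above, NETDEV_REGISTER and NETDEV_UP events are
 * replayed for devices that already exist at registration time.
 *
 *      static int my_netdev_event(struct notifier_block *nb,
 *                                 unsigned long event, void *ptr)
 *      {
 *              struct net_device *dev = ptr;
 *
 *              if (event == NETDEV_UP)
 *                      printk(KERN_INFO "%s is up\n", dev->name);
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_netdev_notifier = {
 *              .notifier_call = my_netdev_event,
 *      };
 *
 *      register_netdevice_notifier(&my_netdev_notifier);
 */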

/**
 *      unregister_netdevice_notifier - unregister a network notifier block
 *      @nb: notifier
 *
 *      Unregister a notifier previously registered by
 *      register_netdevice_notifier(). The notifier is unlinked from the
 *      kernel structures and may then be reused. A negative errno code
 *      is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&netdev_chain, nb);
}

/**
 *      call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @v:   pointer passed unmodified to notifier function
 *
 *      Call all network notifier blocks.  Parameters and return value
 *      are as for notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
        return notifier_call_chain(&netdev_chain, val, v);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
        atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
        atomic_dec(&netstamp_needed);
}

void __net_timestamp(struct sk_buff *skb)
{
        struct timeval tv;

        do_gettimeofday(&tv);
        skb_set_timestamp(skb, &tv);
}
EXPORT_SYMBOL(__net_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
        if (atomic_read(&netstamp_needed))
                __net_timestamp(skb);
        else {
                skb->tstamp.off_sec = 0;
                skb->tstamp.off_usec = 0;
        }
}

/*
 *      Support routine. Sends outgoing frames to any network
 *      taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
        struct packet_type *ptype;

        net_timestamp(skb);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                /* Never send packets back to the socket
                 * they originated from - MvS (miquels@drinkel.ow.org)
                 */
                if ((ptype->dev == dev || !ptype->dev) &&
                    (ptype->af_packet_priv == NULL ||
                     (struct sock *)ptype->af_packet_priv != skb->sk)) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (!skb2)
                                break;

                        /* skb->nh should be correctly
                           set by sender, so that the second statement is
                           just protection against buggy protocols.
                         */
                        skb2->mac.raw = skb2->data;

                        if (skb2->nh.raw < skb2->data ||
                            skb2->nh.raw > skb2->tail) {
                                if (net_ratelimit())
                                        printk(KERN_CRIT "protocol %04x is "
                                               "buggy, dev %s\n",
                                               skb2->protocol, dev->name);
                                skb2->nh.raw = skb2->data;
                        }

                        skb2->h.raw = skb2->nh.raw;
                        skb2->pkt_type = PACKET_OUTGOING;
                        ptype->func(skb2, skb->dev, ptype, skb->dev);
                }
        }
        rcu_read_unlock();
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb, int inward)
{
        unsigned int csum;
        int ret = 0, offset = skb->h.raw - skb->data;

        if (inward) {
                skb->ip_summed = CHECKSUM_NONE;
                goto out;
        }

        if (skb_cloned(skb)) {
                ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                if (ret)
                        goto out;
        }

        BUG_ON(offset > (int)skb->len);
        csum = skb_checksum(skb, offset, skb->len-offset, 0);

        offset = skb->tail - skb->h.raw;
        BUG_ON(offset <= 0);
        BUG_ON(skb->csum + 2 > offset);

        *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
        skb->ip_summed = CHECKSUM_NONE;
out:
        return ret;
}

/**
 *      skb_gso_segment - Perform segmentation on skb.
 *      @skb: buffer to segment
 *      @features: features for the output path (see dev->features)
 *
 *      This function segments the given skb and returns a list of segments.
 *
 *      It may return NULL if the skb requires no segmentation.  This is
 *      only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
        int type = skb->protocol;

        BUG_ON(skb_shinfo(skb)->frag_list);
        BUG_ON(skb->ip_summed != CHECKSUM_HW);

        skb->mac.raw = skb->data;
        skb->mac_len = skb->nh.raw - skb->data;
        __skb_pull(skb, skb->mac_len);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
                if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
                        segs = ptype->gso_segment(skb, features);
                        break;
                }
        }
        rcu_read_unlock();

        __skb_push(skb, skb->data - skb->mac.raw);

        return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
        if (net_ratelimit()) {
                printk(KERN_ERR "%s: hw csum failure.\n",
                       dev ? dev->name : "<unknown>");
                dump_stack();
        }
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

#ifdef CONFIG_HIGHMEM
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
        int i;

        if (dev->features & NETIF_F_HIGHDMA)
                return 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (PageHighMem(skb_shinfo(skb)->frags[i].page))
                        return 1;

        return 0;
}
#else
#define illegal_highdma(dev, skb)       (0)
#endif

struct dev_gso_cb {
        void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
        struct dev_gso_cb *cb;

        do {
                struct sk_buff *nskb = skb->next;

                skb->next = nskb->next;
                nskb->next = NULL;
                kfree_skb(nskb);
        } while (skb->next);

        cb = DEV_GSO_CB(skb);
        if (cb->destructor)
                cb->destructor(skb);
}

/**
 *      dev_gso_segment - Perform emulated hardware segmentation on skb.
 *      @skb: buffer to segment
 *
 *      This function segments the given skb and stores the list of segments
 *      in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct sk_buff *segs;
        int features = dev->features & ~(illegal_highdma(dev, skb) ?
                                         NETIF_F_SG : 0);

        segs = skb_gso_segment(skb, features);

        /* Verifying header integrity only. */
        if (!segs)
                return 0;

        if (unlikely(IS_ERR(segs)))
                return PTR_ERR(segs);

        skb->next = segs;
        DEV_GSO_CB(skb)->destructor = skb->destructor;
        skb->destructor = dev_gso_skb_destructor;

        return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (likely(!skb->next)) {
                if (netdev_nit)
                        dev_queue_xmit_nit(skb, dev);

                if (netif_needs_gso(dev, skb)) {
                        if (unlikely(dev_gso_segment(skb)))
                                goto out_kfree_skb;
                        if (skb->next)
                                goto gso;
                }

                return dev->hard_start_xmit(skb, dev);
        }

gso:
        do {
                struct sk_buff *nskb = skb->next;
                int rc;

                skb->next = nskb->next;
                nskb->next = NULL;
                rc = dev->hard_start_xmit(nskb, dev);
                if (unlikely(rc)) {
                        nskb->next = skb->next;
                        skb->next = nskb;
                        return rc;
                }
                if (unlikely(netif_queue_stopped(dev) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);

        skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
        kfree_skb(skb);
        return 0;
}

#define HARD_TX_LOCK(dev, cpu) {                        \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                netif_tx_lock(dev);                     \
        }                                               \
}

#define HARD_TX_UNLOCK(dev) {                           \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                netif_tx_unlock(dev);                   \
        }                                               \
}

#ifdef CONFIG_XEN
inline int skb_checksum_setup(struct sk_buff *skb)
{
        if (skb->proto_csum_blank) {
                if (skb->protocol != htons(ETH_P_IP))
                        goto out;
                skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
                if (skb->h.raw >= skb->tail)
                        goto out;
                switch (skb->nh.iph->protocol) {
                case IPPROTO_TCP:
                        skb->csum = offsetof(struct tcphdr, check);
                        break;
                case IPPROTO_UDP:
                        skb->csum = offsetof(struct udphdr, check);
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_ERR "Attempting to checksum a non-"
                                       "TCP/UDP packet, dropping a protocol"
                                       " %d packet", skb->nh.iph->protocol);
                        goto out;
                }
                if ((skb->h.raw + skb->csum + 2) > skb->tail)
                        goto out;
                skb->ip_summed = CHECKSUM_HW;
                skb->proto_csum_blank = 0;
        }
        return 0;
out:
        return -EPROTO;
}
#else
inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
#endif

/**
 *      dev_queue_xmit - transmit a buffer
 *      @skb: buffer to transmit
 *
 *      Queue a buffer for transmission to a network device. The caller must
 *      have set the device and priority and built the buffer before calling
 *      this function. The function can be called from an interrupt.
 *
 *      A negative errno code is returned on a failure. A success does not
 *      guarantee the frame will be transmitted as it may be dropped due
 *      to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */

int dev_queue_xmit(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct Qdisc *q;
        int rc = -ENOMEM;

        /* If a checksum-deferred packet is forwarded to a device that needs a
         * checksum, correct the pointers and force checksumming.
         */
        if (skb_checksum_setup(skb))
                goto out_kfree_skb;

        /* GSO will handle the following emulations directly. */
        if (netif_needs_gso(dev, skb))
                goto gso;

        if (skb_shinfo(skb)->frag_list &&
            !(dev->features & NETIF_F_FRAGLIST) &&
            __skb_linearize(skb))
                goto out_kfree_skb;

        /* Fragmented skb is linearized if device does not support SG,
         * or if at least one of fragments is in highmem and device
         * does not support DMA from it.
         */
        if (skb_shinfo(skb)->nr_frags &&
            (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
            __skb_linearize(skb))
                goto out_kfree_skb;

        /* If packet is not checksummed and device does not support
         * checksumming for this protocol, complete checksumming here.
         */
        if (skb->ip_summed == CHECKSUM_HW &&
            (!(dev->features & NETIF_F_GEN_CSUM) &&
             (!(dev->features & NETIF_F_IP_CSUM) ||
              skb->protocol != htons(ETH_P_IP))))
                if (skb_checksum_help(skb, 0))
                        goto out_kfree_skb;

gso:
        spin_lock_prefetch(&dev->queue_lock);

        /* Disable soft irqs for various locks below. Also
         * stops preemption for RCU.
         */
        rcu_read_lock_bh();

        /* Updates of qdisc are serialized by queue_lock.
         * The struct Qdisc which is pointed to by qdisc is now a
         * rcu structure - it may be accessed without acquiring
         * a lock (but the structure may be stale.) The freeing of the
         * qdisc will be deferred until it's known that there are no
         * more references to it.
         *
         * If the qdisc has an enqueue function, we still need to
         * hold the queue_lock before calling it, since queue_lock
         * also serializes access to the device queue.
         */

        q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
#endif
        if (q->enqueue) {
                /* Grab device queue */
                spin_lock(&dev->queue_lock);

                rc = q->enqueue(skb, q);

                qdisc_run(dev);

                spin_unlock(&dev->queue_lock);
                rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
                goto out;
        }

        /* The device has no queue. Common case for software devices:
           loopback, all the sorts of tunnels...

           Really, it is unlikely that netif_tx_lock protection is necessary
           here.  (f.e. loopback and IP tunnels are clean ignoring statistics
           counters.)
           However, it is possible, that they rely on protection
           made by us here.

           Check this and shot the lock. It is not prone from deadlocks.
           Either shot noqueue qdisc, it is even simpler 8)
         */
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */

                if (dev->xmit_lock_owner != cpu) {

                        HARD_TX_LOCK(dev, cpu);

                        if (!netif_queue_stopped(dev)) {
                                rc = 0;
                                if (!dev_hard_start_xmit(skb, dev)) {
                                        HARD_TX_UNLOCK(dev);
                                        goto out;
                                }
                        }
                        HARD_TX_UNLOCK(dev);
                        if (net_ratelimit())
                                printk(KERN_CRIT "Virtual device %s asks to "
                                       "queue packet!\n", dev->name);
                } else {
                        /* Recursion is detected! It is possible,
                         * unfortunately */
                        if (net_ratelimit())
                                printk(KERN_CRIT "Dead loop on virtual device "
                                       "%s, fix it urgently!\n", dev->name);
                }
        }

        rc = -ENETDOWN;
        rcu_read_unlock_bh();

out_kfree_skb:
        kfree_skb(skb);
        return rc;
out:
        rcu_read_unlock_bh();
        return rc;
}
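
/*
 * Usage sketch (hypothetical): transmitting a fully built frame. The
 * caller sets skb->dev (and normally skb->priority) beforehand; as noted
 * above, the skb is consumed whatever the return value is.
 *
 *      skb->dev = dev;
 *      if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
 *              printk(KERN_DEBUG "frame not sent, skb already consumed\n");
 */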

/*=======================================================================
                        Receiver routines
  =======================================================================*/

int netdev_max_backlog = 1000;
int netdev_budget = 300;
int weight_p = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };

/**
 *      netif_rx - post buffer to the network code
 *      @skb: buffer to post
 *
 *      This function receives a packet from a device driver and queues it for
 *      the upper (protocol) levels to process.  It always succeeds. The buffer
 *      may be dropped during processing for congestion control or by the
 *      protocol layers.
 *
 *      return values:
 *      NET_RX_SUCCESS  (no congestion)
 *      NET_RX_CN_LOW   (low congestion)
 *      NET_RX_CN_MOD   (moderate congestion)
 *      NET_RX_CN_HIGH  (high congestion)
 *      NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
        struct softnet_data *queue;
        unsigned long flags;

        /* if netpoll wants it, pretend we never saw it */
        if (netpoll_rx(skb))
                return NET_RX_DROP;

        if (!skb->tstamp.off_sec)
                net_timestamp(skb);

        /*
         * The code is rearranged so that the path is the most
         * short when CPU is congested, but is still operating.
         */
        local_irq_save(flags);
        queue = &__get_cpu_var(softnet_data);

        __get_cpu_var(netdev_rx_stat).total++;
        if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
                if (queue->input_pkt_queue.qlen) {
enqueue:
                        dev_hold(skb->dev);
                        __skb_queue_tail(&queue->input_pkt_queue, skb);
                        local_irq_restore(flags);
                        return NET_RX_SUCCESS;
                }

                netif_rx_schedule(&queue->backlog_dev);
                goto enqueue;
        }

        __get_cpu_var(netdev_rx_stat).dropped++;
        local_irq_restore(flags);

        kfree_skb(skb);
        return NET_RX_DROP;
}
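
/*
 * Usage sketch (hypothetical): how a typical non-NAPI Ethernet driver
 * hands a received frame to this function from its interrupt handler.
 *
 *      skb->protocol = eth_type_trans(skb, dev);
 *      netif_rx(skb);
 */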

int netif_rx_ni(struct sk_buff *skb)
{
        int err;

        preempt_disable();
        err = netif_rx(skb);
        if (local_softirq_pending())
                do_softirq();
        preempt_enable();

        return err;
}

EXPORT_SYMBOL(netif_rx_ni);

static inline struct net_device *skb_bond(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;

        if (dev->master)
                skb->dev = dev->master;

        return dev;
}

static void net_tx_action(struct softirq_action *h)
{
        struct softnet_data *sd = &__get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_disable();
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_enable();

                while (clist) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;

                        BUG_TRAP(!atomic_read(&skb->users));
                        __kfree_skb(skb);
                }
        }

        if (sd->output_queue) {
                struct net_device *head;

                local_irq_disable();
                head = sd->output_queue;
                sd->output_queue = NULL;
                local_irq_enable();

                while (head) {
                        struct net_device *dev = head;
                        head = head->next_sched;

                        smp_mb__before_clear_bit();
                        clear_bit(__LINK_STATE_SCHED, &dev->state);

                        if (spin_trylock(&dev->queue_lock)) {
                                qdisc_run(dev);
                                spin_unlock(&dev->queue_lock);
                        } else {
                                netif_schedule(dev);
                        }
                }
        }
}

static __inline__ int deliver_skb(struct sk_buff *skb,
                                  struct packet_type *pt_prev,
                                  struct net_device *orig_dev)
{
        atomic_inc(&skb->users);
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
                                                unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);

static __inline__ int handle_bridge(struct sk_buff **pskb,
                                    struct packet_type **pt_prev, int *ret,
                                    struct net_device *orig_dev)
{
        struct net_bridge_port *port;

        if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
            (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
                return 0;

        if (*pt_prev) {
                *ret = deliver_skb(*pskb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }

        return br_handle_frame_hook(port, pskb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)      (0)
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesnt stop any functionality; if you dont have
 * the ingress scheduler, you just cant add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
        struct Qdisc *q;
        struct net_device *dev = skb->dev;
        int result = TC_ACT_OK;

        if (dev->qdisc_ingress) {
                __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
                if (MAX_RED_LOOP < ttl++) {
                        printk("Redir loop detected Dropping packet (%s->%s)\n",
                               skb->input_dev->name, skb->dev->name);
                        return TC_ACT_SHOT;
                }

                skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);

                skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);

                spin_lock(&dev->ingress_lock);
                if ((q = dev->qdisc_ingress) != NULL)
                        result = q->enqueue(skb, q);
                spin_unlock(&dev->ingress_lock);
        }

        return result;
}
#endif

int netif_receive_skb(struct sk_buff *skb)
{
        struct packet_type *ptype, *pt_prev;
        struct net_device *orig_dev;
        int ret = NET_RX_DROP;
        unsigned short type;

        /* if we've gotten here through NAPI, check netpoll */
        if (skb->dev->poll && netpoll_rx(skb))
                return NET_RX_DROP;

        if (!skb->tstamp.off_sec)
                net_timestamp(skb);

        if (!skb->input_dev)
                skb->input_dev = skb->dev;

        orig_dev = skb_bond(skb);

        __get_cpu_var(netdev_rx_stat).total++;

        skb->h.raw = skb->nh.raw = skb->data;
        skb->mac_len = skb->nh.raw - skb->mac.raw;

        pt_prev = NULL;

        rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
                goto ncls;
        }
#endif

#ifdef CONFIG_XEN
        switch (skb->ip_summed) {
        case CHECKSUM_UNNECESSARY:
                skb->proto_data_valid = 1;
                break;
        case CHECKSUM_HW:
                /* XXX Implement me. */
        default:
                skb->proto_data_valid = 0;
                break;
        }
#endif

        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                if (!ptype->dev || ptype->dev == skb->dev) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
                }
        }

#ifdef CONFIG_NET_CLS_ACT
        if (pt_prev) {
                ret = deliver_skb(skb, pt_prev, orig_dev);
                pt_prev = NULL; /* noone else should process this after*/
        } else {
                skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
        }

        ret = ing_filter(skb);

        if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
                kfree_skb(skb);
                goto out;
        }

        skb->tc_verd = 0;
ncls:
#endif

        handle_diverter(skb);

        if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
                goto out;

        type = skb->protocol;
        list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
                if (ptype->type == type &&
                    (!ptype->dev || ptype->dev == skb->dev)) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
                }
        }

        if (pt_prev) {
                ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
                kfree_skb(skb);
                /* Jamal, now you will not able to escape explaining
                 * me how you were going to use this. :-)
                 */
                ret = NET_RX_DROP;
        }

out:
        rcu_read_unlock();
        return ret;
}

static int process_backlog(struct net_device *backlog_dev, int *budget)
{
        int work = 0;
        int quota = min(backlog_dev->quota, *budget);
        struct softnet_data *queue = &__get_cpu_var(softnet_data);
        unsigned long start_time = jiffies;

        backlog_dev->weight = weight_p;
        for (;;) {
                struct sk_buff *skb;
                struct net_device *dev;

                local_irq_disable();
                skb = __skb_dequeue(&queue->input_pkt_queue);
                if (!skb)
                        goto job_done;
                local_irq_enable();

                dev = skb->dev;

                netif_receive_skb(skb);

                dev_put(dev);

                work++;

                if (work >= quota || jiffies - start_time > 1)
                        break;
        }

        backlog_dev->quota -= work;
        *budget -= work;
        return -1;

job_done:
        backlog_dev->quota -= work;
        *budget -= work;

        list_del(&backlog_dev->poll_list);
        smp_mb__before_clear_bit();
        netif_poll_enable(backlog_dev);

        local_irq_enable();
        return 0;
}

static void net_rx_action(struct softirq_action *h)
{
        struct softnet_data *queue = &__get_cpu_var(softnet_data);
        unsigned long start_time = jiffies;
        int budget = netdev_budget;
        void *have;

        local_irq_disable();

        while (!list_empty(&queue->poll_list)) {
                struct net_device *dev;

                if (budget <= 0 || jiffies - start_time > 1)
                        goto softnet_break;

                local_irq_enable();

                dev = list_entry(queue->poll_list.next,
                                 struct net_device, poll_list);
                have = netpoll_poll_lock(dev);

                if (dev->quota <= 0 || dev->poll(dev, &budget)) {
                        netpoll_poll_unlock(have);
                        local_irq_disable();
                        list_del(&dev->poll_list);
                        list_add_tail(&dev->poll_list, &queue->poll_list);
                        if (dev->quota < 0)
                                dev->quota += dev->weight;
                        else
                                dev->quota = dev->weight;
                } else {
                        netpoll_poll_unlock(have);
                        dev_put(dev);
                        local_irq_disable();
                }
        }
out:
        local_irq_enable();
        return;

softnet_break:
        __get_cpu_var(netdev_rx_stat).time_squeeze++;
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
        goto out;
}

static gifconf_func_t * gifconf_list [NPROTO];

/**
 *      register_gifconf - register a SIOCGIF handler
 *      @family: Address family
 *      @gifconf: Function handler
 *
 *      Register protocol dependent address dumping routines. The handler
 *      that is passed must not be freed or reused until it has been replaced
 *      by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
        if (family >= NPROTO)
                return -EINVAL;
        gifconf_list[family] = gifconf;
        return 0;
}

/*
 *      Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *      We need this ioctl for efficient implementation of the
 *      if_indextoname() function required by the IPv6 API.  Without
 *      it, we would have to search all the interfaces to find a
 *      match.  --pb
 */

static int dev_ifname(struct ifreq __user *arg)
{
        struct net_device *dev;
        struct ifreq ifr;

        /*
         *      Fetch the caller's info block.
         */

        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
                return -EFAULT;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(ifr.ifr_ifindex);
        if (!dev) {
                read_unlock(&dev_base_lock);
                return -ENODEV;
        }

        strcpy(ifr.ifr_name, dev->name);
        read_unlock(&dev_base_lock);

        if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
                return -EFAULT;
        return 0;
}

/*
 *      Perform a SIOCGIFCONF call. This structure will change
 *      size eventually, and there is nothing I can do about it.
 *      Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char __user *arg)
{
        struct ifconf ifc;
        struct net_device *dev;
        char __user *pos;
        int len;
        int total;
        int i;

        /*
         *      Fetch the caller's info block.
         */

        if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
                return -EFAULT;

        pos = ifc.ifc_buf;
        len = ifc.ifc_len;

        /*
         *      Loop over the interfaces, and write an info block for each.
         */

        total = 0;
        for (dev = dev_base; dev; dev = dev->next) {
                for (i = 0; i < NPROTO; i++) {
                        if (gifconf_list[i]) {
                                int done;
                                if (!pos)
                                        done = gifconf_list[i](dev, NULL, 0);
                                else
                                        done = gifconf_list[i](dev, pos + total,
                                                               len - total);
                                if (done < 0)
                                        return -EFAULT;
                                total += done;
                        }
                }
        }

        /*
         *      All done.  Write the updated control block back to the caller.
         */
        ifc.ifc_len = total;

        /*
         *      Both BSD and Solaris return 0 here, so we do too.
         */
        return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS
/*
 *      This is invoked by the /proc filesystem handler to display a device
 *      in detail.
 */
static __inline__ struct net_device *dev_get_idx(loff_t pos)
{
        struct net_device *dev;
        loff_t i;

        for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);

        return i == pos ? dev : NULL;
}

void *dev_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock(&dev_base_lock);
        return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
}

void dev_seq_stop(struct seq_file *seq, void *v)
{
        read_unlock(&dev_base_lock);
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
        if (dev->get_stats) {
                struct net_device_stats *stats = dev->get_stats(dev);

                seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
                                "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
                           dev->name, stats->rx_bytes, stats->rx_packets,
                           stats->rx_errors,
                           stats->rx_dropped + stats->rx_missed_errors,
                           stats->rx_fifo_errors,
                           stats->rx_length_errors + stats->rx_over_errors +
                             stats->rx_crc_errors + stats->rx_frame_errors,
                           stats->rx_compressed, stats->multicast,
                           stats->tx_bytes, stats->tx_packets,
                           stats->tx_errors, stats->tx_dropped,
                           stats->tx_fifo_errors, stats->collisions,
                           stats->tx_carrier_errors +
                             stats->tx_aborted_errors +
                             stats->tx_window_errors +
                             stats->tx_heartbeat_errors,
                           stats->tx_compressed);
        } else
                seq_printf(seq, "%6s: No statistics available.\n", dev->name);
}

/*
 *      Called from the PROCfs module. This now uses the new arbitrary sized
 *      /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Inter-|   Receive                            "
                              "                    |  Transmit\n"
                              " face |bytes    packets errs drop fifo frame "
                              "compressed multicast|bytes    packets errs "
                              "drop fifo colls carrier compressed\n");
        else
                dev_seq_printf_stats(seq, v);
        return 0;
}
2089 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2091 struct netif_rx_stats *rc = NULL;
2093 while (*pos < NR_CPUS)
2094 if (cpu_online(*pos)) {
2095 rc = &per_cpu(netdev_rx_stat, *pos);
2096 break;
2097 } else
2098 ++*pos;
2099 return rc;
2102 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2104 return softnet_get_online(pos);
2107 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2109 ++*pos;
2110 return softnet_get_online(pos);
2113 static void softnet_seq_stop(struct seq_file *seq, void *v)
2117 static int softnet_seq_show(struct seq_file *seq, void *v)
2119 struct netif_rx_stats *s = v;
2121 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2122 s->total, s->dropped, s->time_squeeze, 0,
2123 0, 0, 0, 0, /* was fastroute */
2124 s->cpu_collision);
2125 return 0;
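/*
 * One /proc/net/softnet_stat line per online CPU, all fields in hex:
 * packets processed (total), packets dropped, times the softirq budget
 * was exhausted (time_squeeze), five unused zero columns (four of them
 * formerly fastroute), and cpu_collision.
 */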
2128 static struct seq_operations dev_seq_ops = {
2129 .start = dev_seq_start,
2130 .next = dev_seq_next,
2131 .stop = dev_seq_stop,
2132 .show = dev_seq_show,
2133 };
2135 static int dev_seq_open(struct inode *inode, struct file *file)
2137 return seq_open(file, &dev_seq_ops);
2140 static struct file_operations dev_seq_fops = {
2141 .owner = THIS_MODULE,
2142 .open = dev_seq_open,
2143 .read = seq_read,
2144 .llseek = seq_lseek,
2145 .release = seq_release,
2146 };
2148 static struct seq_operations softnet_seq_ops = {
2149 .start = softnet_seq_start,
2150 .next = softnet_seq_next,
2151 .stop = softnet_seq_stop,
2152 .show = softnet_seq_show,
2153 };
2155 static int softnet_seq_open(struct inode *inode, struct file *file)
2157 return seq_open(file, &softnet_seq_ops);
2160 static struct file_operations softnet_seq_fops = {
2161 .owner = THIS_MODULE,
2162 .open = softnet_seq_open,
2163 .read = seq_read,
2164 .llseek = seq_lseek,
2165 .release = seq_release,
2166 };
2168 #ifdef WIRELESS_EXT
2169 extern int wireless_proc_init(void);
2170 #else
2171 #define wireless_proc_init() 0
2172 #endif
2174 static int __init dev_proc_init(void)
2176 int rc = -ENOMEM;
2178 if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
2179 goto out;
2180 if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
2181 goto out_dev;
2182 if (wireless_proc_init())
2183 goto out_softnet;
2184 rc = 0;
2185 out:
2186 return rc;
2187 out_softnet:
2188 proc_net_remove("softnet_stat");
2189 out_dev:
2190 proc_net_remove("dev");
2191 goto out;
2193 #else
2194 #define dev_proc_init() 0
2195 #endif /* CONFIG_PROC_FS */
2198 /**
2199 * netdev_set_master - set up master/slave pair
2200 * @slave: slave device
2201 * @master: new master device
2203 * Changes the master device of the slave. Pass %NULL to break the
2204 * bonding. The caller must hold the RTNL semaphore. On a failure
2205 * a negative errno code is returned. On success the reference counts
2206 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2207 * function returns zero.
2208 */
2209 int netdev_set_master(struct net_device *slave, struct net_device *master)
2211 struct net_device *old = slave->master;
2213 ASSERT_RTNL();
2215 if (master) {
2216 if (old)
2217 return -EBUSY;
2218 dev_hold(master);
2221 slave->master = master;
2223 synchronize_net();
2225 if (old)
2226 dev_put(old);
2228 if (master)
2229 slave->flags |= IFF_SLAVE;
2230 else
2231 slave->flags &= ~IFF_SLAVE;
2233 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2234 return 0;
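/*
 * Example (sketch): a bonding-style driver pairing and unpairing
 * devices. bond_dev and slave_dev are hypothetical; the caller must
 * already hold the RTNL semaphore.
 */
#if 0
int err;

ASSERT_RTNL();
err = netdev_set_master(slave_dev, bond_dev);	/* enslave */
if (!err)
	netdev_set_master(slave_dev, NULL);	/* break the pair */
#endif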
2237 /**
2238 * dev_set_promiscuity - update promiscuity count on a device
2239 * @dev: device
2240 * @inc: modifier
2242 * Add or remove promiscuity from a device. While the count in the device
2243 * remains above zero the interface remains promiscuous. Once it hits zero
2244 * the device reverts to normal filtering operation. A negative @inc
2245 * value is used to drop promiscuity on the device.
2246 */
2247 void dev_set_promiscuity(struct net_device *dev, int inc)
2249 unsigned short old_flags = dev->flags;
2251 if ((dev->promiscuity += inc) == 0)
2252 dev->flags &= ~IFF_PROMISC;
2253 else
2254 dev->flags |= IFF_PROMISC;
2255 if (dev->flags != old_flags) {
2256 dev_mc_upload(dev);
2257 printk(KERN_INFO "device %s %s promiscuous mode\n",
2258 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2259 "left");
2263 /**
2264 * dev_set_allmulti - update allmulti count on a device
2265 * @dev: device
2266 * @inc: modifier
2268 * Add or remove reception of all multicast frames to a device. While the
2269 * count in the device remains above zero the interface keeps listening
2270 * to all multicast frames. Once it hits zero the device reverts to normal
2271 * filtering operation. A negative @inc value is used to drop the counter
2272 * when releasing a resource needing all multicasts.
2273 */
2275 void dev_set_allmulti(struct net_device *dev, int inc)
2277 unsigned short old_flags = dev->flags;
2279 dev->flags |= IFF_ALLMULTI;
2280 if ((dev->allmulti += inc) == 0)
2281 dev->flags &= ~IFF_ALLMULTI;
2282 if (dev->flags ^ old_flags)
2283 dev_mc_upload(dev);
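/*
 * As with promiscuity above, a caller needing all multicast frames
 * pairs dev_set_allmulti(dev, 1) with a later dev_set_allmulti(dev, -1)
 * so the allmulti count balances.
 */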
2286 unsigned dev_get_flags(const struct net_device *dev)
2288 unsigned flags;
2290 flags = (dev->flags & ~(IFF_PROMISC |
2291 IFF_ALLMULTI |
2292 IFF_RUNNING)) |
2293 (dev->gflags & (IFF_PROMISC |
2294 IFF_ALLMULTI));
2296 if (netif_running(dev) && netif_carrier_ok(dev))
2297 flags |= IFF_RUNNING;
2299 return flags;
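/*
 * Note: IFF_RUNNING is synthesized here from operational state (device
 * up and carrier present) rather than read back from dev->flags, while
 * the promisc/allmulti bits report what user space requested (gflags).
 */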
2302 int dev_change_flags(struct net_device *dev, unsigned flags)
2304 int ret;
2305 int old_flags = dev->flags;
2307 /*
2308 * Set the flags on our device.
2309 */
2311 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2312 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2313 IFF_AUTOMEDIA)) |
2314 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2315 IFF_ALLMULTI));
2317 /*
2318 * Load in the correct multicast list now the flags have changed.
2319 */
2321 dev_mc_upload(dev);
2323 /*
2324 * Have we downed the interface? We handle IFF_UP ourselves
2325 * according to user attempts to set it, rather than blindly
2326 * setting it.
2327 */
2329 ret = 0;
2330 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
2331 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2333 if (!ret)
2334 dev_mc_upload(dev);
2337 if (dev->flags & IFF_UP &&
2338 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2339 IFF_VOLATILE)))
2340 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
2342 if ((flags ^ dev->gflags) & IFF_PROMISC) {
2343 int inc = (flags & IFF_PROMISC) ? +1 : -1;
2344 dev->gflags ^= IFF_PROMISC;
2345 dev_set_promiscuity(dev, inc);
2348 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
2349 is important. Some (broken) drivers set IFF_PROMISC when
2350 IFF_ALLMULTI is requested, without asking us and without reporting it.
2351 */
2352 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
2353 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
2354 dev->gflags ^= IFF_ALLMULTI;
2355 dev_set_allmulti(dev, inc);
2358 if (old_flags ^ dev->flags)
2359 rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
2361 return ret;
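/*
 * Example (sketch): bringing an interface up administratively, the
 * same path SIOCSIFFLAGS takes through dev_ifsioc() below.
 */
#if 0
int err;

rtnl_lock();
err = dev_change_flags(dev, dev->flags | IFF_UP);
rtnl_unlock();
#endif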
2364 int dev_set_mtu(struct net_device *dev, int new_mtu)
2366 int err;
2368 if (new_mtu == dev->mtu)
2369 return 0;
2371 /* MTU must be positive. */
2372 if (new_mtu < 0)
2373 return -EINVAL;
2375 if (!netif_device_present(dev))
2376 return -ENODEV;
2378 err = 0;
2379 if (dev->change_mtu)
2380 err = dev->change_mtu(dev, new_mtu);
2381 else
2382 dev->mtu = new_mtu;
2383 if (!err && dev->flags & IFF_UP)
2384 notifier_call_chain(&netdev_chain,
2385 NETDEV_CHANGEMTU, dev);
2386 return err;
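/*
 * Example (sketch): a VLAN-style user lowering the MTU by the 4 bytes
 * a tag would consume; assumes 'dev' is valid and RTNL is held.
 */
#if 0
int err = dev_set_mtu(dev, dev->mtu - 4);
if (err)
	printk(KERN_WARNING "%s: could not lower MTU\n", dev->name);
#endif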
2389 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2391 int err;
2393 if (!dev->set_mac_address)
2394 return -EOPNOTSUPP;
2395 if (sa->sa_family != dev->type)
2396 return -EINVAL;
2397 if (!netif_device_present(dev))
2398 return -ENODEV;
2399 err = dev->set_mac_address(dev, sa);
2400 if (!err)
2401 notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
2402 return err;
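/*
 * Example (sketch): programming a new station address. The address
 * bytes are placeholders; sa_family must match dev->type.
 */
#if 0
struct sockaddr sa;
int err;

sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER */
memcpy(sa.sa_data, "\x02\x00\x00\x00\x00\x01", ETH_ALEN);
err = dev_set_mac_address(dev, &sa);
#endif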
2405 /*
2406 * Perform the SIOCxIFxxx calls.
2407 */
2408 static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2410 int err;
2411 struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
2413 if (!dev)
2414 return -ENODEV;
2416 switch (cmd) {
2417 case SIOCGIFFLAGS: /* Get interface flags */
2418 ifr->ifr_flags = dev_get_flags(dev);
2419 return 0;
2421 case SIOCSIFFLAGS: /* Set interface flags */
2422 return dev_change_flags(dev, ifr->ifr_flags);
2424 case SIOCGIFMETRIC: /* Get the metric on the interface
2425 (currently unused) */
2426 ifr->ifr_metric = 0;
2427 return 0;
2429 case SIOCSIFMETRIC: /* Set the metric on the interface
2430 (currently unused) */
2431 return -EOPNOTSUPP;
2433 case SIOCGIFMTU: /* Get the MTU of a device */
2434 ifr->ifr_mtu = dev->mtu;
2435 return 0;
2437 case SIOCSIFMTU: /* Set the MTU of a device */
2438 return dev_set_mtu(dev, ifr->ifr_mtu);
2440 case SIOCGIFHWADDR:
2441 if (!dev->addr_len)
2442 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
2443 else
2444 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
2445 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2446 ifr->ifr_hwaddr.sa_family = dev->type;
2447 return 0;
2449 case SIOCSIFHWADDR:
2450 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
2452 case SIOCSIFHWBROADCAST:
2453 if (ifr->ifr_hwaddr.sa_family != dev->type)
2454 return -EINVAL;
2455 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2456 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2457 notifier_call_chain(&netdev_chain,
2458 NETDEV_CHANGEADDR, dev);
2459 return 0;
2461 case SIOCGIFMAP:
2462 ifr->ifr_map.mem_start = dev->mem_start;
2463 ifr->ifr_map.mem_end = dev->mem_end;
2464 ifr->ifr_map.base_addr = dev->base_addr;
2465 ifr->ifr_map.irq = dev->irq;
2466 ifr->ifr_map.dma = dev->dma;
2467 ifr->ifr_map.port = dev->if_port;
2468 return 0;
2470 case SIOCSIFMAP:
2471 if (dev->set_config) {
2472 if (!netif_device_present(dev))
2473 return -ENODEV;
2474 return dev->set_config(dev, &ifr->ifr_map);
2476 return -EOPNOTSUPP;
2478 case SIOCADDMULTI:
2479 if (!dev->set_multicast_list ||
2480 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2481 return -EINVAL;
2482 if (!netif_device_present(dev))
2483 return -ENODEV;
2484 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
2485 dev->addr_len, 1);
2487 case SIOCDELMULTI:
2488 if (!dev->set_multicast_list ||
2489 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2490 return -EINVAL;
2491 if (!netif_device_present(dev))
2492 return -ENODEV;
2493 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
2494 dev->addr_len, 1);
2496 case SIOCGIFINDEX:
2497 ifr->ifr_ifindex = dev->ifindex;
2498 return 0;
2500 case SIOCGIFTXQLEN:
2501 ifr->ifr_qlen = dev->tx_queue_len;
2502 return 0;
2504 case SIOCSIFTXQLEN:
2505 if (ifr->ifr_qlen < 0)
2506 return -EINVAL;
2507 dev->tx_queue_len = ifr->ifr_qlen;
2508 return 0;
2510 case SIOCSIFNAME:
2511 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
2512 return dev_change_name(dev, ifr->ifr_newname);
2514 /*
2515 * Unknown or private ioctl
2516 */
2518 default:
2519 if ((cmd >= SIOCDEVPRIVATE &&
2520 cmd <= SIOCDEVPRIVATE + 15) ||
2521 cmd == SIOCBONDENSLAVE ||
2522 cmd == SIOCBONDRELEASE ||
2523 cmd == SIOCBONDSETHWADDR ||
2524 cmd == SIOCBONDSLAVEINFOQUERY ||
2525 cmd == SIOCBONDINFOQUERY ||
2526 cmd == SIOCBONDCHANGEACTIVE ||
2527 cmd == SIOCGMIIPHY ||
2528 cmd == SIOCGMIIREG ||
2529 cmd == SIOCSMIIREG ||
2530 cmd == SIOCBRADDIF ||
2531 cmd == SIOCBRDELIF ||
2532 cmd == SIOCWANDEV) {
2533 err = -EOPNOTSUPP;
2534 if (dev->do_ioctl) {
2535 if (netif_device_present(dev))
2536 err = dev->do_ioctl(dev, ifr,
2537 cmd);
2538 else
2539 err = -ENODEV;
2541 } else
2542 err = -EINVAL;
2545 return err;
2548 /*
2549 * This function handles all "interface"-type I/O control requests. The actual
2550 * 'doing' part of this is dev_ifsioc above.
2551 */
2553 /**
2554 * dev_ioctl - network device ioctl
2555 * @cmd: command to issue
2556 * @arg: pointer to a struct ifreq in user space
2558 * Issue ioctl functions to devices. This is normally called by the
2559 * user space syscall interfaces but can sometimes be useful for
2560 * other purposes. The return value is the return from the syscall if
2561 * positive or a negative errno code on error.
2562 */
2564 int dev_ioctl(unsigned int cmd, void __user *arg)
2566 struct ifreq ifr;
2567 int ret;
2568 char *colon;
2570 /* One special case: SIOCGIFCONF takes ifconf argument
2571 and requires shared lock, because it sleeps writing
2572 to user space.
2573 */
2575 if (cmd == SIOCGIFCONF) {
2576 rtnl_shlock();
2577 ret = dev_ifconf((char __user *) arg);
2578 rtnl_shunlock();
2579 return ret;
2581 if (cmd == SIOCGIFNAME)
2582 return dev_ifname((struct ifreq __user *)arg);
2584 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2585 return -EFAULT;
2587 ifr.ifr_name[IFNAMSIZ-1] = 0;
2589 colon = strchr(ifr.ifr_name, ':');
2590 if (colon)
2591 *colon = 0;
2593 /*
2594 * See which interface the caller is talking about.
2595 */
2597 switch (cmd) {
2598 /*
2599 * These ioctl calls:
2600 * - can be done by all.
2601 * - atomic and do not require locking.
2602 * - return a value
2603 */
2604 case SIOCGIFFLAGS:
2605 case SIOCGIFMETRIC:
2606 case SIOCGIFMTU:
2607 case SIOCGIFHWADDR:
2608 case SIOCGIFSLAVE:
2609 case SIOCGIFMAP:
2610 case SIOCGIFINDEX:
2611 case SIOCGIFTXQLEN:
2612 dev_load(ifr.ifr_name);
2613 read_lock(&dev_base_lock);
2614 ret = dev_ifsioc(&ifr, cmd);
2615 read_unlock(&dev_base_lock);
2616 if (!ret) {
2617 if (colon)
2618 *colon = ':';
2619 if (copy_to_user(arg, &ifr,
2620 sizeof(struct ifreq)))
2621 ret = -EFAULT;
2623 return ret;
2625 case SIOCETHTOOL:
2626 dev_load(ifr.ifr_name);
2627 rtnl_lock();
2628 ret = dev_ethtool(&ifr);
2629 rtnl_unlock();
2630 if (!ret) {
2631 if (colon)
2632 *colon = ':';
2633 if (copy_to_user(arg, &ifr,
2634 sizeof(struct ifreq)))
2635 ret = -EFAULT;
2637 return ret;
2639 /*
2640 * These ioctl calls:
2641 * - require superuser power.
2642 * - require strict serialization.
2643 * - return a value
2644 */
2645 case SIOCGMIIPHY:
2646 case SIOCGMIIREG:
2647 case SIOCSIFNAME:
2648 if (!capable(CAP_NET_ADMIN))
2649 return -EPERM;
2650 dev_load(ifr.ifr_name);
2651 rtnl_lock();
2652 ret = dev_ifsioc(&ifr, cmd);
2653 rtnl_unlock();
2654 if (!ret) {
2655 if (colon)
2656 *colon = ':';
2657 if (copy_to_user(arg, &ifr,
2658 sizeof(struct ifreq)))
2659 ret = -EFAULT;
2661 return ret;
2663 /*
2664 * These ioctl calls:
2665 * - require superuser power.
2666 * - require strict serialization.
2667 * - do not return a value
2668 */
2669 case SIOCSIFFLAGS:
2670 case SIOCSIFMETRIC:
2671 case SIOCSIFMTU:
2672 case SIOCSIFMAP:
2673 case SIOCSIFHWADDR:
2674 case SIOCSIFSLAVE:
2675 case SIOCADDMULTI:
2676 case SIOCDELMULTI:
2677 case SIOCSIFHWBROADCAST:
2678 case SIOCSIFTXQLEN:
2679 case SIOCSMIIREG:
2680 case SIOCBONDENSLAVE:
2681 case SIOCBONDRELEASE:
2682 case SIOCBONDSETHWADDR:
2683 case SIOCBONDCHANGEACTIVE:
2684 case SIOCBRADDIF:
2685 case SIOCBRDELIF:
2686 if (!capable(CAP_NET_ADMIN))
2687 return -EPERM;
2688 /* fall through */
2689 case SIOCBONDSLAVEINFOQUERY:
2690 case SIOCBONDINFOQUERY:
2691 dev_load(ifr.ifr_name);
2692 rtnl_lock();
2693 ret = dev_ifsioc(&ifr, cmd);
2694 rtnl_unlock();
2695 return ret;
2697 case SIOCGIFMEM:
2698 /* Get the per device memory space. We can add this but
2699 * currently do not support it */
2700 case SIOCSIFMEM:
2701 /* Set the per device memory buffer space.
2702 * Not applicable in our case */
2703 case SIOCSIFLINK:
2704 return -EINVAL;
2706 /*
2707 * Unknown or private ioctl.
2708 */
2709 default:
2710 if (cmd == SIOCWANDEV ||
2711 (cmd >= SIOCDEVPRIVATE &&
2712 cmd <= SIOCDEVPRIVATE + 15)) {
2713 dev_load(ifr.ifr_name);
2714 rtnl_lock();
2715 ret = dev_ifsioc(&ifr, cmd);
2716 rtnl_unlock();
2717 if (!ret && copy_to_user(arg, &ifr,
2718 sizeof(struct ifreq)))
2719 ret = -EFAULT;
2720 return ret;
2722 #ifdef WIRELESS_EXT
2723 /* Take care of Wireless Extensions */
2724 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2725 /* If command is `set a parameter', or
2726 * `get the encoding parameters', check if
2727 * the user has the right to do it */
2728 if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
2729 if (!capable(CAP_NET_ADMIN))
2730 return -EPERM;
2732 dev_load(ifr.ifr_name);
2733 rtnl_lock();
2734 /* Follow me in net/core/wireless.c */
2735 ret = wireless_process_ioctl(&ifr, cmd);
2736 rtnl_unlock();
2737 if (IW_IS_GET(cmd) &&
2738 copy_to_user(arg, &ifr,
2739 sizeof(struct ifreq)))
2740 ret = -EFAULT;
2741 return ret;
2743 #endif /* WIRELESS_EXT */
2744 return -EINVAL;
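/*
 * Example (illustrative user space sketch): querying flags through the
 * SIOCGIFFLAGS branch dispatched above. The name "eth0" is assumed.
 */
#if 0
struct ifreq ifr;
int fd = socket(AF_INET, SOCK_DGRAM, 0);

strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
if (fd >= 0 && ioctl(fd, SIOCGIFFLAGS, &ifr) == 0 &&
    (ifr.ifr_flags & IFF_UP))
	printf("eth0 is up\n");
#endif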
2749 /**
2750 * dev_new_index - allocate an ifindex
2752 * Returns a suitable unique value for a new device interface
2753 * number. The caller must hold the rtnl semaphore or the
2754 * dev_base_lock to be sure it remains unique.
2755 */
2756 static int dev_new_index(void)
2758 static int ifindex;
2759 for (;;) {
2760 if (++ifindex <= 0)
2761 ifindex = 1;
2762 if (!__dev_get_by_index(ifindex))
2763 return ifindex;
2767 static int dev_boot_phase = 1;
2769 /* Delayed registration/unregistration */
2770 static DEFINE_SPINLOCK(net_todo_list_lock);
2771 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
2773 static inline void net_set_todo(struct net_device *dev)
2775 spin_lock(&net_todo_list_lock);
2776 list_add_tail(&dev->todo_list, &net_todo_list);
2777 spin_unlock(&net_todo_list_lock);
2780 /**
2781 * register_netdevice - register a network device
2782 * @dev: device to register
2784 * Take a completed network device structure and add it to the kernel
2785 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2786 * chain. 0 is returned on success. A negative errno code is returned
2787 * on a failure to set up the device, or if the name is a duplicate.
2789 * Callers must hold the rtnl semaphore. You may want
2790 * register_netdev() instead of this.
2792 * BUGS:
2793 * The locking appears insufficient to guarantee two parallel registers
2794 * will not get the same name.
2795 */
2797 int register_netdevice(struct net_device *dev)
2799 struct hlist_head *head;
2800 struct hlist_node *p;
2801 int ret;
2803 BUG_ON(dev_boot_phase);
2804 ASSERT_RTNL();
2806 /* When net_device's are persistent, this will be fatal. */
2807 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2809 spin_lock_init(&dev->queue_lock);
2810 spin_lock_init(&dev->_xmit_lock);
2811 dev->xmit_lock_owner = -1;
2812 #ifdef CONFIG_NET_CLS_ACT
2813 spin_lock_init(&dev->ingress_lock);
2814 #endif
2816 ret = alloc_divert_blk(dev);
2817 if (ret)
2818 goto out;
2820 dev->iflink = -1;
2822 /* Init, if this function is available */
2823 if (dev->init) {
2824 ret = dev->init(dev);
2825 if (ret) {
2826 if (ret > 0)
2827 ret = -EIO;
2828 goto out_err;
2832 if (!dev_valid_name(dev->name)) {
2833 ret = -EINVAL;
2834 goto out_err;
2837 dev->ifindex = dev_new_index();
2838 if (dev->iflink == -1)
2839 dev->iflink = dev->ifindex;
2841 /* Check for existence of name */
2842 head = dev_name_hash(dev->name);
2843 hlist_for_each(p, head) {
2844 struct net_device *d
2845 = hlist_entry(p, struct net_device, name_hlist);
2846 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
2847 ret = -EEXIST;
2848 goto out_err;
2852 /* Fix illegal SG+CSUM combinations. */
2853 if ((dev->features & NETIF_F_SG) &&
2854 !(dev->features & NETIF_F_ALL_CSUM)) {
2855 printk(KERN_ERR "%s: Dropping NETIF_F_SG since no checksum feature.\n",
2856 dev->name);
2857 dev->features &= ~NETIF_F_SG;
2860 /* TSO requires that SG is present as well. */
2861 if ((dev->features & NETIF_F_TSO) &&
2862 !(dev->features & NETIF_F_SG)) {
2863 printk(KERN_ERR "%s: Dropping NETIF_F_TSO since no SG feature.\n",
2864 dev->name);
2865 dev->features &= ~NETIF_F_TSO;
2867 if (dev->features & NETIF_F_UFO) {
2868 if (!(dev->features & NETIF_F_HW_CSUM)) {
2869 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2870 "NETIF_F_HW_CSUM feature.\n",
2871 dev->name);
2872 dev->features &= ~NETIF_F_UFO;
2874 if (!(dev->features & NETIF_F_SG)) {
2875 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
2876 "NETIF_F_SG feature.\n",
2877 dev->name);
2878 dev->features &= ~NETIF_F_UFO;
2882 /*
2883 * Nil rebuild_header routine; it should never be called
2884 * and serves only as a bug trap.
2885 */
2887 if (!dev->rebuild_header)
2888 dev->rebuild_header = default_rebuild_header;
2890 /*
2891 * Default initial state at registration is that the
2892 * device is present.
2893 */
2895 set_bit(__LINK_STATE_PRESENT, &dev->state);
2897 dev->next = NULL;
2898 dev_init_scheduler(dev);
2899 write_lock_bh(&dev_base_lock);
2900 *dev_tail = dev;
2901 dev_tail = &dev->next;
2902 hlist_add_head(&dev->name_hlist, head);
2903 hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2904 dev_hold(dev);
2905 dev->reg_state = NETREG_REGISTERING;
2906 write_unlock_bh(&dev_base_lock);
2908 /* Notify protocols, that a new device appeared. */
2909 notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2911 /* Finish registration after unlock */
2912 net_set_todo(dev);
2913 ret = 0;
2915 out:
2916 return ret;
2917 out_err:
2918 free_divert_blk(dev);
2919 goto out;
2922 /**
2923 * register_netdev - register a network device
2924 * @dev: device to register
2926 * Take a completed network device structure and add it to the kernel
2927 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2928 * chain. 0 is returned on success. A negative errno code is returned
2929 * on a failure to set up the device, or if the name is a duplicate.
2931 * This is a wrapper around register_netdevice that takes the rtnl semaphore
2932 * and expands the device name if you passed a format string to
2933 * alloc_netdev.
2934 */
2935 int register_netdev(struct net_device *dev)
2937 int err;
2939 rtnl_lock();
2941 /*
2942 * If the name is a format string the caller wants us to do a
2943 * name allocation.
2944 */
2945 if (strchr(dev->name, '%')) {
2946 err = dev_alloc_name(dev, dev->name);
2947 if (err < 0)
2948 goto out;
2951 /*
2952 * Back compatibility hook. Kill this one in 2.5
2953 */
2954 if (dev->name[0] == 0 || dev->name[0] == ' ') {
2955 err = dev_alloc_name(dev, "eth%d");
2956 if (err < 0)
2957 goto out;
2960 err = register_netdevice(dev);
2961 out:
2962 rtnl_unlock();
2963 return err;
2965 EXPORT_SYMBOL(register_netdev);
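/*
 * Example (sketch): the canonical driver lifecycle around the
 * registration helpers above. my_setup() and struct my_priv are
 * hypothetical driver pieces.
 */
#if 0
struct net_device *dev;

dev = alloc_netdev(sizeof(struct my_priv), "eth%d", my_setup);
if (!dev)
	return -ENOMEM;
if (register_netdev(dev)) {
	free_netdev(dev);	/* never registered: plain free */
	return -EIO;
}
/* ... later, on module unload ... */
unregister_netdev(dev);
free_netdev(dev);
#endif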
2967 /*
2968 * netdev_wait_allrefs - wait until all references are gone.
2970 * This is called when unregistering network devices.
2972 * Any protocol or device that holds a reference should register
2973 * for netdevice notification, and clean up and put the
2974 * reference if they receive an UNREGISTER event.
2975 * We can get stuck here if buggy protocols don't correctly
2976 * call dev_put.
2977 */
2978 static void netdev_wait_allrefs(struct net_device *dev)
2980 unsigned long rebroadcast_time, warning_time;
2982 rebroadcast_time = warning_time = jiffies;
2983 while (atomic_read(&dev->refcnt) != 0) {
2984 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
2985 rtnl_shlock();
2987 /* Rebroadcast unregister notification */
2988 notifier_call_chain(&netdev_chain,
2989 NETDEV_UNREGISTER, dev);
2991 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
2992 &dev->state)) {
2993 /* We must not have linkwatch events
2994 * pending on unregister. If this
2995 * happens, we simply run the queue
2996 * unscheduled, resulting in a noop
2997 * for this device.
2998 */
2999 linkwatch_run_queue();
3002 rtnl_shunlock();
3004 rebroadcast_time = jiffies;
3007 msleep(250);
3009 if (time_after(jiffies, warning_time + 10 * HZ)) {
3010 printk(KERN_EMERG "unregister_netdevice: "
3011 "waiting for %s to become free. Usage "
3012 "count = %d\n",
3013 dev->name, atomic_read(&dev->refcnt));
3014 warning_time = jiffies;
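/*
 * Example (sketch): how a protocol holding a long-lived device
 * reference keeps the loop above from stalling - release it from its
 * netdevice notifier. my_holds_ref() and my_cleanup() are hypothetical.
 */
#if 0
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && my_holds_ref(dev)) {
		my_cleanup(dev);	/* tear down private state... */
		dev_put(dev);		/* ...and drop the reference */
	}
	return NOTIFY_DONE;
}
#endif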
3019 /* The sequence is:
3021 * rtnl_lock();
3022 * ...
3023 * register_netdevice(x1);
3024 * register_netdevice(x2);
3025 * ...
3026 * unregister_netdevice(y1);
3027 * unregister_netdevice(y2);
3028 * ...
3029 * rtnl_unlock();
3030 * free_netdev(y1);
3031 * free_netdev(y2);
3033 * We are invoked by rtnl_unlock() after it drops the semaphore.
3034 * This allows us to deal with problems:
3035 * 1) We can create/delete sysfs objects which invoke hotplug
3036 * without deadlocking with linkwatch via keventd.
3037 * 2) Since we run with the RTNL semaphore not held, we can sleep
3038 * safely in order to wait for the netdev refcnt to drop to zero.
3039 */
3040 static DECLARE_MUTEX(net_todo_run_mutex);
3041 void netdev_run_todo(void)
3043 struct list_head list = LIST_HEAD_INIT(list);
3044 int err;
3047 /* Need to guard against multiple CPUs getting out of order. */
3048 down(&net_todo_run_mutex);
3050 /* Not safe to do outside the semaphore. We must not return
3051 * until all unregister events invoked by the local processor
3052 * have been completed (either by this todo run, or one on
3053 * another cpu).
3054 */
3055 if (list_empty(&net_todo_list))
3056 goto out;
3058 /* Snapshot list, allow later requests */
3059 spin_lock(&net_todo_list_lock);
3060 list_splice_init(&net_todo_list, &list);
3061 spin_unlock(&net_todo_list_lock);
3063 while (!list_empty(&list)) {
3064 struct net_device *dev
3065 = list_entry(list.next, struct net_device, todo_list);
3066 list_del(&dev->todo_list);
3068 switch(dev->reg_state) {
3069 case NETREG_REGISTERING:
3070 dev->reg_state = NETREG_REGISTERED;
3071 err = netdev_register_sysfs(dev);
3072 if (err)
3073 printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
3074 dev->name, err);
3075 break;
3077 case NETREG_UNREGISTERING:
3078 netdev_unregister_sysfs(dev);
3079 dev->reg_state = NETREG_UNREGISTERED;
3081 netdev_wait_allrefs(dev);
3083 /* paranoia */
3084 BUG_ON(atomic_read(&dev->refcnt));
3085 BUG_TRAP(!dev->ip_ptr);
3086 BUG_TRAP(!dev->ip6_ptr);
3087 BUG_TRAP(!dev->dn_ptr);
3090 /* It must be the very last action,
3091 * after this 'dev' may point to freed up memory.
3092 */
3093 if (dev->destructor)
3094 dev->destructor(dev);
3095 break;
3097 default:
3098 printk(KERN_ERR "network todo '%s' but state %d\n",
3099 dev->name, dev->reg_state);
3100 break;
3104 out:
3105 up(&net_todo_run_mutex);
3108 /**
3109 * alloc_netdev - allocate network device
3110 * @sizeof_priv: size of private data to allocate space for
3111 * @name: device name format string
3112 * @setup: callback to initialize device
3114 * Allocates a struct net_device with private data area for driver use
3115 * and performs basic initialization.
3116 */
3117 struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3118 void (*setup)(struct net_device *))
3120 void *p;
3121 struct net_device *dev;
3122 int alloc_size;
3124 /* ensure 32-byte alignment of both the device and private area */
3125 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
3126 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
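/*
 * With NETDEV_ALIGN_CONST == 31 (NETDEV_ALIGN - 1 in netdevice.h),
 * this rounds sizeof(*dev) up to the next multiple of 32, then
 * reserves 31 spare bytes so the start of the private area can be
 * rounded up the same way below.
 */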
3128 p = kmalloc(alloc_size, GFP_KERNEL);
3129 if (!p) {
3130 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
3131 return NULL;
3133 memset(p, 0, alloc_size);
3135 dev = (struct net_device *)
3136 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3137 dev->padded = (char *)dev - (char *)p;
3139 if (sizeof_priv)
3140 dev->priv = netdev_priv(dev);
3142 setup(dev);
3143 strcpy(dev->name, name);
3144 return dev;
3146 EXPORT_SYMBOL(alloc_netdev);
3148 /**
3149 * free_netdev - free network device
3150 * @dev: device
3152 * This function does the last stage of destroying an allocated device
3153 * interface. The reference to the device object is released.
3154 * If this is the last reference then it will be freed.
3155 */
3156 void free_netdev(struct net_device *dev)
3158 #ifdef CONFIG_SYSFS
3159 /* Compatibility with error handling in drivers */
3160 if (dev->reg_state == NETREG_UNINITIALIZED) {
3161 kfree((char *)dev - dev->padded);
3162 return;
3165 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3166 dev->reg_state = NETREG_RELEASED;
3168 /* will free via class release */
3169 class_device_put(&dev->class_dev);
3170 #else
3171 kfree((char *)dev - dev->padded);
3172 #endif
3175 /* Synchronize with packet receive processing. */
3176 void synchronize_net(void)
3178 might_sleep();
3179 synchronize_rcu();
3182 /**
3183 * unregister_netdevice - remove device from the kernel
3184 * @dev: device
3186 * This function shuts down a device interface and removes it
3187 * from the kernel tables. On success 0 is returned, on a failure
3188 * a negative errno code is returned.
3190 * Callers must hold the rtnl semaphore. You may want
3191 * unregister_netdev() instead of this.
3192 */
3194 int unregister_netdevice(struct net_device *dev)
3196 struct net_device *d, **dp;
3198 BUG_ON(dev_boot_phase);
3199 ASSERT_RTNL();
3201 /* Some devices call without registering for initialization unwind. */
3202 if (dev->reg_state == NETREG_UNINITIALIZED) {
3203 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3204 "was registered\n", dev->name, dev);
3205 return -ENODEV;
3208 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3210 /* If device is running, close it first. */
3211 if (dev->flags & IFF_UP)
3212 dev_close(dev);
3214 /* And unlink it from device chain. */
3215 for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
3216 if (d == dev) {
3217 write_lock_bh(&dev_base_lock);
3218 hlist_del(&dev->name_hlist);
3219 hlist_del(&dev->index_hlist);
3220 if (dev_tail == &dev->next)
3221 dev_tail = dp;
3222 *dp = d->next;
3223 write_unlock_bh(&dev_base_lock);
3224 break;
3227 if (!d) {
3228 printk(KERN_ERR "unregister net_device: '%s' not found\n",
3229 dev->name);
3230 return -ENODEV;
3233 dev->reg_state = NETREG_UNREGISTERING;
3235 synchronize_net();
3237 /* Shutdown queueing discipline. */
3238 dev_shutdown(dev);
3241 /* Notify protocols, that we are about to destroy
3242 this device. They should clean all the things.
3243 */
3244 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3246 /*
3247 * Flush the multicast chain
3248 */
3249 dev_mc_discard(dev);
3251 if (dev->uninit)
3252 dev->uninit(dev);
3254 /* Notifier chain MUST detach us from master device. */
3255 BUG_TRAP(!dev->master);
3257 free_divert_blk(dev);
3259 /* Finish processing unregister after unlock */
3260 net_set_todo(dev);
3262 synchronize_net();
3264 dev_put(dev);
3265 return 0;
3268 /**
3269 * unregister_netdev - remove device from the kernel
3270 * @dev: device
3272 * This function shuts down a device interface and removes it
3273 * from the kernel tables. On success 0 is returned, on a failure
3274 * a negative errno code is returned.
3276 * This is just a wrapper for unregister_netdevice that takes
3277 * the rtnl semaphore. In general you want to use this and not
3278 * unregister_netdevice.
3279 */
3280 void unregister_netdev(struct net_device *dev)
3282 rtnl_lock();
3283 unregister_netdevice(dev);
3284 rtnl_unlock();
3287 EXPORT_SYMBOL(unregister_netdev);
3289 #ifdef CONFIG_HOTPLUG_CPU
3290 static int dev_cpu_callback(struct notifier_block *nfb,
3291 unsigned long action,
3292 void *ocpu)
3294 struct sk_buff **list_skb;
3295 struct net_device **list_net;
3296 struct sk_buff *skb;
3297 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3298 struct softnet_data *sd, *oldsd;
3300 if (action != CPU_DEAD)
3301 return NOTIFY_OK;
3303 local_irq_disable();
3304 cpu = smp_processor_id();
3305 sd = &per_cpu(softnet_data, cpu);
3306 oldsd = &per_cpu(softnet_data, oldcpu);
3308 /* Find end of our completion_queue. */
3309 list_skb = &sd->completion_queue;
3310 while (*list_skb)
3311 list_skb = &(*list_skb)->next;
3312 /* Append completion queue from offline CPU. */
3313 *list_skb = oldsd->completion_queue;
3314 oldsd->completion_queue = NULL;
3316 /* Find end of our output_queue. */
3317 list_net = &sd->output_queue;
3318 while (*list_net)
3319 list_net = &(*list_net)->next_sched;
3320 /* Append output queue from offline CPU. */
3321 *list_net = oldsd->output_queue;
3322 oldsd->output_queue = NULL;
3324 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3325 local_irq_enable();
3327 /* Process offline CPU's input_pkt_queue */
3328 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3329 netif_rx(skb);
3331 return NOTIFY_OK;
3333 #endif /* CONFIG_HOTPLUG_CPU */
3336 /*
3337 * Initialize the DEV module. At boot time this walks the device list and
3338 * unhooks any devices that fail to initialise (normally hardware not
3339 * present) and leaves us with a valid list of present and active devices.
3341 */
3343 /*
3344 * This is called single threaded during boot, so no need
3345 * to take the rtnl semaphore.
3346 */
3347 static int __init net_dev_init(void)
3349 int i, rc = -ENOMEM;
3351 BUG_ON(!dev_boot_phase);
3353 net_random_init();
3355 if (dev_proc_init())
3356 goto out;
3358 if (netdev_sysfs_init())
3359 goto out;
3361 INIT_LIST_HEAD(&ptype_all);
3362 for (i = 0; i < 16; i++)
3363 INIT_LIST_HEAD(&ptype_base[i]);
3365 for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
3366 INIT_HLIST_HEAD(&dev_name_head[i]);
3368 for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
3369 INIT_HLIST_HEAD(&dev_index_head[i]);
3371 /*
3372 * Initialise the packet receive queues.
3373 */
3375 for_each_cpu(i) {
3376 struct softnet_data *queue;
3378 queue = &per_cpu(softnet_data, i);
3379 skb_queue_head_init(&queue->input_pkt_queue);
3380 queue->completion_queue = NULL;
3381 INIT_LIST_HEAD(&queue->poll_list);
3382 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
3383 queue->backlog_dev.weight = weight_p;
3384 queue->backlog_dev.poll = process_backlog;
3385 atomic_set(&queue->backlog_dev.refcnt, 1);
3388 dev_boot_phase = 0;
3390 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
3391 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
3393 hotcpu_notifier(dev_cpu_callback, 0);
3394 dst_init();
3395 dev_mcast_init();
3396 rc = 0;
3397 out:
3398 return rc;
3401 subsys_initcall(net_dev_init);
3403 EXPORT_SYMBOL(__dev_get_by_index);
3404 EXPORT_SYMBOL(__dev_get_by_name);
3405 EXPORT_SYMBOL(__dev_remove_pack);
3406 EXPORT_SYMBOL(dev_valid_name);
3407 EXPORT_SYMBOL(dev_add_pack);
3408 EXPORT_SYMBOL(dev_alloc_name);
3409 EXPORT_SYMBOL(dev_close);
3410 EXPORT_SYMBOL(dev_get_by_flags);
3411 EXPORT_SYMBOL(dev_get_by_index);
3412 EXPORT_SYMBOL(dev_get_by_name);
3413 EXPORT_SYMBOL(dev_open);
3414 EXPORT_SYMBOL(dev_queue_xmit);
3415 EXPORT_SYMBOL(dev_remove_pack);
3416 EXPORT_SYMBOL(dev_set_allmulti);
3417 EXPORT_SYMBOL(dev_set_promiscuity);
3418 EXPORT_SYMBOL(dev_change_flags);
3419 EXPORT_SYMBOL(dev_set_mtu);
3420 EXPORT_SYMBOL(dev_set_mac_address);
3421 EXPORT_SYMBOL(free_netdev);
3422 EXPORT_SYMBOL(netdev_boot_setup_check);
3423 EXPORT_SYMBOL(netdev_set_master);
3424 EXPORT_SYMBOL(netdev_state_change);
3425 EXPORT_SYMBOL(netif_receive_skb);
3426 EXPORT_SYMBOL(netif_rx);
3427 EXPORT_SYMBOL(register_gifconf);
3428 EXPORT_SYMBOL(register_netdevice);
3429 EXPORT_SYMBOL(register_netdevice_notifier);
3430 EXPORT_SYMBOL(skb_checksum_help);
3431 EXPORT_SYMBOL(synchronize_net);
3432 EXPORT_SYMBOL(unregister_netdevice);
3433 EXPORT_SYMBOL(unregister_netdevice_notifier);
3434 EXPORT_SYMBOL(net_enable_timestamp);
3435 EXPORT_SYMBOL(net_disable_timestamp);
3436 EXPORT_SYMBOL(dev_get_flags);
3437 EXPORT_SYMBOL(skb_checksum_setup);
3439 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
3440 EXPORT_SYMBOL(br_handle_frame_hook);
3441 EXPORT_SYMBOL(br_fdb_get_hook);
3442 EXPORT_SYMBOL(br_fdb_put_hook);
3443 #endif
3445 #ifdef CONFIG_KMOD
3446 EXPORT_SYMBOL(dev_load);
3447 #endif
3449 EXPORT_PER_CPU_SYMBOL(softnet_data);