/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
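/*
 * Illustrative sketch (an assumption for clarity, not code from this
 * file): a handler for a specific protocol lands in the bucket selected
 * by the low bits of its type value, e.g.:
 *
 *	struct packet_type *pt = ...;
 *	struct list_head *bucket =
 *		&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 */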
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
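/*
 * Illustrative sketch (not code from this file): a pure reader walks
 * the device list under dev_base_lock only, e.g.:
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		inspect_device(dev);	(hypothetical read-only helper)
 *	read_unlock(&dev_base_lock);
 */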
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a handler that mangles packets were first
 *	in the list, it could not tell that the packet is cloned and must
 *	be copied-on-write; it would change it in place and subsequent
 *	readers would see a broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that CPUs
 *	in the middle of receiving packets will see the new packet type
 *	(until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
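/*
 * Illustrative usage sketch (hypothetical module code, not part of this
 * file): a protocol embeds a packet_type and registers it:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev);
 *
 *	static struct packet_type my_packet_type = {
 *		.type = __constant_htons(ETH_P_IP),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 */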
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	shared by all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq 	= s[i].map.irq;
			dev->base_addr 	= s[i].map.base_addr;
			dev->mem_start 	= s[i].map.mem_start;
			dev->mem_end 	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
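/*
 * Illustrative sketch (assumed example values, for clarity only): the
 * option parsed above takes up to four integers followed by a name on
 * the kernel command line, e.g.:
 *
 *	netdev=5,0x340,0xd0000,0xd4000,eth0
 *
 * which records irq=5, base_addr=0x340, mem_start=0xd0000 and
 * mem_end=0xd4000 for a device that will later probe as "eth0".
 */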
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
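/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): the reference taken above must be dropped with dev_put():
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		use_device(dev);	(hypothetical helper)
 *		dev_put(dev);
 *	}
 */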
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns %NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
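/*
 * Illustrative usage sketch (hypothetical driver code): pick the first
 * free "eth%d" slot for a freshly allocated device:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;
 *	(dev->name now holds e.g. "eth2")
 */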
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for the device going down while it is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered, all registration and up events are replayed
 *	to the new notifier to allow it to have a race free
 *	view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
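/*
 * Illustrative usage sketch (hypothetical module code, not part of this
 * file): a subsystem registers a callback for device events:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			handle_up(dev);		(hypothetical helper)
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 */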
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		q->next_sched = sd->output_queue;
		sd->output_queue = q;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system again and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}
static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;

static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 addr1, addr2, ports;
	u32 hash, ihl;
	u8 ip_proto = 0;

	if (unlikely(!simple_tx_hashrnd_initialized)) {
		get_random_bytes(&simple_tx_hashrnd, 4);
		simple_tx_hashrnd_initialized = 1;
	}

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		addr1 = ip_hdr(skb)->saddr;
		addr2 = ip_hdr(skb)->daddr;
		ihl = ip_hdr(skb)->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		return 0;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
		break;

	default:
		ports = 0;
		break;
	}

	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
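/*
 * Worked example for the final mapping above (assumed numbers, for
 * illustration only): with real_num_tx_queues == 4 and a 32-bit hash h,
 * ((u64)h * 4) >> 32 keeps the top two bits of h, spreading flows
 * uniformly over queues 0..3 without a modulo operation.
 */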
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	u16 queue_index = 0;

	if (dev->select_queue)
		queue_index = dev->select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = simple_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible that they rely on the protection
	   we provide here.

	   Check for this and take the lock; it is not prone to deadlocks.
	   The same goes for the noqueue qdisc, which is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
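/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): a protocol builds a frame, points it at a device and queues
 * it; the skb is consumed whatever the return value:
 *
 *	skb->dev = dev;
 *	skb->priority = sk->sk_priority;
 *	dev_queue_xmit(skb);
 */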
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the shortest
	 * when the CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
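/*
 * Illustrative usage sketch (hypothetical non-NAPI driver code): an
 * interrupt handler hands a received frame to the stack:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */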
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__QDISC_STATE_SCHED, &q->state);

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				__netif_schedule(q);
			}
		}
	}
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr) __read_mostly;
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(br_handle_frame_hook);

static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise some useless instructions
 * (a compare and 2 stores) run even without it, if we do
 * have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

	/* Don't receive packets in an exiting network namespace */
	if (!net_alive(dev_net(skb->dev)))
		goto out;

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
/* Network device is going away, flush any packets still pending */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	napi->weight = weight_p;
	do {
		struct sk_buff *skb;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			__napi_complete(napi);
			local_irq_enable();
			break;
		}
		local_irq_enable();

		netif_receive_skb(skb);
	} while (++work < quota && jiffies == start_time);

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
static void net_rx_action(struct softirq_action *h)
{
	struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
	unsigned long start_time = jiffies;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(list)) {
		struct napi_struct *n;
		int work, weight;

		/* If softirq window is exhausted then punt.
		 *
		 * Note that this is a slight policy change from the
		 * previous NAPI code, which would allow up to 2
		 * jiffies to pass before breaking out.  The test
		 * used to be "jiffies - start_time > 1".
		 */
		if (unlikely(budget <= 0 || jiffies != start_time))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_entry(list->next, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state))
			work = n->poll(n, weight);

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n)))
				__napi_complete(n);
			else
				list_move_tail(&n->poll_list, list);
		}

		netpoll_poll_unlock(have);
	}
out:
	local_irq_enable();

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	if (!cpus_empty(net_dma.channel_mask)) {
		int chan_idx;
		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
			struct dma_chan *chan = net_dma.channels[chan_idx];
			if (chan)
				dma_async_memcpy_issue_pending(chan);
		}
	}
#endif

	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
2418 static gifconf_func_t * gifconf_list [NPROTO];
2421 * register_gifconf - register a SIOCGIF handler
2422 * @family: Address family
2423 * @gifconf: Function handler
2425 * Register protocol dependent address dumping routines. The handler
2426 * that is passed must not be freed or reused until it has been replaced
2427 * by another handler.
2429 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2431 if (family >= NPROTO)
2433 gifconf_list[family] = gifconf;
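/*
 * Sketch of how a protocol family hooks itself into SIOCGIFCONF
 * (IPv4 does this with inet_gifconf; the "myproto" names here are
 * hypothetical).  A handler invoked with buf == NULL must only report
 * how much space it would need.
 */
#if 0
static int myproto_gifconf(struct net_device *dev, char __user *buf,
			   int len)
{
	/* write at most len bytes of ifreqs for dev into buf, or
	 * return the space needed when buf is NULL */
	return 0;
}

static int __init myproto_init(void)
{
	return register_gifconf(PF_INET, myproto_gifconf);
}
#endif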
2439 * Map an interface index to its name (SIOCGIFNAME)
2443 * We need this ioctl for efficient implementation of the
2444 * if_indextoname() function required by the IPv6 API. Without
2445 * it, we would have to search all the interfaces to find a match.
2449 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2451 struct net_device *dev;
2455 * Fetch the caller's info block.
2458 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2461 read_lock(&dev_base_lock);
2462 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2464 read_unlock(&dev_base_lock);
2468 strcpy(ifr.ifr_name, dev->name);
2469 read_unlock(&dev_base_lock);
2471 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2477 * Perform a SIOCGIFCONF call. This structure will change
2478 * size eventually, and there is nothing I can do about it.
2479 * Thus we will need a 'compatibility mode'.
2482 static int dev_ifconf(struct net *net, char __user *arg)
2485 struct net_device *dev;
2492 * Fetch the caller's info block.
2495 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2502 * Loop over the interfaces, and write an info block for each.
2506 for_each_netdev(net, dev) {
2507 for (i = 0; i < NPROTO; i++) {
2508 if (gifconf_list[i]) {
2511 done = gifconf_list[i](dev, NULL, 0);
2513 done = gifconf_list[i](dev, pos + total,
2523 * All done. Write the updated control block back to the caller.
2525 ifc.ifc_len = total;
2528 * Both BSD and Solaris return 0 here, so we do too.
2530 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
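/*
 * The matching userspace call, sketched (needs <sys/ioctl.h>,
 * <sys/socket.h>, <net/if.h>, <stdio.h> and <unistd.h>): SIOCGIFCONF
 * fills the caller's buffer with one struct ifreq per interface
 * address and rewrites ifc_len to the byte count actually used.
 */
#if 0
static int list_interfaces(void)
{
	struct ifreq reqs[32];
	struct ifconf ifc = { .ifc_len = sizeof(reqs), .ifc_req = reqs };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i, n;

	if (fd < 0)
		return -1;
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
		close(fd);
		return -1;
	}
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++)
		printf("%s\n", reqs[i].ifr_name);
	close(fd);
	return 0;
}
#endif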
2533 #ifdef CONFIG_PROC_FS
2535 * This is invoked by the /proc filesystem handler to display a device in detail.
2538 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2539 __acquires(dev_base_lock)
2541 struct net *net = seq_file_net(seq);
2543 struct net_device *dev;
2545 read_lock(&dev_base_lock);
2547 return SEQ_START_TOKEN;
2550 for_each_netdev(net, dev)
2557 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2559 struct net *net = seq_file_net(seq);
2561 return v == SEQ_START_TOKEN ?
2562 first_net_device(net) : next_net_device((struct net_device *)v);
2565 void dev_seq_stop(struct seq_file *seq, void *v)
2566 __releases(dev_base_lock)
2568 read_unlock(&dev_base_lock);
2571 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2573 struct net_device_stats *stats = dev->get_stats(dev);
2575 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2576 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2577 dev->name, stats->rx_bytes, stats->rx_packets,
2579 stats->rx_dropped + stats->rx_missed_errors,
2580 stats->rx_fifo_errors,
2581 stats->rx_length_errors + stats->rx_over_errors +
2582 stats->rx_crc_errors + stats->rx_frame_errors,
2583 stats->rx_compressed, stats->multicast,
2584 stats->tx_bytes, stats->tx_packets,
2585 stats->tx_errors, stats->tx_dropped,
2586 stats->tx_fifo_errors, stats->collisions,
2587 stats->tx_carrier_errors +
2588 stats->tx_aborted_errors +
2589 stats->tx_window_errors +
2590 stats->tx_heartbeat_errors,
2591 stats->tx_compressed);
2595 * Called from the PROCfs module. This now uses the new arbitrary sized
2596 * /proc/net interface to create /proc/net/dev
2598 static int dev_seq_show(struct seq_file *seq, void *v)
2600 if (v == SEQ_START_TOKEN)
2601 seq_puts(seq, "Inter-| Receive "
2603 " face |bytes packets errs drop fifo frame "
2604 "compressed multicast|bytes packets errs "
2605 "drop fifo colls carrier compressed\n");
2607 dev_seq_printf_stats(seq, v);
2611 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2613 struct netif_rx_stats *rc = NULL;
2615 while (*pos < nr_cpu_ids)
2616 if (cpu_online(*pos)) {
2617 rc = &per_cpu(netdev_rx_stat, *pos);
2624 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2626 return softnet_get_online(pos);
2629 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2632 return softnet_get_online(pos);
2635 static void softnet_seq_stop(struct seq_file *seq, void *v)
2639 static int softnet_seq_show(struct seq_file *seq, void *v)
2641 struct netif_rx_stats *s = v;
2643 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2644 s->total, s->dropped, s->time_squeeze, 0,
2645 0, 0, 0, 0, /* was fastroute */
2650 static const struct seq_operations dev_seq_ops = {
2651 .start = dev_seq_start,
2652 .next = dev_seq_next,
2653 .stop = dev_seq_stop,
2654 .show = dev_seq_show,
2657 static int dev_seq_open(struct inode *inode, struct file *file)
2659 return seq_open_net(inode, file, &dev_seq_ops,
2660 sizeof(struct seq_net_private));
2663 static const struct file_operations dev_seq_fops = {
2664 .owner = THIS_MODULE,
2665 .open = dev_seq_open,
2667 .llseek = seq_lseek,
2668 .release = seq_release_net,
2671 static const struct seq_operations softnet_seq_ops = {
2672 .start = softnet_seq_start,
2673 .next = softnet_seq_next,
2674 .stop = softnet_seq_stop,
2675 .show = softnet_seq_show,
2678 static int softnet_seq_open(struct inode *inode, struct file *file)
2680 return seq_open(file, &softnet_seq_ops);
2683 static const struct file_operations softnet_seq_fops = {
2684 .owner = THIS_MODULE,
2685 .open = softnet_seq_open,
2687 .llseek = seq_lseek,
2688 .release = seq_release,
2691 static void *ptype_get_idx(loff_t pos)
2693 struct packet_type *pt = NULL;
2697 list_for_each_entry_rcu(pt, &ptype_all, list) {
2703 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2704 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2713 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2717 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2720 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2722 struct packet_type *pt;
2723 struct list_head *nxt;
2727 if (v == SEQ_START_TOKEN)
2728 return ptype_get_idx(0);
2731 nxt = pt->list.next;
2732 if (pt->type == htons(ETH_P_ALL)) {
2733 if (nxt != &ptype_all)
2736 nxt = ptype_base[0].next;
2738 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2740 while (nxt == &ptype_base[hash]) {
2741 if (++hash >= PTYPE_HASH_SIZE)
2743 nxt = ptype_base[hash].next;
2746 return list_entry(nxt, struct packet_type, list);
2749 static void ptype_seq_stop(struct seq_file *seq, void *v)
2755 static void ptype_seq_decode(struct seq_file *seq, void *sym)
2757 #ifdef CONFIG_KALLSYMS
2758 unsigned long offset = 0, symsize;
2759 const char *symname;
2763 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2770 modname = delim = "";
2771 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2777 seq_printf(seq, "[%p]", sym);
2780 static int ptype_seq_show(struct seq_file *seq, void *v)
2782 struct packet_type *pt = v;
2784 if (v == SEQ_START_TOKEN)
2785 seq_puts(seq, "Type Device Function\n");
2786 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2787 if (pt->type == htons(ETH_P_ALL))
2788 seq_puts(seq, "ALL ");
2790 seq_printf(seq, "%04x", ntohs(pt->type));
2792 seq_printf(seq, " %-8s ",
2793 pt->dev ? pt->dev->name : "");
2794 ptype_seq_decode(seq, pt->func);
2795 seq_putc(seq, '\n');
2801 static const struct seq_operations ptype_seq_ops = {
2802 .start = ptype_seq_start,
2803 .next = ptype_seq_next,
2804 .stop = ptype_seq_stop,
2805 .show = ptype_seq_show,
2808 static int ptype_seq_open(struct inode *inode, struct file *file)
2810 return seq_open_net(inode, file, &ptype_seq_ops,
2811 sizeof(struct seq_net_private));
2814 static const struct file_operations ptype_seq_fops = {
2815 .owner = THIS_MODULE,
2816 .open = ptype_seq_open,
2818 .llseek = seq_lseek,
2819 .release = seq_release_net,
2823 static int __net_init dev_proc_net_init(struct net *net)
2827 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2829 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2831 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2834 if (wext_proc_init(net))
2840 proc_net_remove(net, "ptype");
2842 proc_net_remove(net, "softnet_stat");
2844 proc_net_remove(net, "dev");
2848 static void __net_exit dev_proc_net_exit(struct net *net)
2850 wext_proc_exit(net);
2852 proc_net_remove(net, "ptype");
2853 proc_net_remove(net, "softnet_stat");
2854 proc_net_remove(net, "dev");
2857 static struct pernet_operations __net_initdata dev_proc_ops = {
2858 .init = dev_proc_net_init,
2859 .exit = dev_proc_net_exit,
2862 static int __init dev_proc_init(void)
2864 return register_pernet_subsys(&dev_proc_ops);
2867 #define dev_proc_init() 0
2868 #endif /* CONFIG_PROC_FS */
2872 * netdev_set_master - set up master/slave pair
2873 * @slave: slave device
2874 * @master: new master device
2876 * Changes the master device of the slave. Pass %NULL to break the
2877 * bonding. The caller must hold the RTNL semaphore. On a failure
2878 * a negative errno code is returned. On success the reference counts
2879 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2880 * function returns zero.
2882 int netdev_set_master(struct net_device *slave, struct net_device *master)
2884 struct net_device *old = slave->master;
2894 slave->master = master;
2902 slave->flags |= IFF_SLAVE;
2904 slave->flags &= ~IFF_SLAVE;
2906 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
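/*
 * Sketch of the classic caller (the bonding driver enslaves and
 * releases devices roughly like this; the "mybond" names are
 * hypothetical):
 */
#if 0
static int mybond_enslave(struct net_device *bond_dev,
			  struct net_device *slave_dev)
{
	ASSERT_RTNL();		/* netdev_set_master needs the RTNL held */
	return netdev_set_master(slave_dev, bond_dev);
}

static void mybond_release(struct net_device *slave_dev)
{
	ASSERT_RTNL();
	netdev_set_master(slave_dev, NULL);	/* break the bonding */
}
#endif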
2910 static int __dev_set_promiscuity(struct net_device *dev, int inc)
2912 unsigned short old_flags = dev->flags;
2916 dev->flags |= IFF_PROMISC;
2917 dev->promiscuity += inc;
2918 if (dev->promiscuity == 0) {
2921 * If inc causes overflow, leave promisc untouched and return an error.
2924 dev->flags &= ~IFF_PROMISC;
2926 dev->promiscuity -= inc;
2927 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
2928 "could not set promiscuity; the promiscuity feature "
2929 "of the device may be broken.\n", dev->name);
2933 if (dev->flags != old_flags) {
2934 printk(KERN_INFO "device %s %s promiscuous mode\n",
2935 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2938 audit_log(current->audit_context, GFP_ATOMIC,
2939 AUDIT_ANOM_PROMISCUOUS,
2940 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2941 dev->name, (dev->flags & IFF_PROMISC),
2942 (old_flags & IFF_PROMISC),
2943 audit_get_loginuid(current),
2944 current->uid, current->gid,
2945 audit_get_sessionid(current));
2947 if (dev->change_rx_flags)
2948 dev->change_rx_flags(dev, IFF_PROMISC);
2954 * dev_set_promiscuity - update promiscuity count on a device
2958 * Add or remove promiscuity from a device. While the count in the device
2959 * remains above zero the interface remains promiscuous. Once it hits zero
2960 * the device reverts to normal filtering operation. A negative inc
2961 * value is used to drop promiscuity on the device.
2962 * Return 0 if successful or a negative errno code on error.
2964 int dev_set_promiscuity(struct net_device *dev, int inc)
2966 unsigned short old_flags = dev->flags;
2969 err = __dev_set_promiscuity(dev, inc);
2972 if (dev->flags != old_flags)
2973 dev_set_rx_mode(dev);
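/*
 * Typical usage, sketched: a capture-style user takes one promiscuity
 * reference while active and drops it when done, so several users can
 * overlap without fighting over IFF_PROMISC directly (the function
 * names are hypothetical; callers conventionally hold the RTNL):
 */
#if 0
static int start_sniffing(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one reference */
	rtnl_unlock();
	return err;
}

static void stop_sniffing(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}
#endif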
2978 * dev_set_allmulti - update allmulti count on a device
2982 * Add or remove reception of all multicast frames on a device. While the
2983 * count in the device remains above zero the interface keeps listening
2984 * to all multicast frames. Once it hits zero the device reverts to normal
2985 * filtering operation. A negative @inc value is used to drop the counter
2986 * when releasing a resource needing all multicasts.
2987 * Return 0 if successful or a negative errno code on error.
2990 int dev_set_allmulti(struct net_device *dev, int inc)
2992 unsigned short old_flags = dev->flags;
2996 dev->flags |= IFF_ALLMULTI;
2997 dev->allmulti += inc;
2998 if (dev->allmulti == 0) {
3001 * If inc causes overflow, leave allmulti untouched and return an error.
3004 dev->flags &= ~IFF_ALLMULTI;
3006 dev->allmulti -= inc;
3007 printk(KERN_WARNING "%s: allmulti counter overflowed, "
3008 "could not set allmulti; the allmulti feature of the "
3009 "device may be broken.\n", dev->name);
3013 if (dev->flags ^ old_flags) {
3014 if (dev->change_rx_flags)
3015 dev->change_rx_flags(dev, IFF_ALLMULTI);
3016 dev_set_rx_mode(dev);
3022 * Upload unicast and multicast address lists to device and
3023 * configure RX filtering. When the device doesn't support unicast
3024 * filtering it is put in promiscuous mode while unicast addresses are present.
3027 void __dev_set_rx_mode(struct net_device *dev)
3029 /* dev_open will call this function so the list will stay sane. */
3030 if (!(dev->flags&IFF_UP))
3033 if (!netif_device_present(dev))
3036 if (dev->set_rx_mode)
3037 dev->set_rx_mode(dev);
3039 /* Unicast address changes may only happen under the rtnl,
3040 * therefore calling __dev_set_promiscuity here is safe.
3042 if (dev->uc_count > 0 && !dev->uc_promisc) {
3043 __dev_set_promiscuity(dev, 1);
3044 dev->uc_promisc = 1;
3045 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3046 __dev_set_promiscuity(dev, -1);
3047 dev->uc_promisc = 0;
3050 if (dev->set_multicast_list)
3051 dev->set_multicast_list(dev);
3055 void dev_set_rx_mode(struct net_device *dev)
3057 netif_addr_lock_bh(dev);
3058 __dev_set_rx_mode(dev);
3059 netif_addr_unlock_bh(dev);
3062 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3063 void *addr, int alen, int glbl)
3065 struct dev_addr_list *da;
3067 for (; (da = *list) != NULL; list = &da->next) {
3068 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3069 alen == da->da_addrlen) {
3071 int old_glbl = da->da_gusers;
3088 int __dev_addr_add(struct dev_addr_list **list, int *count,
3089 void *addr, int alen, int glbl)
3091 struct dev_addr_list *da;
3093 for (da = *list; da != NULL; da = da->next) {
3094 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3095 da->da_addrlen == alen) {
3097 int old_glbl = da->da_gusers;
3107 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3110 memcpy(da->da_addr, addr, alen);
3111 da->da_addrlen = alen;
3113 da->da_gusers = glbl ? 1 : 0;
3121 * dev_unicast_delete - Release secondary unicast address.
3123 * @addr: address to delete
3124 * @alen: length of @addr
3126 * Release reference to a secondary unicast address and remove it
3127 * from the device if the reference count drops to zero.
3129 * The caller must hold the rtnl_mutex.
3131 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3137 netif_addr_lock_bh(dev);
3138 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3140 __dev_set_rx_mode(dev);
3141 netif_addr_unlock_bh(dev);
3144 EXPORT_SYMBOL(dev_unicast_delete);
3147 * dev_unicast_add - add a secondary unicast address
3149 * @addr: address to add
3150 * @alen: length of @addr
3152 * Add a secondary unicast address to the device or increase
3153 * the reference count if it already exists.
3155 * The caller must hold the rtnl_mutex.
3157 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3163 netif_addr_lock_bh(dev);
3164 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3166 __dev_set_rx_mode(dev);
3167 netif_addr_unlock_bh(dev);
3170 EXPORT_SYMBOL(dev_unicast_add);
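/*
 * Sketch of a layered-device user (macvlan does essentially this):
 * each virtual device pins its MAC address as a secondary unicast
 * address on the lower device while it is up.  The "myvlan" names
 * are hypothetical; ->open/->stop run under the RTNL as required.
 */
#if 0
struct myvlan_priv {
	struct net_device *lowerdev;
};

static int myvlan_open(struct net_device *dev)
{
	struct myvlan_priv *priv = netdev_priv(dev);

	return dev_unicast_add(priv->lowerdev, dev->dev_addr, ETH_ALEN);
}

static int myvlan_stop(struct net_device *dev)
{
	struct myvlan_priv *priv = netdev_priv(dev);

	dev_unicast_delete(priv->lowerdev, dev->dev_addr, ETH_ALEN);
	return 0;
}
#endif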
3172 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3173 struct dev_addr_list **from, int *from_count)
3175 struct dev_addr_list *da, *next;
3179 while (da != NULL) {
3181 if (!da->da_synced) {
3182 err = __dev_addr_add(to, to_count,
3183 da->da_addr, da->da_addrlen, 0);
3188 } else if (da->da_users == 1) {
3189 __dev_addr_delete(to, to_count,
3190 da->da_addr, da->da_addrlen, 0);
3191 __dev_addr_delete(from, from_count,
3192 da->da_addr, da->da_addrlen, 0);
3199 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3200 struct dev_addr_list **from, int *from_count)
3202 struct dev_addr_list *da, *next;
3205 while (da != NULL) {
3207 if (da->da_synced) {
3208 __dev_addr_delete(to, to_count,
3209 da->da_addr, da->da_addrlen, 0);
3211 __dev_addr_delete(from, from_count,
3212 da->da_addr, da->da_addrlen, 0);
3219 * dev_unicast_sync - Synchronize device's unicast list to another device
3220 * @to: destination device
3221 * @from: source device
3223 * Add newly added addresses to the destination device and release
3224 * addresses that have no users left. The source device must be
3225 * locked by netif_addr_lock_bh.
3227 * This function is intended to be called from the dev->set_rx_mode
3228 * function of layered software devices.
3230 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3234 netif_addr_lock_bh(to);
3235 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3236 &from->uc_list, &from->uc_count);
3238 __dev_set_rx_mode(to);
3239 netif_addr_unlock_bh(to);
3242 EXPORT_SYMBOL(dev_unicast_sync);
3245 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3246 * @to: destination device
3247 * @from: source device
3249 * Remove all addresses that were added to the destination device by
3250 * dev_unicast_sync(). This function is intended to be called from the
3251 * dev->stop function of layered software devices.
3253 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3255 netif_addr_lock_bh(from);
3256 netif_addr_lock(to);
3258 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3259 &from->uc_list, &from->uc_count);
3260 __dev_set_rx_mode(to);
3262 netif_addr_unlock(to);
3263 netif_addr_unlock_bh(from);
3265 EXPORT_SYMBOL(dev_unicast_unsync);
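/*
 * Sketch of the intended pairing for a layered software device
 * (802.1q VLAN behaves like this; "myvlan" names are hypothetical,
 * as in the sketch above): propagate the upper device's unicast list
 * down in ->set_rx_mode, and tear the synchronized entries down again
 * in ->stop.
 */
#if 0
static void myvlan_set_rx_mode(struct net_device *dev)
{
	struct myvlan_priv *priv = netdev_priv(dev);

	dev_unicast_sync(priv->lowerdev, dev);
}

static int myvlan_down(struct net_device *dev)
{
	struct myvlan_priv *priv = netdev_priv(dev);

	dev_unicast_unsync(priv->lowerdev, dev);
	return 0;
}
#endif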
3267 static void __dev_addr_discard(struct dev_addr_list **list)
3269 struct dev_addr_list *tmp;
3271 while (*list != NULL) {
3274 if (tmp->da_users > tmp->da_gusers)
3275 printk("__dev_addr_discard: address leakage! "
3276 "da_users=%d\n", tmp->da_users);
3281 static void dev_addr_discard(struct net_device *dev)
3283 netif_addr_lock_bh(dev);
3285 __dev_addr_discard(&dev->uc_list);
3288 __dev_addr_discard(&dev->mc_list);
3291 netif_addr_unlock_bh(dev);
3294 unsigned dev_get_flags(const struct net_device *dev)
3298 flags = (dev->flags & ~(IFF_PROMISC |
3303 (dev->gflags & (IFF_PROMISC |
3306 if (netif_running(dev)) {
3307 if (netif_oper_up(dev))
3308 flags |= IFF_RUNNING;
3309 if (netif_carrier_ok(dev))
3310 flags |= IFF_LOWER_UP;
3311 if (netif_dormant(dev))
3312 flags |= IFF_DORMANT;
3318 int dev_change_flags(struct net_device *dev, unsigned flags)
3321 int old_flags = dev->flags;
3326 * Set the flags on our device.
3329 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3330 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3332 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3336 * Load in the correct multicast list now the flags have changed.
3339 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
3340 dev->change_rx_flags(dev, IFF_MULTICAST);
3342 dev_set_rx_mode(dev);
3345 * Have we downed the interface? We handle IFF_UP ourselves
3346 * according to user attempts to set it, rather than blindly setting it.
3351 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3352 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3355 dev_set_rx_mode(dev);
3358 if (dev->flags & IFF_UP &&
3359 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3361 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3363 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3364 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3365 dev->gflags ^= IFF_PROMISC;
3366 dev_set_promiscuity(dev, inc);
3369 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3370 is important. Some (broken) drivers set IFF_PROMISC when
3371 IFF_ALLMULTI is requested, without asking us and without reporting it.
3373 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3374 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3375 dev->gflags ^= IFF_ALLMULTI;
3376 dev_set_allmulti(dev, inc);
3379 /* Exclude state transition flags, already notified */
3380 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3382 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
3387 int dev_set_mtu(struct net_device *dev, int new_mtu)
3391 if (new_mtu == dev->mtu)
3394 /* MTU must be positive. */
3398 if (!netif_device_present(dev))
3402 if (dev->change_mtu)
3403 err = dev->change_mtu(dev, new_mtu);
3406 if (!err && dev->flags & IFF_UP)
3407 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3411 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3415 if (!dev->set_mac_address)
3417 if (sa->sa_family != dev->type)
3419 if (!netif_device_present(dev))
3421 err = dev->set_mac_address(dev, sa);
3423 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3428 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3430 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3433 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3439 case SIOCGIFFLAGS: /* Get interface flags */
3440 ifr->ifr_flags = dev_get_flags(dev);
3443 case SIOCGIFMETRIC: /* Get the metric on the interface
3444 (currently unused) */
3445 ifr->ifr_metric = 0;
3448 case SIOCGIFMTU: /* Get the MTU of a device */
3449 ifr->ifr_mtu = dev->mtu;
3454 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3456 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3457 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3458 ifr->ifr_hwaddr.sa_family = dev->type;
3466 ifr->ifr_map.mem_start = dev->mem_start;
3467 ifr->ifr_map.mem_end = dev->mem_end;
3468 ifr->ifr_map.base_addr = dev->base_addr;
3469 ifr->ifr_map.irq = dev->irq;
3470 ifr->ifr_map.dma = dev->dma;
3471 ifr->ifr_map.port = dev->if_port;
3475 ifr->ifr_ifindex = dev->ifindex;
3479 ifr->ifr_qlen = dev->tx_queue_len;
3483 /* dev_ioctl() should ensure this case is never reached */
3495 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3497 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3500 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3506 case SIOCSIFFLAGS: /* Set interface flags */
3507 return dev_change_flags(dev, ifr->ifr_flags);
3509 case SIOCSIFMETRIC: /* Set the metric on the interface
3510 (currently unused) */
3513 case SIOCSIFMTU: /* Set the MTU of a device */
3514 return dev_set_mtu(dev, ifr->ifr_mtu);
3517 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3519 case SIOCSIFHWBROADCAST:
3520 if (ifr->ifr_hwaddr.sa_family != dev->type)
3522 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3523 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3524 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3528 if (dev->set_config) {
3529 if (!netif_device_present(dev))
3531 return dev->set_config(dev, &ifr->ifr_map);
3536 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3537 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3539 if (!netif_device_present(dev))
3541 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3545 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3546 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3548 if (!netif_device_present(dev))
3550 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3554 if (ifr->ifr_qlen < 0)
3556 dev->tx_queue_len = ifr->ifr_qlen;
3560 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3561 return dev_change_name(dev, ifr->ifr_newname);
3564 * Unknown or private ioctl
3568 if ((cmd >= SIOCDEVPRIVATE &&
3569 cmd <= SIOCDEVPRIVATE + 15) ||
3570 cmd == SIOCBONDENSLAVE ||
3571 cmd == SIOCBONDRELEASE ||
3572 cmd == SIOCBONDSETHWADDR ||
3573 cmd == SIOCBONDSLAVEINFOQUERY ||
3574 cmd == SIOCBONDINFOQUERY ||
3575 cmd == SIOCBONDCHANGEACTIVE ||
3576 cmd == SIOCGMIIPHY ||
3577 cmd == SIOCGMIIREG ||
3578 cmd == SIOCSMIIREG ||
3579 cmd == SIOCBRADDIF ||
3580 cmd == SIOCBRDELIF ||
3581 cmd == SIOCWANDEV) {
3583 if (dev->do_ioctl) {
3584 if (netif_device_present(dev))
3585 err = dev->do_ioctl(dev, ifr,
3598 * This function handles all "interface"-type I/O control requests. The actual
3599 * 'doing' part of this is dev_ifsioc above.
3603 * dev_ioctl - network device ioctl
3604 * @net: the applicable net namespace
3605 * @cmd: command to issue
3606 * @arg: pointer to a struct ifreq in user space
3608 * Issue ioctl functions to devices. This is normally called by the
3609 * user space syscall interfaces but can sometimes be useful for
3610 * other purposes. The return value is the return from the syscall if
3611 * positive or a negative errno code on error.
3614 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3620 /* One special case: SIOCGIFCONF takes ifconf argument
3621 and requires shared lock, because it sleeps writing the info.
3625 if (cmd == SIOCGIFCONF) {
3627 ret = dev_ifconf(net, (char __user *) arg);
3631 if (cmd == SIOCGIFNAME)
3632 return dev_ifname(net, (struct ifreq __user *)arg);
3634 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3637 ifr.ifr_name[IFNAMSIZ-1] = 0;
3639 colon = strchr(ifr.ifr_name, ':');
3644 * See which interface the caller is talking about.
3649 * These ioctl calls:
3650 * - can be done by all.
3651 * - atomic and do not require locking.
3662 dev_load(net, ifr.ifr_name);
3663 read_lock(&dev_base_lock);
3664 ret = dev_ifsioc_locked(net, &ifr, cmd);
3665 read_unlock(&dev_base_lock);
3669 if (copy_to_user(arg, &ifr,
3670 sizeof(struct ifreq)))
3676 dev_load(net, ifr.ifr_name);
3678 ret = dev_ethtool(net, &ifr);
3683 if (copy_to_user(arg, &ifr,
3684 sizeof(struct ifreq)))
3690 * These ioctl calls:
3691 * - require superuser power.
3692 * - require strict serialization.
3698 if (!capable(CAP_NET_ADMIN))
3700 dev_load(net, ifr.ifr_name);
3702 ret = dev_ifsioc(net, &ifr, cmd);
3707 if (copy_to_user(arg, &ifr,
3708 sizeof(struct ifreq)))
3714 * These ioctl calls:
3715 * - require superuser power.
3716 * - require strict serialization.
3717 * - do not return a value
3727 case SIOCSIFHWBROADCAST:
3730 case SIOCBONDENSLAVE:
3731 case SIOCBONDRELEASE:
3732 case SIOCBONDSETHWADDR:
3733 case SIOCBONDCHANGEACTIVE:
3736 if (!capable(CAP_NET_ADMIN))
3739 case SIOCBONDSLAVEINFOQUERY:
3740 case SIOCBONDINFOQUERY:
3741 dev_load(net, ifr.ifr_name);
3743 ret = dev_ifsioc(net, &ifr, cmd);
3748 /* Get the per device memory space. We can add this but
3749 * currently do not support it */
3751 /* Set the per device memory buffer space.
3752 * Not applicable in our case */
3757 * Unknown or private ioctl.
3760 if (cmd == SIOCWANDEV ||
3761 (cmd >= SIOCDEVPRIVATE &&
3762 cmd <= SIOCDEVPRIVATE + 15)) {
3763 dev_load(net, ifr.ifr_name);
3765 ret = dev_ifsioc(net, &ifr, cmd);
3767 if (!ret && copy_to_user(arg, &ifr,
3768 sizeof(struct ifreq)))
3772 /* Take care of Wireless Extensions */
3773 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3774 return wext_handle_ioctl(net, &ifr, cmd, arg);
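/*
 * Userspace side of the simplest read-only path, sketched (needs
 * <sys/ioctl.h>, <sys/socket.h>, <net/if.h>, <string.h>, <unistd.h>):
 * SIOCGIFMTU is dispatched to dev_ifsioc_locked() and the result is
 * copied back into the caller's struct ifreq.
 */
#if 0
static int get_mtu(const char *ifname)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int mtu;

	if (fd < 0)
		return -1;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	mtu = ioctl(fd, SIOCGIFMTU, &ifr) < 0 ? -1 : ifr.ifr_mtu;
	close(fd);
	return mtu;		/* e.g. 1500 on plain ethernet */
}
#endif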
3781 * dev_new_index - allocate an ifindex
3782 * @net: the applicable net namespace
3784 * Returns a suitable unique value for a new device interface
3785 * number. The caller must hold the rtnl semaphore or the
3786 * dev_base_lock to be sure it remains unique.
3788 static int dev_new_index(struct net *net)
3794 if (!__dev_get_by_index(net, ifindex))
3799 /* Delayed registration/unregistration */
3800 static DEFINE_SPINLOCK(net_todo_list_lock);
3801 static LIST_HEAD(net_todo_list);
3803 static void net_set_todo(struct net_device *dev)
3805 spin_lock(&net_todo_list_lock);
3806 list_add_tail(&dev->todo_list, &net_todo_list);
3807 spin_unlock(&net_todo_list_lock);
3810 static void rollback_registered(struct net_device *dev)
3812 BUG_ON(dev_boot_phase);
3815 /* Some devices call this without having registered, to unwind a failed initialization. */
3816 if (dev->reg_state == NETREG_UNINITIALIZED) {
3817 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3818 "was registered\n", dev->name, dev);
3824 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3826 /* If device is running, close it first. */
3829 /* And unlink it from device chain. */
3830 unlist_netdevice(dev);
3832 dev->reg_state = NETREG_UNREGISTERING;
3836 /* Shutdown queueing discipline. */
3840 /* Notify protocols that we are about to destroy
3841 this device. They should clean all the things.
3843 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3846 * Flush the unicast and multicast chains
3848 dev_addr_discard(dev);
3853 /* Notifier chain MUST detach us from master device. */
3854 WARN_ON(dev->master);
3856 /* Remove entries from kobject tree */
3857 netdev_unregister_kobject(dev);
3864 static void __netdev_init_queue_locks_one(struct net_device *dev,
3865 struct netdev_queue *dev_queue,
3868 spin_lock_init(&dev_queue->_xmit_lock);
3869 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3870 dev_queue->xmit_lock_owner = -1;
3873 static void netdev_init_queue_locks(struct net_device *dev)
3875 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3876 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
3880 * register_netdevice - register a network device
3881 * @dev: device to register
3883 * Take a completed network device structure and add it to the kernel
3884 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3885 * chain. 0 is returned on success. A negative errno code is returned
3886 * on a failure to set up the device, or if the name is a duplicate.
3888 * Callers must hold the rtnl semaphore. You may want
3889 * register_netdev() instead of this.
3892 * The locking appears insufficient to guarantee two parallel registers
3893 * will not get the same name.
3896 int register_netdevice(struct net_device *dev)
3898 struct hlist_head *head;
3899 struct hlist_node *p;
3903 BUG_ON(dev_boot_phase);
3908 /* When net_devices are persistent, this will be fatal. */
3909 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3910 BUG_ON(!dev_net(dev));
3913 spin_lock_init(&dev->addr_list_lock);
3914 netdev_set_addr_lockdep_class(dev);
3915 netdev_init_queue_locks(dev);
3919 /* Init, if this function is available */
3921 ret = dev->init(dev);
3929 if (!dev_valid_name(dev->name)) {
3934 dev->ifindex = dev_new_index(net);
3935 if (dev->iflink == -1)
3936 dev->iflink = dev->ifindex;
3938 /* Check for existence of name */
3939 head = dev_name_hash(net, dev->name);
3940 hlist_for_each(p, head) {
3941 struct net_device *d
3942 = hlist_entry(p, struct net_device, name_hlist);
3943 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3949 /* Fix illegal checksum combinations */
3950 if ((dev->features & NETIF_F_HW_CSUM) &&
3951 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3952 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3954 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3957 if ((dev->features & NETIF_F_NO_CSUM) &&
3958 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3959 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3961 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3965 /* Fix illegal SG+CSUM combinations. */
3966 if ((dev->features & NETIF_F_SG) &&
3967 !(dev->features & NETIF_F_ALL_CSUM)) {
3968 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
3970 dev->features &= ~NETIF_F_SG;
3973 /* TSO requires that SG is present as well. */
3974 if ((dev->features & NETIF_F_TSO) &&
3975 !(dev->features & NETIF_F_SG)) {
3976 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
3978 dev->features &= ~NETIF_F_TSO;
3980 if (dev->features & NETIF_F_UFO) {
3981 if (!(dev->features & NETIF_F_HW_CSUM)) {
3982 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3983 "NETIF_F_HW_CSUM feature.\n",
3985 dev->features &= ~NETIF_F_UFO;
3987 if (!(dev->features & NETIF_F_SG)) {
3988 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3989 "NETIF_F_SG feature.\n",
3991 dev->features &= ~NETIF_F_UFO;
3995 /* Enable software GSO if SG is supported. */
3996 if (dev->features & NETIF_F_SG)
3997 dev->features |= NETIF_F_GSO;
3999 netdev_initialize_kobject(dev);
4000 ret = netdev_register_kobject(dev);
4003 dev->reg_state = NETREG_REGISTERED;
4006 * Default initial state at registration is that the
4007 * device is present.
4010 set_bit(__LINK_STATE_PRESENT, &dev->state);
4012 dev_init_scheduler(dev);
4014 list_netdevice(dev);
4016 /* Notify protocols that a new device appeared. */
4017 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4018 ret = notifier_to_errno(ret);
4020 rollback_registered(dev);
4021 dev->reg_state = NETREG_UNREGISTERED;
4034 * register_netdev - register a network device
4035 * @dev: device to register
4037 * Take a completed network device structure and add it to the kernel
4038 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4039 * chain. 0 is returned on success. A negative errno code is returned
4040 * on a failure to set up the device, or if the name is a duplicate.
4042 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4043 * and expands the device name if you passed a format string to
4046 int register_netdev(struct net_device *dev)
4053 * If the name is a format string the caller wants us to do a name allocation.
4056 if (strchr(dev->name, '%')) {
4057 err = dev_alloc_name(dev, dev->name);
4062 err = register_netdevice(dev);
4067 EXPORT_SYMBOL(register_netdev);
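/*
 * Registration sketch for a minimal ethernet driver ("mydrv" names
 * are hypothetical).  register_netdev() takes the RTNL and expands
 * the "%d" in the requested name; a device that never registered is
 * released with a plain free_netdev().
 */
#if 0
struct mydrv_priv {
	int dummy;
};

static int __init mydrv_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct mydrv_priv), "eth%d",
			   ether_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);	/* never registered: plain free */
	/* a real driver would stash dev in its state before returning */
	return err;
}
#endif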
4070 * netdev_wait_allrefs - wait until all references are gone.
4072 * This is called when unregistering network devices.
4074 * Any protocol or device that holds a reference should register
4075 * for netdevice notification, and cleanup and put back the
4076 * reference if they receive an UNREGISTER event.
4077 * We can get stuck here if buggy protocols don't correctly call dev_put.
4080 static void netdev_wait_allrefs(struct net_device *dev)
4082 unsigned long rebroadcast_time, warning_time;
4084 rebroadcast_time = warning_time = jiffies;
4085 while (atomic_read(&dev->refcnt) != 0) {
4086 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4089 /* Rebroadcast unregister notification */
4090 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4092 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4094 /* We must not have linkwatch events
4095 * pending on unregister. If this
4096 * happens, we simply run the queue
4097 * unscheduled, resulting in a noop
4100 linkwatch_run_queue();
4105 rebroadcast_time = jiffies;
4110 if (time_after(jiffies, warning_time + 10 * HZ)) {
4111 printk(KERN_EMERG "unregister_netdevice: "
4112 "waiting for %s to become free. Usage "
4114 dev->name, atomic_read(&dev->refcnt));
4115 warning_time = jiffies;
4124 * register_netdevice(x1);
4125 * register_netdevice(x2);
4127 * unregister_netdevice(y1);
4128 * unregister_netdevice(y2);
4134 * We are invoked by rtnl_unlock() after it drops the semaphore.
4135 * This allows us to deal with problems:
4136 * 1) We can delete sysfs objects which invoke hotplug
4137 * without deadlocking with linkwatch via keventd.
4138 * 2) Since we run with the RTNL semaphore not held, we can sleep
4139 * safely in order to wait for the netdev refcnt to drop to zero.
4141 static DEFINE_MUTEX(net_todo_run_mutex);
4142 void netdev_run_todo(void)
4144 struct list_head list;
4146 /* Need to guard against multiple cpu's getting out of order. */
4147 mutex_lock(&net_todo_run_mutex);
4149 /* Not safe to do outside the semaphore. We must not return
4150 * until all unregister events invoked by the local processor
4151 * have been completed (either by this todo run, or one on another cpu).
4154 if (list_empty(&net_todo_list))
4157 /* Snapshot list, allow later requests */
4158 spin_lock(&net_todo_list_lock);
4159 list_replace_init(&net_todo_list, &list);
4160 spin_unlock(&net_todo_list_lock);
4162 while (!list_empty(&list)) {
4163 struct net_device *dev
4164 = list_entry(list.next, struct net_device, todo_list);
4165 list_del(&dev->todo_list);
4167 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4168 printk(KERN_ERR "network todo '%s' but state %d\n",
4169 dev->name, dev->reg_state);
4174 dev->reg_state = NETREG_UNREGISTERED;
4176 on_each_cpu(flush_backlog, dev, 1);
4178 netdev_wait_allrefs(dev);
4181 BUG_ON(atomic_read(&dev->refcnt));
4182 WARN_ON(dev->ip_ptr);
4183 WARN_ON(dev->ip6_ptr);
4184 WARN_ON(dev->dn_ptr);
4186 if (dev->destructor)
4187 dev->destructor(dev);
4189 /* Free network device */
4190 kobject_put(&dev->dev.kobj);
4194 mutex_unlock(&net_todo_run_mutex);
4197 static struct net_device_stats *internal_stats(struct net_device *dev)
4202 static void netdev_init_one_queue(struct net_device *dev,
4203 struct netdev_queue *queue,
4209 static void netdev_init_queues(struct net_device *dev)
4211 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4212 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4213 spin_lock_init(&dev->tx_global_lock);
4217 * alloc_netdev_mq - allocate network device
4218 * @sizeof_priv: size of private data to allocate space for
4219 * @name: device name format string
4220 * @setup: callback to initialize device
4221 * @queue_count: the number of subqueues to allocate
4223 * Allocates a struct net_device with private data area for driver use
4224 * and performs basic initialization. Also allocates subqueue structs
4225 * for each queue on the device at the end of the netdevice.
4227 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4228 void (*setup)(struct net_device *), unsigned int queue_count)
4230 struct netdev_queue *tx;
4231 struct net_device *dev;
4235 BUG_ON(strlen(name) >= sizeof(dev->name));
4237 alloc_size = sizeof(struct net_device);
4239 /* ensure 32-byte alignment of private area */
4240 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4241 alloc_size += sizeof_priv;
4243 /* ensure 32-byte alignment of whole construct */
4244 alloc_size += NETDEV_ALIGN_CONST;
4246 p = kzalloc(alloc_size, GFP_KERNEL);
4248 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4252 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4254 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4260 dev = (struct net_device *)
4261 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4262 dev->padded = (char *)dev - (char *)p;
4263 dev_net_set(dev, &init_net);
4266 dev->num_tx_queues = queue_count;
4267 dev->real_num_tx_queues = queue_count;
4270 dev->priv = ((char *)dev +
4271 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
4272 & ~NETDEV_ALIGN_CONST));
4275 dev->gso_max_size = GSO_MAX_SIZE;
4277 netdev_init_queues(dev);
4279 dev->get_stats = internal_stats;
4280 netpoll_netdev_init(dev);
4282 strcpy(dev->name, name);
4285 EXPORT_SYMBOL(alloc_netdev_mq);
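/*
 * Allocation sketch for a multiqueue device ("mydrv" names are
 * hypothetical): the private area lives in the same allocation,
 * reachable through netdev_priv(), and queue_count TX subqueues are
 * allocated alongside.
 */
#if 0
struct mydrv_priv {
	struct net_device *dev;		/* hypothetical back-pointer */
};

static struct net_device *mydrv_alloc(void)
{
	struct net_device *dev;
	struct mydrv_priv *priv;

	dev = alloc_netdev_mq(sizeof(struct mydrv_priv), "mq%d",
			      ether_setup, 8);	/* 8 TX subqueues */
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);	/* aligned area after the netdev */
	priv->dev = dev;
	return dev;
}
#endif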
4288 * free_netdev - free network device
4291 * This function does the last stage of destroying an allocated device
4292 * interface. The reference to the device object is released.
4293 * If this is the last reference then it will be freed.
4295 void free_netdev(struct net_device *dev)
4297 release_net(dev_net(dev));
4301 /* Compatibility with error handling in drivers */
4302 if (dev->reg_state == NETREG_UNINITIALIZED) {
4303 kfree((char *)dev - dev->padded);
4307 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4308 dev->reg_state = NETREG_RELEASED;
4310 /* will free via device release */
4311 put_device(&dev->dev);
4314 /* Synchronize with packet receive processing. */
4315 void synchronize_net(void)
4322 * unregister_netdevice - remove device from the kernel
4325 * This function shuts down a device interface and removes it
4326 * from the kernel tables.
4328 * Callers must hold the rtnl semaphore. You may want
4329 * unregister_netdev() instead of this.
4332 void unregister_netdevice(struct net_device *dev)
4336 rollback_registered(dev);
4337 /* Finish processing unregister after unlock */
4342 * unregister_netdev - remove device from the kernel
4345 * This function shuts down a device interface and removes it
4346 * from the kernel tables.
4348 * This is just a wrapper for unregister_netdevice that takes
4349 * the rtnl semaphore. In general you want to use this and not
4350 * unregister_netdevice.
4352 void unregister_netdev(struct net_device *dev)
4355 unregister_netdevice(dev);
4359 EXPORT_SYMBOL(unregister_netdev);
4362 * dev_change_net_namespace - move device to a different network namespace
4364 * @net: network namespace
4365 * @pat: If not NULL name pattern to try if the current device name
4366 * is already taken in the destination network namespace.
4368 * This function shuts down a device interface and moves it
4369 * to a new network namespace. On success 0 is returned, on
4370 * a failure a negative errno code is returned.
4372 * Callers must hold the rtnl semaphore.
4375 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4378 const char *destname;
4383 /* Don't allow namespace local devices to be moved. */
4385 if (dev->features & NETIF_F_NETNS_LOCAL)
4388 /* Ensure the device has been registered */
4390 if (dev->reg_state != NETREG_REGISTERED)
4393 /* Get out if there is nothing to do */
4395 if (net_eq(dev_net(dev), net))
4398 /* Pick the destination device name, and ensure
4399 * we can use it in the destination network namespace.
4402 destname = dev->name;
4403 if (__dev_get_by_name(net, destname)) {
4404 /* We get here if we can't use the current device name */
4407 if (!dev_valid_name(pat))
4409 if (strchr(pat, '%')) {
4410 if (__dev_alloc_name(net, pat, buf) < 0)
4415 if (__dev_get_by_name(net, destname))
4420 * And now a mini version of register_netdevice and unregister_netdevice.
4423 /* If device is running close it first. */
4426 /* And unlink it from device chain */
4428 unlist_netdevice(dev);
4432 /* Shutdown queueing discipline. */
4435 /* Notify protocols that we are about to destroy
4436 this device. They should clean all the things.
4438 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4441 * Flush the unicast and multicast chains
4443 dev_addr_discard(dev);
4445 /* Actually switch the network namespace */
4446 dev_net_set(dev, net);
4448 /* Assign the new device name */
4449 if (destname != dev->name)
4450 strcpy(dev->name, destname);
4452 /* If there is an ifindex conflict assign a new one */
4453 if (__dev_get_by_index(net, dev->ifindex)) {
4454 int iflink = (dev->iflink == dev->ifindex);
4455 dev->ifindex = dev_new_index(net);
4457 dev->iflink = dev->ifindex;
4460 /* Fixup kobjects */
4461 netdev_unregister_kobject(dev);
4462 err = netdev_register_kobject(dev);
4465 /* Add the device back in the hashes */
4466 list_netdevice(dev);
4468 /* Notify protocols that a new device appeared. */
4469 call_netdevice_notifiers(NETDEV_REGISTER, dev);
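/*
 * Caller sketch: move a device into another namespace under the RTNL,
 * handing a "%d" pattern for name collisions, much as
 * default_device_exit() below does when pushing devices to init_net
 * (the function name here is hypothetical):
 */
#if 0
static int move_dev_to_ns(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target, "eth%d");
	rtnl_unlock();
	return err;
}
#endif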
4477 static int dev_cpu_callback(struct notifier_block *nfb,
4478 unsigned long action,
4481 struct sk_buff **list_skb;
4482 struct Qdisc **list_net;
4483 struct sk_buff *skb;
4484 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4485 struct softnet_data *sd, *oldsd;
4487 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4490 local_irq_disable();
4491 cpu = smp_processor_id();
4492 sd = &per_cpu(softnet_data, cpu);
4493 oldsd = &per_cpu(softnet_data, oldcpu);
4495 /* Find end of our completion_queue. */
4496 list_skb = &sd->completion_queue;
4498 list_skb = &(*list_skb)->next;
4499 /* Append completion queue from offline CPU. */
4500 *list_skb = oldsd->completion_queue;
4501 oldsd->completion_queue = NULL;
4503 /* Find end of our output_queue. */
4504 list_net = &sd->output_queue;
4506 list_net = &(*list_net)->next_sched;
4507 /* Append output queue from offline CPU. */
4508 *list_net = oldsd->output_queue;
4509 oldsd->output_queue = NULL;
4511 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4514 /* Process offline CPU's input_pkt_queue */
4515 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4521 #ifdef CONFIG_NET_DMA
4523 * net_dma_rebalance - try to maintain one DMA channel per CPU
4524 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4526 * This is called when the number of channels allocated to the net_dma client
4527 * changes. The net_dma client tries to have one DMA channel per CPU.
4530 static void net_dma_rebalance(struct net_dma *net_dma)
4532 unsigned int cpu, i, n, chan_idx;
4533 struct dma_chan *chan;
4535 if (cpus_empty(net_dma->channel_mask)) {
4536 for_each_online_cpu(cpu)
4537 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4542 cpu = first_cpu(cpu_online_map);
4544 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4545 chan = net_dma->channels[chan_idx];
4547 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4548 + (i < (num_online_cpus() %
4549 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4552 per_cpu(softnet_data, cpu).net_dma = chan;
4553 cpu = next_cpu(cpu, cpu_online_map);
4561 * netdev_dma_event - event callback for the net_dma_client
4562 * @client: should always be net_dma_client
4563 * @chan: DMA channel for the event
4564 * @state: DMA state to be handled
4566 static enum dma_state_client
4567 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4568 enum dma_state state)
4570 int i, found = 0, pos = -1;
4571 struct net_dma *net_dma =
4572 container_of(client, struct net_dma, client);
4573 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4575 spin_lock(&net_dma->lock);
4577 case DMA_RESOURCE_AVAILABLE:
4578 for (i = 0; i < nr_cpu_ids; i++)
4579 if (net_dma->channels[i] == chan) {
4582 } else if (net_dma->channels[i] == NULL && pos < 0)
4585 if (!found && pos >= 0) {
4587 net_dma->channels[pos] = chan;
4588 cpu_set(pos, net_dma->channel_mask);
4589 net_dma_rebalance(net_dma);
4592 case DMA_RESOURCE_REMOVED:
4593 for (i = 0; i < nr_cpu_ids; i++)
4594 if (net_dma->channels[i] == chan) {
4602 cpu_clear(pos, net_dma->channel_mask);
4603 net_dma->channels[i] = NULL;
4604 net_dma_rebalance(net_dma);
4610 spin_unlock(&net_dma->lock);
4616 * netdev_dma_register - register the networking subsystem as a DMA client
4618 static int __init netdev_dma_register(void)
4620 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(*net_dma.channels),
4622 if (unlikely(!net_dma.channels)) {
4624 "netdev_dma: no memory for net_dma.channels\n");
4627 spin_lock_init(&net_dma.lock);
4628 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4629 dma_async_client_register(&net_dma.client);
4630 dma_async_client_chan_request(&net_dma.client);
4635 static int __init netdev_dma_register(void) { return -ENODEV; }
4636 #endif /* CONFIG_NET_DMA */
4639 * netdev_compute_features - compute conjunction of two feature sets
4640 * @all: first feature set
4641 * @one: second feature set
4643 * Computes a new feature set after adding a device with feature set
4644 * @one to the master device with current feature set @all. Returns
4645 * the new feature set.
4647 int netdev_compute_features(unsigned long all, unsigned long one)
4649 /* if device needs checksumming, downgrade to hw checksumming */
4650 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4651 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4653 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4654 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4655 all ^= NETIF_F_HW_CSUM
4656 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4658 if (one & NETIF_F_GSO)
4659 one |= NETIF_F_GSO_SOFTWARE;
4662 /* If even one device supports robust GSO, enable it for all. */
4663 if (one & NETIF_F_GSO_ROBUST)
4664 all |= NETIF_F_GSO_ROBUST;
4666 all &= one | NETIF_F_LLTX;
4668 if (!(all & NETIF_F_ALL_CSUM))
4670 if (!(all & NETIF_F_SG))
4671 all &= ~NETIF_F_GSO_MASK;
4675 EXPORT_SYMBOL(netdev_compute_features);
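/*
 * Usage sketch: a master device (bonding does this) folds every
 * slave's feature set into its own.  The starting set and the
 * "mybond" name are illustrative; the result converges on the
 * common denominator of all slaves.
 */
#if 0
static unsigned long mybond_features(struct net_device **slaves, int n,
				     unsigned long all)
{
	int i;

	for (i = 0; i < n; i++)
		all = netdev_compute_features(all, slaves[i]->features);
	return all;
}
#endif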
4677 static struct hlist_head *netdev_create_hash(void)
4680 struct hlist_head *hash;
4682 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4684 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4685 INIT_HLIST_HEAD(&hash[i]);
4690 /* Initialize per network namespace state */
4691 static int __net_init netdev_init(struct net *net)
4693 INIT_LIST_HEAD(&net->dev_base_head);
4695 net->dev_name_head = netdev_create_hash();
4696 if (net->dev_name_head == NULL)
4699 net->dev_index_head = netdev_create_hash();
4700 if (net->dev_index_head == NULL)
4706 kfree(net->dev_name_head);
4711 char *netdev_drivername(struct net_device *dev, char *buffer, int len)
4713 struct device_driver *driver;
4714 struct device *parent;
4716 if (len <= 0 || !buffer)
4720 parent = dev->dev.parent;
4725 driver = parent->driver;
4726 if (driver && driver->name)
4727 strlcpy(buffer, driver->name, len);
4731 static void __net_exit netdev_exit(struct net *net)
4733 kfree(net->dev_name_head);
4734 kfree(net->dev_index_head);
4737 static struct pernet_operations __net_initdata netdev_net_ops = {
4738 .init = netdev_init,
4739 .exit = netdev_exit,
4742 static void __net_exit default_device_exit(struct net *net)
4744 struct net_device *dev, *next;
4746 * Push all migratable network devices back to the
4747 * initial network namespace
4750 for_each_netdev_safe(net, dev, next) {
4752 char fb_name[IFNAMSIZ];
4754 /* Ignore unmovable devices (e.g. loopback) */
4755 if (dev->features & NETIF_F_NETNS_LOCAL)
4758 /* Push remaining network devices to init_net */
4759 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4760 err = dev_change_net_namespace(dev, &init_net, fb_name);
4762 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
4763 __func__, dev->name, err);
4770 static struct pernet_operations __net_initdata default_device_ops = {
4771 .exit = default_device_exit,
4775 * Initialize the DEV module. At boot time this walks the device list and
4776 * unhooks any devices that fail to initialise (normally hardware not
4777 * present) and leaves us with a valid list of present and active devices.
4782 * This is called single threaded during boot, so no need
4783 * to take the rtnl semaphore.
4785 static int __init net_dev_init(void)
4787 int i, rc = -ENOMEM;
4789 BUG_ON(!dev_boot_phase);
4791 if (dev_proc_init())
4794 if (netdev_kobject_init())
4797 INIT_LIST_HEAD(&ptype_all);
4798 for (i = 0; i < PTYPE_HASH_SIZE; i++)
4799 INIT_LIST_HEAD(&ptype_base[i]);
4801 if (register_pernet_subsys(&netdev_net_ops))
4804 if (register_pernet_device(&default_device_ops))
4808 * Initialise the packet receive queues.
4811 for_each_possible_cpu(i) {
4812 struct softnet_data *queue;
4814 queue = &per_cpu(softnet_data, i);
4815 skb_queue_head_init(&queue->input_pkt_queue);
4816 queue->completion_queue = NULL;
4817 INIT_LIST_HEAD(&queue->poll_list);
4819 queue->backlog.poll = process_backlog;
4820 queue->backlog.weight = weight_p;
4823 netdev_dma_register();
4827 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4828 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
4830 hotcpu_notifier(dev_cpu_callback, 0);
4838 subsys_initcall(net_dev_init);
4840 EXPORT_SYMBOL(__dev_get_by_index);
4841 EXPORT_SYMBOL(__dev_get_by_name);
4842 EXPORT_SYMBOL(__dev_remove_pack);
4843 EXPORT_SYMBOL(dev_valid_name);
4844 EXPORT_SYMBOL(dev_add_pack);
4845 EXPORT_SYMBOL(dev_alloc_name);
4846 EXPORT_SYMBOL(dev_close);
4847 EXPORT_SYMBOL(dev_get_by_flags);
4848 EXPORT_SYMBOL(dev_get_by_index);
4849 EXPORT_SYMBOL(dev_get_by_name);
4850 EXPORT_SYMBOL(dev_open);
4851 EXPORT_SYMBOL(dev_queue_xmit);
4852 EXPORT_SYMBOL(dev_remove_pack);
4853 EXPORT_SYMBOL(dev_set_allmulti);
4854 EXPORT_SYMBOL(dev_set_promiscuity);
4855 EXPORT_SYMBOL(dev_change_flags);
4856 EXPORT_SYMBOL(dev_set_mtu);
4857 EXPORT_SYMBOL(dev_set_mac_address);
4858 EXPORT_SYMBOL(free_netdev);
4859 EXPORT_SYMBOL(netdev_boot_setup_check);
4860 EXPORT_SYMBOL(netdev_set_master);
4861 EXPORT_SYMBOL(netdev_state_change);
4862 EXPORT_SYMBOL(netif_receive_skb);
4863 EXPORT_SYMBOL(netif_rx);
4864 EXPORT_SYMBOL(register_gifconf);
4865 EXPORT_SYMBOL(register_netdevice);
4866 EXPORT_SYMBOL(register_netdevice_notifier);
4867 EXPORT_SYMBOL(skb_checksum_help);
4868 EXPORT_SYMBOL(synchronize_net);
4869 EXPORT_SYMBOL(unregister_netdevice);
4870 EXPORT_SYMBOL(unregister_netdevice_notifier);
4871 EXPORT_SYMBOL(net_enable_timestamp);
4872 EXPORT_SYMBOL(net_disable_timestamp);
4873 EXPORT_SYMBOL(dev_get_flags);
4875 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4876 EXPORT_SYMBOL(br_handle_frame_hook);
4877 EXPORT_SYMBOL(br_fdb_get_hook);
4878 EXPORT_SYMBOL(br_fdb_put_hook);
4882 EXPORT_SYMBOL(dev_load);
4885 EXPORT_PER_CPU_SYMBOL(softnet_data);