/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/selinux.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
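/*
 * A worked example of the arithmetic (illustrative): NLGRPSZ() rounds a
 * group count up to a whole number of longs and converts bits to bytes,
 * so on a 64-bit machine NLGRPSZ(32) = ALIGN(32, 64) / 8 = 8 bytes and
 * NLGRPLONGS(32) = 1, while on a 32-bit machine NLGRPSZ(32) = 4 bytes
 * and NLGRPLONGS(32) is likewise 1.
 */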
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	u32			dst_pid;
	u32			dst_group;
	u32			flags;
	u32			subscriptions;
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	struct mutex		*cb_mutex;
	struct mutex		cb_def_mutex;
	void			(*data_ready)(struct sock *sk, int bytes);
	struct module		*module;
};

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}
struct nl_pid_hash {
	struct hlist_head	*table;
	unsigned long		rehash_time;

	unsigned int		mask;
	unsigned int		shift;

	unsigned int		entries;
	unsigned int		max_shift;

	u32			rnd;
};
struct netlink_table {
	struct nl_pid_hash	hash;
	struct hlist_head	mc_list;
	unsigned long		*listeners;
	unsigned int		nl_nonroot;
	unsigned int		groups;
	struct mutex		*cb_mutex;
	struct module		*module;
	int			registered;
};
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);
static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->groups);
}
/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the cpus. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
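/*
 * Usage sketch (illustrative, not a function in this file): writers pair
 * netlink_table_grab()/netlink_table_ungrab() around table updates, while
 * paths that only look the table up pair netlink_lock_table()/
 * netlink_unlock_table() and may sleep while holding their "reference":
 *
 *	netlink_table_grab();
 *	... modify nl_table[] or a socket's subscriptions ...
 *	netlink_table_ungrab();
 *
 *	netlink_lock_table();
 *	... look up and deliver to sockets in nl_table[], possibly sleeping ...
 *	netlink_unlock_table();
 */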
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct socket *sock, struct mutex *cb_mutex,
			    int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_create(struct socket *sock, int protocol)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_KMOD
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	if ((err = __netlink_create(sock, cb_mutex, protocol)) < 0)
		goto out_module;

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->subscriptions) {
		struct netlink_notify n = {
						.protocol = sk->sk_protocol,
						.pid = nlk->pid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (nlk->flags & NETLINK_KERNEL_SOCKET) {
		kfree(nl_table[sk->sk_protocol].listeners);
		nl_table[sk->sk_protocol].module = NULL;
		nl_table[sk->sk_protocol].registered = 0;
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->tgid;
	int err;
	static s32 rover = -4097;

retry:
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}
static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
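/*
 * Userspace view of this bind path (a minimal sketch; NETLINK_ROUTE and
 * the group bitmask are illustrative):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,	... 0 lets netlink_autobind() pick a pid
 *		.nl_groups = 1,	... bitmask covering groups 1..32
 *	};
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *
 * Groups above 32 do not fit in nl_groups and must be joined with
 * setsockopt(NETLINK_ADD_MEMBERSHIP), handled further down.
 */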
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!ssk || nlk_sk(ssk)->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
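/*
 * Typical kernel-side caller (a sketch, not part of this file; "my_sk",
 * the NLMSG_NOOP message type and the default size are illustrative):
 *
 *	static int send_reply(struct sock *my_sk, u32 pid, u32 seq)
 *	{
 *		struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *		struct nlmsghdr *nlh;
 *
 *		if (skb == NULL)
 *			return -ENOBUFS;
 *		nlh = nlmsg_put(skb, pid, seq, NLMSG_NOOP, 0, 0);
 *		if (nlh == NULL) {
 *			kfree_skb(skb);
 *			return -EMSGSIZE;
 *		}
 *		nlmsg_end(skb, nlh);
 *		return netlink_unicast(my_sk, skb, pid, 0);
 *	}
 */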
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	unsigned long *listeners;

	BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
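/*
 * The point of netlink_has_listeners() is to let event sources skip
 * building a message nobody would receive. A sketch (MY_GROUP and
 * build_event() are illustrative):
 *
 *	if (netlink_has_listeners(my_sk, MY_GROUP)) {
 *		skb = build_event(...);
 *		netlink_broadcast(my_sk, skb, 0, MY_GROUP, GFP_KERNEL);
 *	}
 */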
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};
static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};
static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
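/*
 * Userspace counterpart of the membership options (sketch; group 33 is
 * illustrative). This is the only way to join groups beyond the 32 bits
 * of sockaddr_nl.nl_groups:
 *
 *	unsigned int grp = 33;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */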
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
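/*
 * Userspace reads the control message queued above roughly like this
 * (sketch; assumes recvmsg() was called with a msg_control buffer):
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_NETLINK &&
 *		    cmsg->cmsg_type == NETLINK_PKTINFO) {
 *			struct nl_pktinfo *pi = (void *)CMSG_DATA(cmsg);
 *			... pi->group is the destination multicast group
 *		}
 */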
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	   sender's capabilities here and check them when this message is
	   delivered to the corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(int unit, unsigned int groups,
		      void (*input)(struct sock *sk, int len),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(sock, cb_mutex, unit) < 0)
		goto out_sock_release;

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	nl_table[unit].groups = groups;
	nl_table[unit].listeners = listeners;
	nl_table[unit].cb_mutex = cb_mutex;
	nl_table[unit].module = module;
	nl_table[unit].registered = 1;
	netlink_table_ungrab();

	return sk;

out_sock_release:
	kfree(listeners);
	sock_release(sock);
	return NULL;
}
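/*
 * Typical registration by a protocol family (sketch; NETLINK_TEST and
 * test_input are illustrative names, not defined in this file):
 *
 *	nl_sk = netlink_kernel_create(NETLINK_TEST, 0, test_input,
 *				      NULL, THIS_MODULE);
 *	if (nl_sk == NULL)
 *		goto err;	... no registration took place
 */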
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	unsigned long *listeners, *old = NULL;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	int err = 0;

	if (groups < 32)
		groups = 32;

	netlink_table_grab();
	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
		if (!listeners) {
			err = -ENOMEM;
			goto out_ungrab;
		}
		old = tbl->listeners;
		memcpy(listeners, old, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, listeners);
	}
	tbl->groups = groups;

out_ungrab:
	netlink_table_ungrab();
	synchronize_rcu();
	kfree(old);
	return err;
}
EXPORT_SYMBOL(netlink_change_ngroups);
/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct hlist_node *node;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	netlink_table_grab();

	sk_for_each_bound(sk, node, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);

	netlink_table_ungrab();
}
EXPORT_SYMBOL(netlink_clear_multicast_users);
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
errout:
	return err;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback*),
		       int (*done)(struct netlink_callback*))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	netlink_dump(sk);
	sock_put(sk);

	/* We successfully started a dump, by returning -EINTR we
	 * signal the queue management to interrupt processing of
	 * any netlink messages so userspace gets a chance to read
	 * the results. */
	return -EINTR;
}
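/*
 * Request handlers wire this up and propagate the -EINTR result, e.g.
 * (a sketch; my_dump/my_done are illustrative callbacks):
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP)
 *		return netlink_dump_start(my_sk, skb, nlh,
 *					  my_dump, my_done);
 */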
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						     struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto skip;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto skip;

		err = cb(skb, nlh);
		if (err == -EINTR) {
			/* Not an error, but we interrupt processing */
			netlink_queue_skip(nlh, skb);
			return err;
		}
skip:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

		netlink_queue_skip(nlh, skb);
	}

	return 0;
}
/**
 * netlink_run_queue - Process netlink receive queue.
 * @sk: Netlink socket containing the queue
 * @qlen: Place to store queue length upon entry
 * @cb: Callback function invoked for each netlink message found
 *
 * Processes as much as there was in the queue upon entry and invokes
 * a callback function for each netlink message found. The callback
 * function may refuse a message by returning a negative error code;
 * the refused message is acknowledged with that error where an ack
 * was requested and processing continues with the next message.
 *
 * qlen must be initialized to 0 before the initial entry, afterwards
 * the function may be called repeatedly until qlen reaches 0.
 *
 * The callback function may return -EINTR to signal that processing
 * of netlink messages shall be interrupted. In this case the message
 * currently being processed will NOT be requeued onto the receive
 * queue, and this function returns with a qlen != 0.
 */
void netlink_run_queue(struct sock *sk, unsigned int *qlen,
		       int (*cb)(struct sk_buff *, struct nlmsghdr *))
{
	struct sk_buff *skb;

	if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
		*qlen = skb_queue_len(&sk->sk_receive_queue);

	for (; *qlen; (*qlen)--) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (netlink_rcv_skb(skb, cb)) {
			if (skb->len)
				skb_queue_head(&sk->sk_receive_queue, skb);
			else {
				kfree_skb(skb);
				(*qlen)--;
			}
			break;
		}

		kfree_skb(skb);
	}
}
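/*
 * Shape of an input handler built on netlink_run_queue(), modelled on
 * in-tree users (a sketch; my_rcv_msg is an illustrative callback):
 *
 *	static void my_input(struct sock *sk, int len)
 *	{
 *		unsigned int qlen = 0;
 *
 *		do {
 *			netlink_run_queue(sk, &qlen, &my_rcv_msg);
 *		} while (qlen);
 *	}
 */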
/**
 * netlink_queue_skip - Skip netlink message while processing queue.
 * @nlh: Netlink message to be skipped
 * @skb: Socket buffer containing the netlink messages.
 *
 * Pulls the given netlink message off the socket buffer so the next
 * call to netlink_run_queue() will not reconsider the message.
 */
static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
{
	int msglen = NLMSG_ALIGN(nlh->nlmsg_len);

	if (msglen > skb->len)
		msglen = skb->len;

	skb_pull(skb, msglen);
}
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err */
		nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report)
		err = nlmsg_unicast(sk, skb, pid);

	return err;
}
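/*
 * Typical notification pattern (sketch; MY_GROUP is illustrative, the
 * pid and echo flag come from the request that triggered the event):
 *
 *	u32 pid = NETLINK_CB(request_skb).pid;
 *	int report = nlmsg_report(nlh);	... set when NLM_F_ECHO was requested
 *
 *	err = nlmsg_notify(my_sk, skb, pid, MY_GROUP, report, GFP_KERNEL);
 */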
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	seq = file->private_data;
	seq->private = iter;
	return 0;
}
static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create(&init_net, "netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}
core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_run_queue);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);
EXPORT_SYMBOL(nlmsg_notify);