/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/selinux.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
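/*
 * Worked example: with 64-bit longs, NLGRPSZ(32) = ALIGN(32, 64) / 8 = 8
 * bytes, and NLGRPLONGS(32) = 8 / sizeof(unsigned long) = 1, i.e. a
 * 32-group bitmap fits in a single unsigned long.
 */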
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	u32			dst_pid;
	u32			dst_group;
	u32			flags;
	u32			subscriptions;
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	struct mutex		*cb_mutex;
	struct mutex		cb_def_mutex;
	void			(*data_ready)(struct sock *sk, int bytes);
	struct module		*module;
};

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}
struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};
struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned long *listeners;
	unsigned int nl_nonroot;
	unsigned int groups;
	struct mutex *cb_mutex;
	struct module *module;
	int registered;
};
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);
static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->groups);
}
/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	if ((err = __netlink_create(net, sock, cb_mutex, protocol)) < 0)
		goto out_module;

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->subscriptions) {
		struct netlink_notify n = {
						.protocol = sk->sk_protocol,
						.pid = nlk->pid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (nlk->flags & NETLINK_KERNEL_SOCKET) {
		kfree(nl_table[sk->sk_protocol].listeners);
		nl_table[sk->sk_protocol].module = NULL;
		nl_table[sk->sk_protocol].registered = 0;
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->tgid;
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser may listen to multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser may send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!ssk || nlk_sk(ssk)->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
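/*
 * Callers handle the "1" return above by redoing the socket lookup and
 * calling netlink_attachskb() again; netlink_unicast() below is the
 * in-tree example of that retry loop.
 */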
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	unsigned long *listeners;

	BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
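/*
 * Typical use (an illustrative sketch only; 'my_sock', 'MY_GROUP' and
 * 'my_build_skb()' are hypothetical): skip building a notification
 * entirely when nobody is subscribed:
 *
 *	if (netlink_has_listeners(my_sock, MY_GROUP)) {
 *		skb = my_build_skb();
 *		netlink_broadcast(my_sock, skb, 0, MY_GROUP, GFP_KERNEL);
 *	}
 */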
static __inline__ int netlink_broadcast_deliver(struct sock *sk,
						struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};
static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();

		err = 0;
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, (int __user *)optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* Netlink is asynchronous, so we have to save the current
	 * capabilities here and check them when the message is delivered
	 * to the corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(int unit, unsigned int groups,
		      void (*input)(struct sock *sk, int len),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release;

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	nl_table[unit].groups = groups;
	nl_table[unit].listeners = listeners;
	nl_table[unit].cb_mutex = cb_mutex;
	nl_table[unit].module = module;
	nl_table[unit].registered = 1;
	netlink_table_ungrab();

	return sk;

out_sock_release:
	kfree(listeners);
	sock_release(sock);
	return NULL;
}
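/*
 * Example usage (an illustrative sketch only; 'MY_NETLINK', 'my_sock'
 * and 'my_input' are hypothetical): a subsystem creates its kernel-side
 * socket once at init time and keeps the returned sock:
 *
 *	static void my_input(struct sock *sk, int len)
 *	{
 *		... process sk->sk_receive_queue ...
 *	}
 *
 *	my_sock = netlink_kernel_create(MY_NETLINK, 0, my_input,
 *					NULL, THIS_MODULE);
 *	if (my_sock == NULL)
 *		return -ENOMEM;
 */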
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	unsigned long *listeners, *old = NULL;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	int err = 0;

	if (groups < 32)
		groups = 32;

	netlink_table_grab();
	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
		if (!listeners) {
			err = -ENOMEM;
			goto out_ungrab;
		}
		old = tbl->listeners;
		memcpy(listeners, old, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, listeners);
	}
	tbl->groups = groups;

out_ungrab:
	netlink_table_ungrab();
	synchronize_rcu();
	kfree(old);
	return err;
}
EXPORT_SYMBOL(netlink_change_ngroups);
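/*
 * Example (an illustrative sketch only; 'my_sock' and 'my_ngroups' are
 * hypothetical): a family that registers an additional multicast group
 * at runtime grows the group count like this:
 *
 *	err = netlink_change_ngroups(my_sock, my_ngroups + 1);
 */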
/**
 * netlink_clear_multicast_users - remove all listeners from a multicast group
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct hlist_node *node;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	netlink_table_grab();

	sk_for_each_bound(sk, node, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);

	netlink_table_ungrab();
}
EXPORT_SYMBOL(netlink_clear_multicast_users);
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
errout:
	return err;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	netlink_dump(sk);
	sock_put(sk);

	/* We successfully started a dump, by returning -EINTR we
	 * signal the queue management to interrupt processing of
	 * any netlink messages so userspace gets a chance to read
	 * the results. */
	return -EINTR;
}
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	struct sock *sk;

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						     struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto skip;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto skip;

		err = cb(skb, nlh);
		if (err == -EINTR) {
			/* Not an error, but we interrupt processing */
			netlink_queue_skip(nlh, skb);
			return err;
		}
skip:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

		netlink_queue_skip(nlh, skb);
	}

	return 0;
}
/**
 * netlink_run_queue - Process netlink receive queue.
 * @sk: Netlink socket containing the queue
 * @qlen: Place to store queue length upon entry
 * @cb: Callback function invoked for each netlink message found
 *
 * Processes as much as there was in the queue upon entry and invokes
 * a callback function for each netlink message found. The callback
 * function may refuse a message by returning a negative error code
 * while setting the error pointer to 0, in which case this function
 * returns with a qlen != 0.
 *
 * qlen must be initialized to 0 before the initial entry, afterwards
 * the function may be called repeatedly until qlen reaches 0.
 *
 * The callback function may return -EINTR to signal that processing
 * of netlink messages shall be interrupted. In this case the message
 * currently being processed will NOT be requeued onto the receive
 * queue.
 */
void netlink_run_queue(struct sock *sk, unsigned int *qlen,
		       int (*cb)(struct sk_buff *, struct nlmsghdr *))
{
	struct sk_buff *skb;

	if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
		*qlen = skb_queue_len(&sk->sk_receive_queue);

	for (; *qlen; (*qlen)--) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (netlink_rcv_skb(skb, cb)) {
			if (skb->len)
				skb_queue_head(&sk->sk_receive_queue, skb);
			else {
				kfree_skb(skb);
				(*qlen)--;
			}
			break;
		}
		kfree_skb(skb);
	}
}
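/*
 * Example usage (an illustrative sketch only; 'my_rcv_msg' is
 * hypothetical): a kernel socket's input callback drains its receive
 * queue with netlink_run_queue(), looping until qlen reaches 0:
 *
 *	static void my_input(struct sock *sk, int len)
 *	{
 *		unsigned int qlen = 0;
 *
 *		do {
 *			netlink_run_queue(sk, &qlen, &my_rcv_msg);
 *		} while (qlen);
 *	}
 */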
/**
 * netlink_queue_skip - Skip netlink message while processing queue.
 * @nlh: Netlink message to be skipped
 * @skb: Socket buffer containing the netlink messages.
 *
 * Pulls the given netlink message off the socket buffer so the next
 * call to netlink_run_queue() will not reconsider the message.
 */
static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
{
	int msglen = NLMSG_ALIGN(nlh->nlmsg_len);

	if (msglen > skb->len)
		msglen = skb->len;

	skb_pull(skb, msglen);
}
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err */
		nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report)
		err = nlmsg_unicast(sk, skb, pid);

	return err;
}
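/*
 * Example usage (an illustrative sketch only; 'my_sock' and 'MY_GROUP'
 * are hypothetical): rtnetlink-style notifiers pass the requester's pid
 * and set 'report' when the sender asked for an echo:
 *
 *	err = nlmsg_notify(my_sock, skb, NETLINK_CB(req_skb).pid, MY_GROUP,
 *			   nlh->nlmsg_flags & NLM_F_ECHO, GFP_KERNEL);
 */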
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	seq = file->private_data;
	seq->private = iter;
	return 0;
}
static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create(&init_net, "netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);
EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_run_queue);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);
EXPORT_SYMBOL(nlmsg_notify);