skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk("Freeing alive netlink socket %p\n", sk);
+ printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
return;
}
BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
BUG_TRAP(!nlk_sk(sk)->groups);
}
-/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
- * Look, when several writers sleep and reader wakes them up, all but one
+/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
+ * SMP. Look, when several writers sleep and a reader wakes them up, all but one
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
* this, _but_ remember, it adds useless work on UP machines.
*/
static void netlink_table_grab(void)
+ __acquires(nl_table_lock)
{
write_lock_irq(&nl_table_lock);
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&nl_table_wait, &wait);
- for(;;) {
+ for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&nl_table_users) == 0)
break;
}
}
-static __inline__ void netlink_table_ungrab(void)
+static void netlink_table_ungrab(void)
+ __releases(nl_table_lock)
{
write_unlock_irq(&nl_table_lock);
wake_up(&nl_table_wait);
}
-static __inline__ void
+static inline void
netlink_lock_table(void)
{
/* read_lock() synchronizes us to netlink_table_grab */
read_unlock(&nl_table_lock);
}
-static __inline__ void
+static inline void
netlink_unlock_table(void)
{
if (atomic_dec_and_test(&nl_table_users))
wake_up(&nl_table_wait);
}
-static __inline__ struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
+static inline struct sock *netlink_lookup(struct net *net, int protocol,
+ u32 pid)
{
struct nl_pid_hash *hash = &nl_table[protocol].hash;
struct hlist_head *head;
return sk;
}
-static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
+static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
{
if (size <= PAGE_SIZE)
- return kmalloc(size, GFP_ATOMIC);
+ return kzalloc(size, GFP_ATOMIC);
else
return (struct hlist_head *)
- __get_free_pages(GFP_ATOMIC, get_order(size));
+ __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
+ get_order(size));
}
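/*
 * Illustrative note: both branches of nl_pid_hash_zalloc() return zeroed
 * memory (kzalloc() and __GFP_ZERO respectively), so callers can link
 * sockets into the new table without an extra memset().  GFP_ATOMIC is
 * needed because the grow path, nl_pid_hash_rehash(), runs with
 * nl_table_lock write-held and therefore must not sleep.
 */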
static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
size *= 2;
}
- table = nl_pid_hash_alloc(size);
+ table = nl_pid_hash_zalloc(size);
if (!table)
return 0;
- memset(table, 0, size);
otable = hash->table;
hash->table = table;
hash->mask = mask;
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
- if (protocol<0 || protocol >= MAX_LINKS)
+ if (protocol < 0 || protocol >= MAX_LINKS)
return -EPROTONOSUPPORT;
netlink_lock_table();
cb_mutex = nl_table[protocol].cb_mutex;
netlink_unlock_table();
- if ((err = __netlink_create(net, sock, cb_mutex, protocol)) < 0)
+ err = __netlink_create(net, sock, cb_mutex, protocol);
+ if (err < 0)
goto out_module;
nlk = nlk_sk(sock->sk);
err = -ENOMEM;
goto out_unlock;
}
- memset((char*)new_groups + NLGRPSZ(nlk->ngroups), 0,
+ memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
nlk->groups = new_groups;
return err;
}
-static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
{
struct sock *sk = sock->sk;
struct net *net = sk->sk_net;
int err = 0;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- struct sockaddr_nl *nladdr=(struct sockaddr_nl*)addr;
+ struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
if (addr->sa_family == AF_UNSPEC) {
sk->sk_state = NETLINK_UNCONNECTED;
return err;
}
-static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
+static int netlink_getname(struct socket *sock, struct sockaddr *addr,
+ int *addr_len, int peer)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;
+ struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
nladdr->nl_family = AF_NETLINK;
nladdr->nl_pad = 0;
return netlink_sendskb(sk, skb);
}
+EXPORT_SYMBOL(netlink_unicast);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
-static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
+static inline int netlink_broadcast_deliver(struct sock *sk,
+ struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
return -ENOBUFS;
return -ESRCH;
}
+EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
struct sock *exclude_sk;
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- struct sockaddr_nl *addr=msg->msg_name;
+ struct sockaddr_nl *addr = msg->msg_name;
u32 dst_pid;
u32 dst_group;
struct sk_buff *skb;
goto out;
err = -ENOBUFS;
skb = alloc_skb(len, GFP_KERNEL);
- if (skb==NULL)
+ if (skb == NULL)
goto out;
NETLINK_CB(skb).pid = nlk->pid;
*/
err = -EFAULT;
- if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) {
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
goto out;
}
copied = 0;
- skb = skb_recv_datagram(sk,flags,noblock,&err);
- if (skb==NULL)
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (skb == NULL)
goto out;
msg->msg_namelen = 0;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (msg->msg_name) {
- struct sockaddr_nl *addr = (struct sockaddr_nl*)msg->msg_name;
+ struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
addr->nl_family = AF_NETLINK;
addr->nl_pad = 0;
addr->nl_pid = NETLINK_CB(skb).pid;
BUG_ON(!nl_table);
- if (unit<0 || unit>=MAX_LINKS)
+ if (unit < 0 || unit >= MAX_LINKS)
return NULL;
if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
sock_release(sock);
return NULL;
}
+EXPORT_SYMBOL(netlink_kernel_create);
/**
* netlink_change_ngroups - change number of multicast groups
if ((unsigned int)protocol < MAX_LINKS)
nl_table[protocol].nl_nonroot = flags;
}
+EXPORT_SYMBOL(netlink_set_nonroot);
static void netlink_destroy_callback(struct netlink_callback *cb)
{
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct nlmsghdr *nlh,
- int (*dump)(struct sk_buff *skb, struct netlink_callback*),
- int (*done)(struct netlink_callback*))
+ int (*dump)(struct sk_buff *skb,
+ struct netlink_callback *),
+ int (*done)(struct netlink_callback *))
{
struct netlink_callback *cb;
struct sock *sk;
*/
return -EINTR;
}
+EXPORT_SYMBOL(netlink_dump_start);
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
+EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
struct nlmsghdr *))
netlink_ack(skb, nlh, err);
skip:
- msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+ msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
return 0;
}
+EXPORT_SYMBOL(netlink_rcv_skb);
/**
* nlmsg_notify - send a notification netlink message
return err;
}
+EXPORT_SYMBOL(nlmsg_notify);
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
- struct net *net;
+ struct seq_net_private p;
int link;
int hash_idx;
};
struct hlist_node *node;
loff_t off = 0;
- for (i=0; i<MAX_LINKS; i++) {
+ for (i = 0; i < MAX_LINKS; i++) {
struct nl_pid_hash *hash = &nl_table[i].hash;
for (j = 0; j <= hash->mask; j++) {
sk_for_each(s, node, &hash->table[j]) {
- if (iter->net != s->sk_net)
+ if (iter->p.net != s->sk_net)
continue;
if (off == pos) {
iter->link = i;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(nl_table_lock)
{
read_lock(&nl_table_lock);
return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
s = v;
do {
s = sk_next(s);
- } while (s && (iter->net != s->sk_net));
+ } while (s && (iter->p.net != s->sk_net));
if (s)
return s;
for (; j <= hash->mask; j++) {
s = sk_head(&hash->table[j]);
- while (s && (iter->net != s->sk_net))
+ while (s && (iter->p.net != s->sk_net))
s = sk_next(s);
if (s) {
iter->link = i;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
+ __releases(nl_table_lock)
{
read_unlock(&nl_table_lock);
}
static int netlink_seq_open(struct inode *inode, struct file *file)
{
- struct nl_seq_iter *iter;
-
- iter = __seq_open_private(file, &netlink_seq_ops, sizeof(*iter));
- if (!iter)
- return -ENOMEM;
-
- iter->net = get_proc_net(inode);
- if (!iter->net) {
- seq_release_private(inode, file);
- return -ENXIO;
- }
-
- return 0;
-}
-
-static int netlink_seq_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct nl_seq_iter *iter = seq->private;
- put_net(iter->net);
- return seq_release_private(inode, file);
+ return seq_open_net(inode, file, &netlink_seq_ops,
+ sizeof(struct nl_seq_iter));
}
static const struct file_operations netlink_seq_fops = {
.open = netlink_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = netlink_seq_release,
+ .release = seq_release_net,
};
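/*
 * Illustrative note: seq_open_net() allocates the seq_file private data
 * (struct nl_seq_iter here, whose first member must be the
 * struct seq_net_private the helper fills in) and grabs a reference on the
 * network namespace of the /proc inode being opened; seq_release_net()
 * drops that reference on close, so no explicit get_proc_net()/put_net()
 * handling is needed in this file.
 */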
#endif
{
return atomic_notifier_chain_register(&netlink_chain, nb);
}
+EXPORT_SYMBOL(netlink_register_notifier);
int netlink_unregister_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
+EXPORT_SYMBOL(netlink_unregister_notifier);
static const struct proto_ops netlink_ops = {
.family = PF_NETLINK,
for (i = 0; i < MAX_LINKS; i++) {
struct nl_pid_hash *hash = &nl_table[i].hash;
- hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
+ hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
if (!hash->table) {
while (i-- > 0)
nl_pid_hash_free(nl_table[i].hash.table,
kfree(nl_table);
goto panic;
}
- memset(hash->table, 0, 1 * sizeof(*hash->table));
hash->max_shift = order;
hash->shift = 0;
hash->mask = 0;
}
core_initcall(netlink_proto_init);
-
-EXPORT_SYMBOL(netlink_ack);
-EXPORT_SYMBOL(netlink_rcv_skb);
-EXPORT_SYMBOL(netlink_broadcast);
-EXPORT_SYMBOL(netlink_dump_start);
-EXPORT_SYMBOL(netlink_kernel_create);
-EXPORT_SYMBOL(netlink_register_notifier);
-EXPORT_SYMBOL(netlink_set_nonroot);
-EXPORT_SYMBOL(netlink_unicast);
-EXPORT_SYMBOL(netlink_unregister_notifier);
-EXPORT_SYMBOL(nlmsg_notify);