/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb != NULL) {
		tb->ib_net    = hold_net(net);
		tb->port      = snum;
		tb->fastreuse = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(tb->ib_net);
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

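/*
 * Illustrative only: a bind()-time caller such as inet_csk_get_port() is
 * expected to use the helpers above with the bucket chain lock held, roughly:
 *
 *	head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
 *	spin_lock(&head->lock);
 *	... search head->chain for a bucket with tb->port == snum ...
 *	if (!tb)
 *		tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
 *					     net, head, snum);
 *	if (tb)
 *		inet_bind_hash(sk, tb, snum);
 *	spin_unlock(&head->lock);
 */
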
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (wake up each
 * exclusive lock release). It should be ifdefed really.
 */
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
	__acquires(hashinfo->lhash_lock)
{
	write_lock(&hashinfo->lhash_lock);

	if (atomic_read(&hashinfo->lhash_users)) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&hashinfo->lhash_wait,
						  &wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&hashinfo->lhash_users))
				break;
			write_unlock_bh(&hashinfo->lhash_lock);
			schedule();
			write_lock_bh(&hashinfo->lhash_lock);
		}

		finish_wait(&hashinfo->lhash_wait, &wait);
	}
}

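/*
 * Note: inet_listen_wlock() returns with lhash_lock held for writing (see the
 * __acquires annotation).  Callers in this file pair it with write_unlock()
 * or write_unlock_bh() on lhash_lock plus a wake_up(&hashinfo->lhash_wait),
 * as __inet_hash() and inet_unhash() do below.
 */
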
/*
 * Don't inline this cruft. There are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
static struct sock *inet_lookup_listener_slow(struct net *net,
					      const struct hlist_head *head,
					      const __be32 daddr,
					      const unsigned short hnum,
					      const int dif)
{
	struct sock *result = NULL, *sk;
	const struct hlist_node *node;
	int hiscore = -1;

	sk_for_each(sk, node, head) {
		const struct inet_sock *inet = inet_sk(sk);

		if (net_eq(sock_net(sk), net) && inet->num == hnum &&
		    !ipv6_only_sock(sk)) {
			const __be32 rcv_saddr = inet->rcv_saddr;
			int score = sk->sk_family == PF_INET ? 1 : 0;

			if (rcv_saddr) {
				if (rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 5)
				return sk;
			if (score > hiscore) {
				hiscore	= score;
				result	= sk;
			}
		}
	}
	return result;
}

/* Optimize the common listener case. */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk = NULL;
	const struct hlist_head *head;

	read_lock(&hashinfo->lhash_lock);
	head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
	if (!hlist_empty(head)) {
		const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

		if (inet->num == hnum && !sk->sk_node.next &&
		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
		    !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
			goto sherry_cache;
		sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&hashinfo->lhash_lock);
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

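/*
 * Illustrative only: a receive path resolving an incoming segment would call
 * this (typically via the wrappers in <net/inet_hashtables.h>) along the
 * lines of
 *
 *	sk = __inet_lookup_listener(net, &tcp_hashinfo, ip_hdr(skb)->daddr,
 *				    ntohs(tcp_hdr(skb)->dest), inet_iif(skb));
 *
 * with hnum in host byte order and daddr taken as-is from the header; a
 * non-NULL result has already been ref-counted with sock_hold().
 */
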
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, hash, acookie,
			       saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, net, hash, acookie,
				  saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

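/*
 * Note for callers: the socket returned above has had its reference count
 * raised with sock_hold() and must eventually be dropped with sock_put().
 * Since the twchain is searched too, the result may be a TIME-WAIT socket;
 * callers are expected to check sk->sk_state == TCP_TIME_WAIT before
 * treating it as a full socket.
 */
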
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->rcv_saddr;
	__be32 saddr = inet->daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
	unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;
	struct net *net = sock_net(sk);

	prefetch(head->chain.first);
	write_lock(lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &head->twchain) {
		tw = inet_twsk(sk2);

		if (INET_TW_MATCH(sk2, net, hash, acookie,
				  saddr, daddr, ports, dif)) {
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET_MATCH(sk2, net, hash, acookie,
			       saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity. */
	inet->num = lport;
	inet->sport = htons(lport);
	sk->sk_hash = hash;
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}

	return 0;

not_unique:
	write_unlock(lock);
	return -EADDRNOTAVAIL;
}

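/*
 * Contract of __inet_check_established(): a return of 0 means the four-tuple
 * is unique and the socket has been added to the established chain with
 * inet->num and inet->sport filled in; when twp is non-NULL, *twp carries any
 * matching TIME-WAIT socket for the caller to recycle.  -EADDRNOTAVAIL means
 * the identity is already in use.
 */
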
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
					  inet->dport);
}

void __inet_hash_nolisten(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;
	struct inet_ehash_bucket *head;

	BUG_TRAP(sk_unhashed(sk));

	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	write_lock(lock);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;

	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk);
		return;
	}

	BUG_TRAP(sk_unhashed(sk));
	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
	lock = &hashinfo->lhash_lock;

	inet_listen_wlock(hashinfo);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
	wake_up(&hashinfo->lhash_wait);
}

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	rwlock_t *lock;
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
EXPORT_SYMBOL_GPL(inet_unhash);

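/*
 * inet_hash() and inet_unhash() are meant to be wired up as the .hash and
 * .unhash callbacks of a protocol's struct proto (tcp_prot being the usual
 * user); that is also where sk->sk_prot->h.hashinfo above comes from.  This
 * is an assumption about the callers rather than something enforced here.
 */
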
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		void (*hash)(struct sock *sk))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->ib_net == net && tb->port == port) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
						     net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(port);
			hash(sk);
		}
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}

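/*
 * The ephemeral-port search above walks the whole local port range starting
 * at a per-destination offset (hint + port_offset).  A bucket created here is
 * marked fastreuse = -1 so a later bind() will not share the port via the
 * fast-reuse path, and an existing bucket is reused only when
 * check_established() proves the four-tuple unique, possibly handing back a
 * TIME-WAIT socket to be descheduled.
 */
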
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
				   __inet_check_established, __inet_hash_nolisten);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);