net: convert BUG_TRAP to generic WARN_ON
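The conversion is not a one-for-one textual swap: BUG_TRAP(cond) complained when cond was false, whereas WARN_ON(cond) warns when cond is true, so every call site below inverts its condition. A rough sketch of the two macros, paraphrased rather than copied verbatim from the kernel headers:

/* Old net-specific assertion helper (approximate shape): it printed a
 * message when the asserted condition did NOT hold. */
#define BUG_TRAP(x)							\
	do {								\
		if (unlikely(!(x)))					\
			printk(KERN_ERR					\
			       "KERNEL: assertion (%s) failed at %s (%d)\n", \
			       #x, __FILE__, __LINE__);			\
	} while (0)

/* The generic WARN_ON(condition) from <asm-generic/bug.h> fires when the
 * condition IS true (and also dumps a stack trace), hence the inversions:
 *
 *	BUG_TRAP(sk_unhashed(sk))            ->  WARN_ON(!sk_unhashed(sk))
 *	BUG_TRAP(!hlist_empty(&tb->owners))  ->  WARN_ON(hlist_empty(&tb->owners))
 */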
[linux-2.6-omap-h63xx.git] / net / ipv4 / inet_hashtables.c
index 4f597b3175e7da3842e398c20e3668568b6b9d58..44981906fb913e7afbb4f7e03cd0192b9fdcda12 100644 (file)
@@ -227,7 +227,7 @@ struct sock * __inet_lookup_established(struct net *net,
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
-       unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
+       unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
        rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
 
@@ -267,13 +267,13 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        int dif = sk->sk_bound_dev_if;
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
-       unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
+       struct net *net = sock_net(sk);
+       unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
        rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_node *node;
        struct inet_timewait_sock *tw;
-       struct net *net = sock_net(sk);
 
        prefetch(head->chain.first);
        write_lock(lock);
@@ -305,18 +305,18 @@ unique:
        inet->num = lport;
        inet->sport = htons(lport);
        sk->sk_hash = hash;
-       BUG_TRAP(sk_unhashed(sk));
+       WARN_ON(!sk_unhashed(sk));
        __sk_add_node(sk, &head->chain);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        write_unlock(lock);
 
        if (twp) {
                *twp = tw;
-               NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+               NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
                inet_twsk_deschedule(tw, death_row);
-               NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+               NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 
                inet_twsk_put(tw);
        }
@@ -342,7 +342,7 @@ void __inet_hash_nolisten(struct sock *sk)
        rwlock_t *lock;
        struct inet_ehash_bucket *head;
 
-       BUG_TRAP(sk_unhashed(sk));
+       WARN_ON(!sk_unhashed(sk));
 
        sk->sk_hash = inet_sk_ehashfn(sk);
        head = inet_ehash_bucket(hashinfo, sk->sk_hash);
@@ -367,7 +367,7 @@ static void __inet_hash(struct sock *sk)
                return;
        }
 
-       BUG_TRAP(sk_unhashed(sk));
+       WARN_ON(!sk_unhashed(sk));
        list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
        lock = &hashinfo->lhash_lock;
 
@@ -450,7 +450,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                         */
                        inet_bind_bucket_for_each(tb, node, &head->chain) {
                                if (tb->ib_net == net && tb->port == port) {
-                                       BUG_TRAP(!hlist_empty(&tb->owners));
+                                       WARN_ON(hlist_empty(&tb->owners));
                                        if (tb->fastreuse >= 0)
                                                goto next_port;
                                        if (!check_established(death_row, sk,
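
Besides the BUG_TRAP conversion, the hunks above also thread a struct net * through inet_ehashfn() and NET_INC_STATS_BH(), making the established-table hash and the TIMEWAITRECYCLED counter per network namespace. A minimal sketch of what the namespace-aware hash helper looks like in this era (simplified, from memory of include/net/inet_sock.h; the exact secret handling may differ):

#include <linux/jhash.h>
#include <net/netns/hash.h>

/* Sketch: fold the namespace into the hash via net_hash_mix() so that
 * identical (saddr, sport, daddr, dport) tuples in different netns do
 * not land in predictably identical buckets. */
static inline unsigned int inet_ehashfn(struct net *net,
					const __be32 laddr, const __u16 lport,
					const __be32 faddr, const __be16 fport)
{
	return jhash_3words((__force __u32) laddr,
			    (__force __u32) faddr,
			    ((__u32) lport) << 16 | (__force __u32) fport,
			    inet_ehash_secret + net_hash_mix(net));
}

NET_INC_STATS_BH(net, field) likewise resolves the SNMP mib through the passed-in namespace rather than a global, which is why __inet_check_established() now declares net before computing the hash and reuses it for the statistics bumps.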