{
        struct rt_cache_iter_state *st = rcu_dereference(seq->private);
 
-       r = r->u.rt_next;
+       r = r->u.dst.rt_next;
        while (!r) {
                rcu_read_unlock_bh();
                if (--st->bucket < 0)
        /* Kill broadcast/multicast entries very aggressively, if they
           collide in hash table with more useful entries */
        return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-               rth->fl.iif && rth->u.rt_next;
+               rth->fl.iif && rth->u.dst.rt_next;
 }
 
 static __inline__ int rt_valuable(struct rtable *rth)
                if (((*rthp)->u.dst.flags & DST_BALANCED) != 0  &&
                    compare_keys(&(*rthp)->fl, &expentry->fl)) {
                        if (*rthp == expentry) {
-                               *rthp = rth->u.rt_next;
+                               *rthp = rth->u.dst.rt_next;
                                continue;
                        } else {
-                               *rthp = rth->u.rt_next;
+                               *rthp = rth->u.dst.rt_next;
                                rt_free(rth);
                                if (removed_count)
                                        ++(*removed_count);
                } else {
                        if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
                            passedexpired && !nextstep)
-                               nextstep = &rth->u.rt_next;
+                               nextstep = &rth->u.dst.rt_next;
 
-                       rthp = &rth->u.rt_next;
+                       rthp = &rth->u.dst.rt_next;
                }
        }
 
                                /* Entry is expired even if it is in use */
                                if (time_before_eq(now, rth->u.dst.expires)) {
                                        tmo >>= 1;
-                                       rthp = &rth->u.rt_next;
+                                       rthp = &rth->u.dst.rt_next;
                                        continue;
                                }
                        } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
                                tmo >>= 1;
-                               rthp = &rth->u.rt_next;
+                               rthp = &rth->u.dst.rt_next;
                                continue;
                        }
 
                                if (!rthp)
                                        break;
                        } else {
-                               *rthp = rth->u.rt_next;
+                               *rthp = rth->u.dst.rt_next;
                                rt_free(rth);
                        }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-                       *rthp = rth->u.rt_next;
+                       *rthp = rth->u.dst.rt_next;
                        rt_free(rth);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
                }
                spin_unlock_bh(rt_hash_lock_addr(i));
 
                for (; rth; rth = next) {
-                       next = rth->u.rt_next;
+                       next = rth->u.dst.rt_next;
                        rt_free(rth);
                }
        }
                        while ((rth = *rthp) != NULL) {
                                if (!rt_may_expire(rth, tmo, expire)) {
                                        tmo >>= 1;
-                                       rthp = &rth->u.rt_next;
+                                       rthp = &rth->u.dst.rt_next;
                                        continue;
                                }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
                                        if (!rthp)
                                                break;
                                } else {
-                                       *rthp = rth->u.rt_next;
+                                       *rthp = rth->u.dst.rt_next;
                                        rt_free(rth);
                                        goal--;
                                }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-                               *rthp = rth->u.rt_next;
+                               *rthp = rth->u.dst.rt_next;
                                rt_free(rth);
                                goal--;
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
                if (compare_keys(&rth->fl, &rt->fl)) {
 #endif
                        /* Put it first */
-                       *rthp = rth->u.rt_next;
+                       *rthp = rth->u.dst.rt_next;
                        /*
                         * Since lookup is lockfree, the deletion
                         * must be visible to another weakly ordered CPU before
                         * the insertion at the start of the hash chain.
                         */
-                       rcu_assign_pointer(rth->u.rt_next,
+                       rcu_assign_pointer(rth->u.dst.rt_next,
                                           rt_hash_table[hash].chain);
                        /*
                         * Since lookup is lockfree, the update writes
 
                chain_length++;
 
-               rthp = &rth->u.rt_next;
+               rthp = &rth->u.dst.rt_next;
        }
 
        if (cand) {
                 * only 2 entries per bucket. We will see.
                 */
                if (chain_length > ip_rt_gc_elasticity) {
-                       *candp = cand->u.rt_next;
+                       *candp = cand->u.dst.rt_next;
                        rt_free(cand);
                }
        }
                }
        }
 
-       rt->u.rt_next = rt_hash_table[hash].chain;
+       rt->u.dst.rt_next = rt_hash_table[hash].chain;
 #if RT_CACHE_DEBUG >= 2
-       if (rt->u.rt_next) {
+       if (rt->u.dst.rt_next) {
                struct rtable *trt;
                printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
                       NIPQUAD(rt->rt_dst));
-               for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
+               for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
                        printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
                printk("\n");
        }
        spin_lock_bh(rt_hash_lock_addr(hash));
        ip_rt_put(rt);
        for (rthp = &rt_hash_table[hash].chain; *rthp;
-            rthp = &(*rthp)->u.rt_next)
+            rthp = &(*rthp)->u.dst.rt_next)
                if (*rthp == rt) {
-                       *rthp = rt->u.rt_next;
+                       *rthp = rt->u.dst.rt_next;
                        rt_free(rt);
                        break;
                }
                                    rth->fl.fl4_src != skeys[i] ||
                                    rth->fl.oif != ikeys[k] ||
                                    rth->fl.iif != 0) {
-                                       rthp = &rth->u.rt_next;
+                                       rthp = &rth->u.dst.rt_next;
                                        continue;
                                }
 
 
                rcu_read_lock();
                for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-                    rth = rcu_dereference(rth->u.rt_next)) {
+                    rth = rcu_dereference(rth->u.dst.rt_next)) {
                        if (rth->fl.fl4_dst == daddr &&
                            rth->fl.fl4_src == skeys[i] &&
                            rth->rt_dst  == daddr &&
 
        rcu_read_lock();
        for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-            rth = rcu_dereference(rth->u.rt_next)) {
+            rth = rcu_dereference(rth->u.dst.rt_next)) {
                if (rth->fl.fl4_dst == daddr &&
                    rth->fl.fl4_src == saddr &&
                    rth->fl.iif == iif &&
 
        rcu_read_lock_bh();
        for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-               rth = rcu_dereference(rth->u.rt_next)) {
+               rth = rcu_dereference(rth->u.dst.rt_next)) {
                if (rth->fl.fl4_dst == flp->fl4_dst &&
                    rth->fl.fl4_src == flp->fl4_src &&
                    rth->fl.iif == 0 &&
                        s_idx = 0;
                rcu_read_lock_bh();
                for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-                    rt = rcu_dereference(rt->u.rt_next), idx++) {
+                    rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
                        if (idx < s_idx)
                                continue;
                        skb->dst = dst_clone(&rt->u.dst);
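
Note on the rename, for review context: every access of the form rth->u.rt_next
becomes rth->u.dst.rt_next, i.e. the per-bucket chain pointer is now reached
through the struct dst_entry embedded in struct rtable's union u, rather than
being a sibling member of that union. A minimal sketch of the layout this diff
implies follows; the surrounding members and exact placement are assumptions
for illustration, not the real include/net/dst.h and include/net/route.h
definitions.

/*
 * Sketch only (not the real headers): why "r->u.rt_next" becomes
 * "r->u.dst.rt_next".  Member names other than rt_next, dst and u are
 * placeholders.
 */
struct rtable;				/* forward declaration */

struct dst_entry {
	struct rtable	*rt_next;	/* assumed: chain pointer now kept in dst_entry */
	/* ... other dst_entry members ... */
};

struct rtable {
	union {
		struct dst_entry dst;
		/* "struct rtable *rt_next;" no longer lives in this union */
	} u;
	/* ... fl, rt_dst, rt_flags, ... */
};

/* Access pattern, before vs. after this patch: */
/*	next = rth->u.rt_next;		*/
/*	next = rth->u.dst.rt_next;	*/

The reader-side lookup loops above keep the same shape; only the member path
changes, so rcu_dereference() is applied to rth->u.dst.rt_next instead of
rth->u.rt_next.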