#endif
 
 static struct mfc6_cache *mfc_unres_queue;             /* Queue of unresolved entries */
-static atomic_t cache_resolve_queue_len;               /* Size of unresolved   */
 
 /* Special spinlock for queue of unresolved entries */
 static DEFINE_SPINLOCK(mfc_unres_lock);
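
The counter removed above does not simply disappear; it moves into
struct netns_ipv6 so that each network namespace counts its own
unresolved cache entries. A minimal sketch of the field this patch
assumes (the real member is added to include/net/netns/ipv6.h by a
companion patch in the series; placement here is illustrative):

        struct netns_ipv6 {
                /* ... existing per-namespace ipv6 state ... */
        #ifdef CONFIG_IPV6_MROUTE
                /* number of queued-but-unresolved mfc6 cache entries
                 * owned by this namespace */
                atomic_t        cache_resolve_queue_len;
        #endif
        };
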
@@ ... @@ static void ip6mr_destroy_unres(struct mfc6_cache *c)
 {
        struct sk_buff *skb;
 
-       atomic_dec(&cache_resolve_queue_len);
+       atomic_dec(&init_net.ipv6.cache_resolve_queue_len);
 
        while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
                if (ipv6_hdr(skb)->version == 0) {
@@ ... @@ static void ipmr_do_expire_process(unsigned long dummy)
                ip6mr_destroy_unres(c);
        }
 
-       if (atomic_read(&cache_resolve_queue_len))
+       if (mfc_unres_queue != NULL)
                mod_timer(&ipmr_expire_timer, jiffies + expires);
 }
 
@@ ... @@ static void ipmr_expire_process(unsigned long dummy)
                return;
        }
 
-       if (atomic_read(&cache_resolve_queue_len))
+       if (mfc_unres_queue != NULL)
                ipmr_do_expire_process(dummy);
 
        spin_unlock(&mfc_unres_lock);
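
Both timer-path checks above switch from reading the counter to testing
the list head directly. The unresolved queue and its expiry timer are
still global at this point in the series, while the counter is now
per-namespace, so "init_net's counter is zero" no longer implies "the
queue is empty". A hypothetical helper to state the new invariant
(illustrative, not part of the patch):

        /* The timer must keep running as long as ANY namespace still
         * has unresolved entries queued; only the shared list head can
         * tell us that. Caller holds mfc_unres_lock. */
        static bool unres_queue_needs_timer(void)
        {
                return mfc_unres_queue != NULL;
        }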
 
@@ ... @@ ip6mr_cache_unresolved(int mifi, struct sk_buff *skb)
        spin_lock_bh(&mfc_unres_lock);
        for (c = mfc_unres_queue; c; c = c->next) {
-               if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
+               if (net_eq(mfc6_net(c), &init_net) &&
+                   ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
                    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
                        break;
        }
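
The lookup now also has to match on the owning namespace, since entries
from several namespaces can share the one unresolved queue. Roughly what
the two helpers involved look like (both predate this patch; mfc6_net()
assumes the owning struct net is recorded in each mfc6_cache entry, and
this sketch is from memory, not verbatim):

        /* return the namespace an entry was created in */
        static inline struct net *mfc6_net(struct mfc6_cache *mfc)
        {
                return read_pnet(&mfc->mfc6_net);
        }

        /* pointer comparison; reduces to constant 1 when CONFIG_NET_NS
         * is off, so the extra match costs nothing there */
        static inline int net_eq(const struct net *net1,
                                 const struct net *net2)
        {
                return net1 == net2;
        }
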
@@ ... @@ ip6mr_cache_unresolved(int mifi, struct sk_buff *skb)
                /*
                 *      Create a new entry if allowable
                 */
 
-               if (atomic_read(&cache_resolve_queue_len) >= 10 ||
+               if (atomic_read(&init_net.ipv6.cache_resolve_queue_len) >= 10 ||
                    (c = ip6mr_cache_alloc_unres(&init_net)) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);
 
                        return err;
                }
 
-               atomic_inc(&cache_resolve_queue_len);
+               atomic_inc(&init_net.ipv6.cache_resolve_queue_len);
                c->next = mfc_unres_queue;
                mfc_unres_queue = c;
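
With the counter per-namespace, the bound of 10 queued-but-unresolved
entries becomes a per-namespace limit rather than a global one. The
allocator called above takes the namespace so it can tag the new entry
with its owner; approximately (a sketch, assuming an mfc6_net_set()
setter mirroring mfc6_net(), with the timeout value illustrative):

        static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
        {
                struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep,
                                                         GFP_ATOMIC);
                if (c == NULL)
                        return NULL;
                skb_queue_head_init(&c->mfc_un.unres.unresolved);
                c->mfc_un.unres.expires = jiffies + 10 * HZ;
                mfc6_net_set(c, net);   /* record owner for net_eq() */
                return c;
        }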
 
@@ ... @@ static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
        spin_lock_bh(&mfc_unres_lock);
        for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
             cp = &uc->next) {
-               if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
+               if (net_eq(mfc6_net(uc), &init_net) &&
+                   ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
                    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
                        *cp = uc->next;
-                       if (atomic_dec_and_test(&cache_resolve_queue_len))
-                               del_timer(&ipmr_expire_timer);
+                       atomic_dec(&init_net.ipv6.cache_resolve_queue_len);
                        break;
                }
        }
+       if (mfc_unres_queue == NULL)
+               del_timer(&ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);
 
        if (uc) {
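
Two things change in the hunk above. The old atomic_dec_and_test() /
del_timer() pairing is gone: one namespace's counter hitting zero no
longer proves the shared queue is empty, so the timer is only stopped
once the whole list has drained, via the new check after the loop. And
the walk uses a pointer-to-pointer so the matching entry can be spliced
out without tracking a separate "previous" node. The bare shape of that
idiom (generic illustration, not patch code):

        struct node { struct node *next; };

        static void unlink(struct node **head, struct node *victim)
        {
                struct node **cp;

                for (cp = head; *cp != NULL; cp = &(*cp)->next) {
                        if (*cp == victim) {
                                *cp = victim->next;     /* splice out */
                                break;
                        }
                }
        }
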
@@ ... @@ static void mroute_clean_tables(struct sock *sk)
                }
        }
 
-       if (atomic_read(&cache_resolve_queue_len) != 0) {
-               struct mfc6_cache *c;
+       if (atomic_read(&init_net.ipv6.cache_resolve_queue_len) != 0) {
+               struct mfc6_cache *c, **cp;
 
                spin_lock_bh(&mfc_unres_lock);
-               while (mfc_unres_queue != NULL) {
-                       c = mfc_unres_queue;
-                       mfc_unres_queue = c->next;
-                       spin_unlock_bh(&mfc_unres_lock);
-
+               cp = &mfc_unres_queue;
+               while ((c = *cp) != NULL) {
+                       if (!net_eq(mfc6_net(c), &init_net)) {
+                               cp = &c->next;
+                               continue;
+                       }
+                       *cp = c->next;
                        ip6mr_destroy_unres(c);
-
-                       spin_lock_bh(&mfc_unres_lock);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }
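
The flush is now selective: entries owned by other namespaces are
skipped and stay queued, and only init_net's entries are unlinked and
destroyed. Note that ip6mr_destroy_unres() is now called with
mfc_unres_lock held, which matches the expiry path above, where it was
already invoked under that lock. The generic shape of this
filter-while-unlinking walk (illustration only; names are made up):

        #include <stdlib.h>

        struct entry { struct entry *next; int owner; };

        static void flush_owned(struct entry **head, int owner)
        {
                struct entry *e, **cp = head;

                while ((e = *cp) != NULL) {
                        if (e->owner != owner) {
                                cp = &e->next;  /* keep foreign entry */
                                continue;
                        }
                        *cp = e->next;          /* unlink ours */
                        free(e);
                }
        }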