struct mfc6_cache
 {
        struct mfc6_cache *next;                /* Next entry on cache line     */
+#ifdef CONFIG_NET_NS
+       struct net *mfc6_net;
+#endif
        struct in6_addr mf6c_mcastgrp;                  /* Group the entry belongs to   */
        struct in6_addr mf6c_origin;                    /* Source of packet             */
        mifi_t mf6c_parent;                     /* Source interface             */
        } mfc_un;
 };
 
+/* Return the network namespace a multicast cache entry belongs to.
+ * read_pnet() handles the !CONFIG_NET_NS case (field absent, init_net
+ * is implied), so callers need no ifdefs.
+ */
+static inline
+struct net *mfc6_net(const struct mfc6_cache *mfc)
+{
+       return read_pnet(&mfc->mfc6_net);
+}
+
+/* Tag @mfc with its owning namespace.  hold_net() takes a reference so
+ * the namespace cannot go away while the cache entry is alive; the
+ * reference is dropped via release_net() when the entry is freed.
+ */
+static inline
+void mfc6_net_set(struct mfc6_cache *mfc, struct net *net)
+{
+       write_pnet(&mfc->mfc6_net, hold_net(net));
+}
+
 #define MFC_STATIC             1
 #define MFC_NOTIFY             2
 
 
        return 0;
 }
 
+/* Free a multicast cache entry, dropping the netns reference that was
+ * taken by mfc6_net_set() at allocation time.  All free paths must use
+ * this instead of calling kmem_cache_free() directly, or the namespace
+ * reference would leak.
+ */
+static inline void ip6mr_cache_free(struct mfc6_cache *c)
+{
+       release_net(mfc6_net(c));
+       kmem_cache_free(mrt_cachep, c);
+}
+
 /* Destroy an unresolved cache entry, killing queued skbs
    and reporting error to netlink readers.
  */
                        kfree_skb(skb);
        }
 
-       kmem_cache_free(mrt_cachep, c);
+       ip6mr_cache_free(c);
 }
 
 
 /*
  *     Allocate a multicast cache entry
  */
-static struct mfc6_cache *ip6mr_cache_alloc(void)
+static struct mfc6_cache *ip6mr_cache_alloc(struct net *net)
 {
        struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        if (c == NULL)
                return NULL;
        c->mfc_un.res.minvif = MAXMIFS;
+       /* Record owning netns; ref is released in ip6mr_cache_free() */
+       mfc6_net_set(c, net);
        return c;
 }
 
+/* Allocate a cache entry for the unresolved queue.  Uses GFP_ATOMIC
+ * since this runs from the packet-receive path; the entry expires in
+ * 10 seconds if the route is never resolved.
+ */
-static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
+static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
 {
        struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
        if (c == NULL)
                return NULL;
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10 * HZ;
+       /* Record owning netns; ref is released in ip6mr_cache_free() */
+       mfc6_net_set(c, net);
        return c;
 }
 
                 */
 
                if (atomic_read(&cache_resolve_queue_len) >= 10 ||
-                   (c = ip6mr_cache_alloc_unres()) == NULL) {
+                   (c = ip6mr_cache_alloc_unres(&init_net)) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);
 
                        kfree_skb(skb);
                         */
                        spin_unlock_bh(&mfc_unres_lock);
 
-                       kmem_cache_free(mrt_cachep, c);
+                       ip6mr_cache_free(c);
                        kfree_skb(skb);
                        return err;
                }
                        *cp = c->next;
                        write_unlock_bh(&mrt_lock);
 
-                       kmem_cache_free(mrt_cachep, c);
+                       ip6mr_cache_free(c);
                        return 0;
                }
        }
        if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
                return -EINVAL;
 
-       c = ip6mr_cache_alloc();
+       c = ip6mr_cache_alloc(&init_net);
        if (c == NULL)
                return -ENOMEM;
 
 
        if (uc) {
                ip6mr_cache_resolve(uc, c);
-               kmem_cache_free(mrt_cachep, uc);
+               ip6mr_cache_free(uc);
        }
        return 0;
 }
                        *cp = c->next;
                        write_unlock_bh(&mrt_lock);
 
-                       kmem_cache_free(mrt_cachep, c);
+                       ip6mr_cache_free(c);
                }
        }