/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
struct sock *mroute6_socket;

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct mif_device vif6_table[MAXMIFS];	/* Devices */
static int maxvif;

#define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)

static int mroute_do_assert;			/* Set in PIM assert */
#ifdef CONFIG_IPV6_PIMSM_V2
static int mroute_do_pim;
#else
#define mroute_do_pim 0
#endif

static struct mfc6_cache *mfc6_cache_array[MFC6_LINES];	/* Forwarding cache */

static struct mfc6_cache *mfc_unres_queue;	/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;	/* Size of unresolved queue */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to the original Alan's scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   In this case the data path is entirely free of exclusive locks.
 */
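
/*
 * Illustrative sketch of the resulting locking discipline (an editorial
 * summary of the rules above, not extra API): readers on the data path
 * take mrt_lock shared; configuration paths take it exclusively with
 * BHs disabled, under the RTNL.
 *
 *	read_lock(&mrt_lock);			rtnl_lock();
 *	c = ip6mr_cache_find(...);		write_lock_bh(&mrt_lock);
 *	...					... update vif6_table ...
 *	read_unlock(&mrt_lock);			write_unlock_bh(&mrt_lock);
 *						rtnl_unlock();
 */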
static struct kmem_cache *mrt_cachep __read_mostly;

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert);
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IPV6_PIMSM_V2
static struct inet6_protocol pim6_protocol;
#endif

static struct timer_list ipmr_expire_timer;
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct mfc6_cache **cache;
	int ct;
};

static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc6_cache *mfc;

	it->cache = mfc6_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
		for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
/*
 *	The /proc interfaces to multicast routing:
 *	/proc/ip6_mr_cache and /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&mrt_lock);
	return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)
		     : SEQ_START_TOKEN);
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		return &vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ip6mr_vif_seq_ops,
				sizeof(struct ipmr_vif_iter));
}
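
/*
 * Hedged userspace sketch (not part of this file): the vif table exposed
 * above is an ordinary procfs file, readable e.g. as:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/ip6_mr_vif", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */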
static struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	/* seq_open_private() allocates the iterator, so the matching
	 * release is seq_release_private(); plain seq_release() would
	 * leak the private data. */
	.release = seq_release_private,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		     : SEQ_START_TOKEN);
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc6_cache_array);

	while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
		mfc = mfc6_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc6_cache_array)
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq,
			   NIP6_FMT " " NIP6_FMT " %-3d %8ld %8ld %8ld",
			   NIP6(mfc->mf6c_mcastgrp), NIP6(mfc->mf6c_origin),
			   mfc->mf6c_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
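
/*
 * For orientation, a /proc/net/ip6_mr_cache entry produced by the
 * format strings above looks roughly like (values illustrative):
 *
 *	Group     Origin    Iif      Pkts  Bytes     Wrong  Oifs
 *	ff1e:...  2001:...   1         42   4200         0   2:1
 *
 * where each Oifs element is "<mif>:<ttl threshold>", and unresolved
 * entries (it->cache == &mfc_unres_queue) are printed without Oifs.
 */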
static struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_mfc_seq_ops,
				sizeof(struct ipmr_mfc_iter));
}

static struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	/* see ip6mr_vif_fops: the private iterator requires
	 * seq_release_private() */
	.release = seq_release_private,
};
#endif	/* CONFIG_PROC_FS */
#ifdef CONFIG_IPV6_PIMSM_V2
static int reg_vif_num = -1;

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	/* the decapsulated packet is IPv6, so ETH_P_IPV6, not ETH_P_IP */
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	dev_put(reg_dev);
	netif_rx(skb);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}
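
/*
 * For orientation (editorial note, not from the original file): the PIM
 * Register packet handled by pim6_rcv() looks roughly like this on the
 * wire; pim6_rcv() strips the outer headers and requeues the inner
 * packet as if it had arrived on the pim6reg device:
 *
 *	+----------------+--------------------+--------------------------+
 *	| outer IPv6 hdr | PIM Register hdr   | inner IPv6 multicast     |
 *	| (unicast to RP)| (struct pimreghdr) | packet                   |
 *	+----------------+--------------------+--------------------------+
 */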
static struct inet6_protocol pim6_protocol = {
	.handler = pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static void reg_vif_setup(struct net_device *dev)
{
	dev->type = ARPHRD_PIMREG;
	dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags = IFF_NOARP;
	dev->hard_start_xmit = reg_vif_xmit;
	dev->destructor = free_netdev;
}

static struct net_device *ip6mr_reg_vif(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
/*
 *	Delete a VIF entry
 */

static int mif6_delete(int vifi)
{
	struct mif_device *v;
	struct net_device *dev;
	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi + 1 == maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mfc6_cache *c)
{
	struct sk_buff *skb;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}
/* Single timer process for all the unresolved queue. */

static void ipmr_do_expire_process(unsigned long dummy)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, **cp;

	cp = &mfc_unres_queue;

	while ((c = *cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;
		ip6mr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(unsigned long dummy)
{
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len))
		ipmr_do_expire_process(dummy);

	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < maxvif; vifi++) {
		if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
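
/*
 * Worked example: with maxvif == 3, all three mifs present and
 * ttls == { 2, 0, 1 }, the loop above yields minvif == 0, maxvif == 3
 * and res.ttls == { 2, 255, 1, ... }; mif 1 keeps the 255 sentinel and
 * is therefore skipped by the forwarding loop in ip6_mr_forward().
 */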
static int mif6_add(struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &vif6_table[vifi];
	struct net_device *dev;
	int err;

	/* Is vif busy? */
	if (MIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi + 1 > maxvif)
		maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	for (c = mfc6_cache_array[line]; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this
 *	but pim6sd expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
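
/*
 * Hedged sketch of the daemon's side of the scheme (illustrative only):
 * pim6sd reads the upcall from its ICMPv6 raw socket; the payload it
 * receives is a struct mrt6msg as defined in <linux/mroute6.h>:
 *
 *	struct mrt6msg msg;
 *	ssize_t n = recv(mrt_sock, &msg, sizeof(msg), 0);
 *
 *	if (n >= (ssize_t)sizeof(msg) && msg.im6_msgtype == MRT6MSG_NOCACHE)
 *		;	// resolve (im6_src, im6_dst), arrived on mif im6_mif
 */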
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+ sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header. It must sit just past the IPv6
		 *	header (not at the network header) so that the
		 *	final skb_pull() below delivers exactly the
		 *	mrt6msg to the daemon.
		 */
		skb_put(skb, sizeof(*msg));
		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->dst = dst_clone(pkt->dst);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb_pull(skb, sizeof(struct ipv6hdr));
	}

	if (mroute6_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mroute6_socket, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
{
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = mfc_unres_queue; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		if ((err = ip6mr_cache_report(skb, mifi, MRT6MSG_NOCACHE)) < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		ipmr_do_expire_process(1);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, **cp;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct mif_device *v;
	int ct;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	v = &vif6_table[0];
	for (ct = 0; ct < maxvif; ct++, v++) {
		if (v->dev == dev)
			mif6_delete(ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};
/*
 *	Setup for IP multicast routing
 */

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(&init_net, "ip6_mr_cache",
				  0, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

	/* unwind in the reverse order of the setup above */
#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(&init_net, "ip6_mr_vif");
proc_vif_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	del_timer(&ipmr_expire_timer);
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(&init_net, "ip6_mr_cache");
	proc_net_remove(&init_net, "ip6_mr_vif");
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	del_timer(&ipmr_expire_timer);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
{
	int line;
	struct mfc6_cache *uc, *c, **cp;
	unsigned char ttls[MAXMIFS];
	int i;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc6_cache_array[line];
	mfc6_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
	     cp = &uc->next) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ip6mr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < maxvif; i++) {
		if (!(vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
		struct mfc6_cache *c, **cp;

		cp = &mfc6_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags & MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc6_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static int ip6mr_sk_init(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mroute6_socket == NULL))
		mroute6_socket = sk;
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	if (sk == mroute6_socket) {
		write_lock_bh(&mrt_lock);
		mroute6_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	} else
		err = -EACCES;
	rtnl_unlock();

	return err;
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
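
/*
 * Hedged userspace sketch (illustrative only, error handling elided) of
 * the call sequence a daemon such as pim6sd uses against this API;
 * the physical interface index 2 below is a placeholder:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl mif = { .mif6c_mifi = 0, .mif6c_pifi = 2 };
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *	...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */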
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;

	if (optname != MRT6_INIT) {
		if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(&vif, sk == mroute6_socket);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mifi);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(&mfc);
		else
			ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
			if (v)
				ret = inet6_add_protocol(&pim6_protocol,
							 IPPROTO_PIM);
			else
				ret = inet6_del_protocol(&pim6_protocol,
							 IPPROTO_PIM);
			if (ret < 0)
				mroute_do_pim = mroute_do_assert = 0;
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
/*
 *	Getsockopt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;

	switch (optname) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &vif6_table[vr.mifi];
		if (MIF_EXISTS(vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
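
/*
 * Hedged userspace sketch: per-(S,G) counters can be read back through
 * the ioctl handled above (source_addr/group_addr are placeholders):
 *
 *	struct sioc_sg_req6 sr = {
 *		.src.sin6_addr = source_addr,
 *		.grp.sin6_addr = group_addr,
 *	};
 *
 *	if (ioctl(s, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */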
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb->dst->dev), ip6_dst_idev(skb->dst),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi fl;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);
		kfree_skb(skb);
		return 0;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl = (struct flowi) {
		.oif = vif->link,
		.nl_u = { .ip6_u =
			  { .daddr = ipv6h->daddr, }
		}
	};

	dst = ip6_route_output(&init_net, NULL, &fl);
	if (!dst)
		goto out_free;

	dst_release(skb->dst);
	skb->dst = dst;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces: if the mrouter runs a multicast program,
	 * that program should receive packets regardless of which interface
	 * it is joined on. Otherwise the program would have to join on all
	 * interfaces, while a multihomed host (or router, but not mrouter)
	 * cannot join on more than one interface without receiving
	 * duplicate packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct = maxvif - 1; ct >= 0; ct--) {
		if (vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	vif6_table[vif].pkt_in++;
	vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
static int
ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif6_table[c->mf6c_parent].dev;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}