/*
 *      IP multicast routing support for mrouted 3.6/3.8
 *
 *              (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *        Linux Consultancy and Custom Driver Development
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Michael Chastain        :       Incorrect size of copying.
 *      Alan Cox                :       Added the cache manager code
 *      Alan Cox                :       Fixed the clone/copy bug and device race.
 *      Mike McLagan            :       Routing by source
 *      Malcolm Beattie         :       Buffer handling fixes.
 *      Alexey Kuznetsov        :       Double buffer free and other fixes.
 *      SVR Anand               :       Fixed several multicast bugs and problems.
 *      Alexey Kuznetsov        :       Status, optimisations and more.
 *      Brad Parker             :       Better behaviour on mrouted upcall
 *                                      overflow.
 *      Carlos Picoto           :       PIMv1 Support
 *      Pavlin Ivanov Radoslavov:       PIMv2 Registers must checksum only PIM header
 *                                      Relax this requirement to work with older peers.
 *
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
#endif

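/* Per-table state: one mr_table exists per multicast routing table,
 * i.e. just the default table per netns unless
 * CONFIG_IP_MROUTE_MULTIPLE_TABLES is enabled.
 */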
struct mr_table {
        struct list_head        list;
        possible_net_t          net;
        u32                     id;
        struct sock __rcu       *mroute_sk;
        struct timer_list       ipmr_expire_timer;
        struct list_head        mfc_unres_queue;
        struct list_head        mfc_cache_array[MFC_LINES];
        struct vif_device       vif_table[MAXVIFS];
        int                     maxvif;
        atomic_t                cache_resolve_queue_len;
        bool                    mroute_do_assert;
        bool                    mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
        int                     mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
        struct fib_rule         common;
};

struct ipmr_result {
        struct mr_table         *mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *      Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * This way the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct sk_buff *skb, struct mfc_cache *cache,
                          int local);
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
        list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        ipmr_for_each_table(mrt, net) {
                if (mrt->id == id)
                        return mrt;
        }
        return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        int err;
        struct ipmr_result res;
        struct fib_lookup_arg arg = {
                .result = &res,
                .flags = FIB_LOOKUP_NOREF,
        };

        err = fib_rules_lookup(net->ipv4.mr_rules_ops,
                               flowi4_to_flowi(flp4), 0, &arg);
        if (err < 0)
                return err;
        *mrt = res.mrt;
        return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
        struct ipmr_result *res = arg->result;
        struct mr_table *mrt;

        switch (rule->action) {
        case FR_ACT_TO_TBL:
                break;
        case FR_ACT_UNREACHABLE:
                return -ENETUNREACH;
        case FR_ACT_PROHIBIT:
                return -EACCES;
        case FR_ACT_BLACKHOLE:
        default:
                return -EINVAL;
        }

        mrt = ipmr_get_table(rule->fr_net, rule->table);
        if (!mrt)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
        return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
        FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb)
{
        return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
        return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
{
        frh->dst_len = 0;
        frh->src_len = 0;
        frh->tos     = 0;
        return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
        .family         = RTNL_FAMILY_IPMR,
        .rule_size      = sizeof(struct ipmr_rule),
        .addr_size      = sizeof(u32),
        .action         = ipmr_rule_action,
        .match          = ipmr_rule_match,
        .configure      = ipmr_rule_configure,
        .compare        = ipmr_rule_compare,
        .fill           = ipmr_rule_fill,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .policy         = ipmr_rule_policy,
        .owner          = THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
        struct fib_rules_ops *ops;
        struct mr_table *mrt;
        int err;

        ops = fib_rules_register(&ipmr_rules_ops_template, net);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        INIT_LIST_HEAD(&net->ipv4.mr_tables);

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (!mrt) {
                err = -ENOMEM;
                goto err1;
        }

        err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
        if (err < 0)
                goto err2;

        net->ipv4.mr_rules_ops = ops;
        return 0;

err2:
        ipmr_free_table(mrt);
err1:
        fib_rules_unregister(ops);
        return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        struct mr_table *mrt, *next;

        rtnl_lock();
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
                ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
        rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
        for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        *mrt = net->ipv4.mrt;
        return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
        net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        rtnl_lock();
        ipmr_free_table(net->ipv4.mrt);
        net->ipv4.mrt = NULL;
        rtnl_unlock();
}
#endif

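/* With CONFIG_IP_MROUTE_MULTIPLE_TABLES the table is selected by the
 * fib rules above; otherwise net->ipv4.mrt is the one and only table.
 * Note that ipmr_new_table() is idempotent: it returns the existing
 * table when one with the requested id is already registered.
 */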
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
        struct mr_table *mrt;
        unsigned int i;

        mrt = ipmr_get_table(net, id);
        if (mrt)
                return mrt;

        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
        if (!mrt)
                return NULL;
        write_pnet(&mrt->net, net);
        mrt->id = id;

        /* Forwarding cache */
        for (i = 0; i < MFC_LINES; i++)
                INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

        INIT_LIST_HEAD(&mrt->mfc_unres_queue);

        setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
                    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
        mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
        return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
        del_timer_sync(&mrt->ipmr_expire_timer);
        mroute_clean_tables(mrt);
        kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
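/* DVMRP tunnel vifs are created by driving the generic IPIP device
 * "tunl0" through its SIOCADDTUNNEL/SIOCDELTUNNEL ioctls from kernel
 * context, hence the get_fs()/set_fs(KERNEL_DS) dance below.
 */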

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
        struct net *net = dev_net(dev);

        dev_close(dev);

        dev = __dev_get_by_name(net, "tunl0");
        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
                        set_fs(oldfs);
                }
        }
}

static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
        struct net_device  *dev;

        dev = __dev_get_by_name(net, "tunl0");

        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                int err;
                struct ifreq ifr;
                struct ip_tunnel_parm p;
                struct in_device  *in_dev;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
                        set_fs(oldfs);
                } else {
                        err = -EOPNOTSUPP;
                }
                dev = NULL;

                if (err == 0 &&
                    (dev = __dev_get_by_name(net, p.name)) != NULL) {
                        dev->flags |= IFF_MULTICAST;

                        in_dev = __in_dev_get_rtnl(dev);
                        if (!in_dev)
                                goto failure;

                        ipv4_devconf_setall(in_dev);
                        neigh_parms_data_state_setall(in_dev->arp_parms);
                        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

                        if (dev_open(dev))
                                goto failure;
                        dev_hold(dev);
                }
        }
        return dev;

failure:
        /* allow the registration to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}

#ifdef CONFIG_IP_PIMSM

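/* Transmitting on the PIM register vif sends nothing on the wire: the
 * whole packet is bounced to the daemon as an IGMPMSG_WHOLEPKT upcall
 * (so that user space can encapsulate it in a PIM Register message)
 * and the skb is then freed here.
 */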
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct flowi4 fl4 = {
                .flowi4_oif     = dev->ifindex,
                .flowi4_iif     = skb->skb_iif ? : LOOPBACK_IFINDEX,
                .flowi4_mark    = skb->mark,
        };
        int err;

        err = ipmr_fib_lookup(net, &fl4, &mrt);
        if (err < 0) {
                kfree_skb(skb);
                return err;
        }

        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
        return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
        .ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
        dev->type               = ARPHRD_PIMREG;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &reg_vif_netdev_ops;
        dev->destructor         = free_netdev;
        dev->features           |= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        struct net_device *dev;
        struct in_device *in_dev;
        char name[IFNAMSIZ];

        if (mrt->id == RT_TABLE_DEFAULT)
                sprintf(name, "pimreg");
        else
                sprintf(name, "pimreg%u", mrt->id);

        dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

        if (!dev)
                return NULL;

        dev_net_set(dev, net);

        if (register_netdevice(dev)) {
                free_netdev(dev);
                return NULL;
        }

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
        if (!in_dev) {
                rcu_read_unlock();
                goto failure;
        }

        ipv4_devconf_setall(in_dev);
        neigh_parms_data_state_setall(in_dev->arp_parms);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
        rcu_read_unlock();

        if (dev_open(dev))
                goto failure;

        dev_hold(dev);

        return dev;

failure:
        /* allow the registration to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}
#endif

/**
 *      vif_delete - Delete a VIF entry
 *      @notify: Set to 1 if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= mrt->maxvif)
                return -EADDRNOTAVAIL;

        v = &mrt->vif_table[vifi];

        write_lock_bh(&mrt_lock);
        dev = v->dev;
        v->dev = NULL;

        if (!dev) {
                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;
        }

#ifdef CONFIG_IP_PIMSM
        if (vifi == mrt->mroute_reg_vif_num)
                mrt->mroute_reg_vif_num = -1;
#endif

        if (vifi + 1 == mrt->maxvif) {
                int tmp;

                for (tmp = vifi - 1; tmp >= 0; tmp--) {
                        if (VIF_EXISTS(mrt, tmp))
                                break;
                }
                mrt->maxvif = tmp+1;
        }

        write_unlock_bh(&mrt_lock);

        dev_set_allmulti(dev, -1);

        in_dev = __in_dev_get_rtnl(dev);
        if (in_dev) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                inet_netconf_notify_devconf(dev_net(dev),
                                            NETCONFA_MC_FORWARDING,
                                            dev->ifindex, &in_dev->cnf);
                ip_rt_multicast_event(in_dev);
        }

        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);

        dev_put(dev);
        return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
        struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

        kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
        call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
        struct net *net = read_pnet(&mrt->net);
        struct sk_buff *skb;
        struct nlmsgerr *e;

        atomic_dec(&mrt->cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        e = nlmsg_data(nlh);
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        kfree_skb(skb);
                }
        }

        ipmr_cache_free(c);
}


/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
        struct mr_table *mrt = (struct mr_table *)arg;
        unsigned long now;
        unsigned long expires;
        struct mfc_cache *c, *next;

        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
                return;
        }

        if (list_empty(&mrt->mfc_unres_queue))
                goto out;

        now = jiffies;
        expires = 10*HZ;

        list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;
                        if (interval < expires)
                                expires = interval;
                        continue;
                }

                list_del(&c->list);
                mroute_netlink_event(mrt, c, RTM_DELROUTE);
                ipmr_destroy_unres(mrt, c);
        }

        if (!list_empty(&mrt->mfc_unres_queue))
                mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
        spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write-locked mrt_lock. */
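/* In ttls[] a value of 0 or 255 means "never forward on this vif";
 * anything else becomes that vif's TTL threshold. minvif/maxvif are
 * maintained so the forwarding loop only scans the live vif range.
 */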

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
                                   unsigned char *ttls)
{
        int vifi;

        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

        for (vifi = 0; vifi < mrt->maxvif; vifi++) {
                if (VIF_EXISTS(mrt, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
                }
        }
}

static int vif_add(struct net *net, struct mr_table *mrt,
                   struct vifctl *vifc, int mrtsock)
{
        int vifi = vifc->vifc_vifi;
        struct vif_device *v = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;
        int err;

        /* Is vif busy ? */
        if (VIF_EXISTS(mrt, vifi))
                return -EADDRINUSE;

        switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
        case VIFF_REGISTER:
                /*
                 * Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (mrt->mroute_reg_vif_num >= 0)
                        return -EADDRINUSE;
                dev = ipmr_reg_vif(net, mrt);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        unregister_netdevice(dev);
                        dev_put(dev);
                        return err;
                }
                break;
#endif
        case VIFF_TUNNEL:
                dev = ipmr_new_tunnel(net, vifc);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        ipmr_del_tunnel(dev, vifc);
                        dev_put(dev);
                        return err;
                }
                break;

        case VIFF_USE_IFINDEX:
        case 0:
                if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
                        dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
                        if (dev && !__in_dev_get_rtnl(dev)) {
                                dev_put(dev);
                                return -EADDRNOTAVAIL;
                        }
                } else {
                        dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
                }
                if (!dev)
                        return -EADDRNOTAVAIL;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        dev_put(dev);
                        return err;
                }
                break;
        default:
                return -EINVAL;
        }

        in_dev = __in_dev_get_rtnl(dev);
        if (!in_dev) {
                dev_put(dev);
                return -EADDRNOTAVAIL;
        }
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
                                    &in_dev->cnf);
        ip_rt_multicast_event(in_dev);

        /* Fill in the VIF structures */

        v->rate_limit = vifc->vifc_rate_limit;
        v->local = vifc->vifc_lcl_addr.s_addr;
        v->remote = vifc->vifc_rmt_addr.s_addr;
        v->flags = vifc->vifc_flags;
        if (!mrtsock)
                v->flags |= VIFF_STATIC;
        v->threshold = vifc->vifc_threshold;
        v->bytes_in = 0;
        v->bytes_out = 0;
        v->pkt_in = 0;
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
                v->link = dev_get_iflink(dev);

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
        v->dev = dev;
#ifdef CONFIG_IP_PIMSM
        if (v->flags & VIFF_REGISTER)
                mrt->mroute_reg_vif_num = vifi;
#endif
        if (vifi+1 > mrt->maxvif)
                mrt->maxvif = vifi+1;
        write_unlock_bh(&mrt_lock);
        return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
{
        int line = MFC_HASH(mcastgrp, origin);
        struct mfc_cache *c;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
                        return c;
        }
        return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
                                                    int vifi)
{
        int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
        struct mfc_cache *c;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
                if (c->mfc_origin == htonl(INADDR_ANY) &&
                    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
                    c->mfc_un.res.ttls[vifi] < 255)
                        return c;

        return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
                                             __be32 mcastgrp, int vifi)
{
        int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
        struct mfc_cache *c, *proxy;

        if (mcastgrp == htonl(INADDR_ANY))
                goto skip;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
                if (c->mfc_origin == htonl(INADDR_ANY) &&
                    c->mfc_mcastgrp == mcastgrp) {
                        if (c->mfc_un.res.ttls[vifi] < 255)
                                return c;

                        /* It's ok if the vifi is part of the static tree */
                        proxy = ipmr_cache_find_any_parent(mrt,
                                                           c->mfc_parent);
                        if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
                                return c;
                }

skip:
        return ipmr_cache_find_any_parent(mrt, vifi);
}

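/* The two wildcard lookups above back the (*,G) and (*,*) proxy
 * entries that user space installs with MRT_ADD_MFC_PROXY.
 */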
/*
 *      Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

        if (c)
                c->mfc_un.res.minvif = MAXVIFS;
        return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

        if (c) {
                skb_queue_head_init(&c->mfc_un.unres.unresolved);
                c->mfc_un.unres.expires = jiffies + 10*HZ;
        }
        return c;
}

/*
 *      A cache entry has gone from the unresolved queue into the
 *      resolved state: replay its pending packets.
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                               struct mfc_cache *uc, struct mfc_cache *c)
{
        struct sk_buff *skb;
        struct nlmsgerr *e;

        /* Play the pending entries through our router */

        while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

                        if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
                                nlh->nlmsg_len = skb_tail_pointer(skb) -
                                                 (u8 *)nlh;
                        } else {
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);
                                e = nlmsg_data(nlh);
                                e->error = -EMSGSIZE;
                                memset(&e->msg, 0, sizeof(e->msg));
                        }

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        ip_mr_forward(net, mrt, skb, c, 0);
                }
        }
}

/*
 *      Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *      expects the following bizarre scheme.
 *
 *      Called under mrt_lock.
 */

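/* The report skb is shaped like an IGMP packet: for plain asserts the
 * original IP header is copied with its protocol field forced to 0 and
 * a struct igmpmsg overlaid on it; for IGMPMSG_WHOLEPKT the whole
 * packet follows. It is then queued on mroute_sk, so the daemon reads
 * it from its raw IGMP socket.
 */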
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
{
        struct sk_buff *skb;
        const int ihl = ip_hdrlen(pkt);
        struct igmphdr *igmp;
        struct igmpmsg *msg;
        struct sock *mroute_sk;
        int ret;

#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
        else
#endif
                skb = alloc_skb(128, GFP_ATOMIC);

        if (!skb)
                return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT) {
                /* Ugly, but we have no choice with this interface.
                 * Duplicate old header, fix ihl, length etc.
                 * And all this only to mangle msg->im_msgtype and
                 * to set msg->im_mbz to "mbz" :-)
                 */
                skb_push(skb, sizeof(struct iphdr));
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;
                msg->im_mbz = 0;
                msg->im_vif = mrt->mroute_reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));
        } else
#endif
        {

        /* Copy the IP header */

        skb_set_network_header(skb, skb->len);
        skb_put(skb, ihl);
        skb_copy_to_linear_data(skb, pkt->data, ihl);
        ip_hdr(skb)->protocol = 0;      /* Flag to the kernel this is a route add */
        msg = (struct igmpmsg *)skb_network_header(skb);
        msg->im_vif = vifi;
        skb_dst_set(skb, dst_clone(skb_dst(pkt)));

        /* Add our header */

        igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
        igmp->type      =
        msg->im_msgtype = assert;
        igmp->code      = 0;
        ip_hdr(skb)->tot_len = htons(skb->len);         /* Fix the length */
        skb->transport_header = skb->network_header;
        }

        rcu_read_lock();
        mroute_sk = rcu_dereference(mrt->mroute_sk);
        if (!mroute_sk) {
                rcu_read_unlock();
                kfree_skb(skb);
                return -EINVAL;
        }

        /* Deliver to mrouted */

        ret = sock_queue_rcv_skb(mroute_sk, skb);
        rcu_read_unlock();
        if (ret < 0) {
                net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
                kfree_skb(skb);
        }

        return ret;
}

/*
 *      Queue a packet for resolution, creating the unresolved cache
 *      entry if one does not exist yet.
 */
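/* At most ten unresolved entries are kept per table, and each entry
 * buffers only a few packets (appends stop once qlen exceeds 3) while
 * waiting for the daemon's reply.
 */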

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
        bool found = false;
        int err;
        struct mfc_cache *c;
        const struct iphdr *iph = ip_hdr(skb);

        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
                if (c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                /* Create a new entry if allowable */

                if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
                    (c = ipmr_cache_alloc_unres()) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);

                        kfree_skb(skb);
                        return -ENOBUFS;
                }

                /* Fill in the new cache entry */

                c->mfc_parent   = -1;
                c->mfc_origin   = iph->saddr;
                c->mfc_mcastgrp = iph->daddr;

                /* Reflect first query at mrouted. */

                err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
                if (err < 0) {
                        /* If the report failed throw the cache entry
                           out - Brad Parker
                         */
                        spin_unlock_bh(&mfc_unres_lock);

                        ipmr_cache_free(c);
                        kfree_skb(skb);
                        return err;
                }

                atomic_inc(&mrt->cache_resolve_queue_len);
                list_add(&c->list, &mrt->mfc_unres_queue);
                mroute_netlink_event(mrt, c, RTM_NEWROUTE);

                if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
                        mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
        }

        /* See if we can append the packet */

        if (c->mfc_un.unres.unresolved.qlen > 3) {
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
                err = 0;
        }

        spin_unlock_bh(&mfc_unres_lock);
        return err;
}

/*
 *      MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
        int line;
        struct mfc_cache *c, *next;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
                    (parent == -1 || parent == c->mfc_parent)) {
                        list_del_rcu(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_cache_free(c);
                        return 0;
                }
        }
        return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock, int parent)
{
        bool found = false;
        int line;
        struct mfc_cache *uc, *c;

        if (mfc->mfcc_parent >= MAXVIFS)
                return -ENFILE;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
                    (parent == -1 || parent == c->mfc_parent)) {
                        found = true;
                        break;
                }
        }

        if (found) {
                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
                if (!mrtsock)
                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);
                mroute_netlink_event(mrt, c, RTM_NEWROUTE);
                return 0;
        }

        if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
            !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;

        c = ipmr_cache_alloc();
        if (!c)
                return -ENOMEM;

        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
        if (!mrtsock)
                c->mfc_flags |= MFC_STATIC;

        list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

        /*
         *      Check to see if we resolved a queued list. If so we
         *      need to send on the frames and tidy up.
         */
        found = false;
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
                if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
                        list_del(&uc->list);
                        atomic_dec(&mrt->cache_resolve_queue_len);
                        found = true;
                        break;
                }
        }
        if (list_empty(&mrt->mfc_unres_queue))
                del_timer(&mrt->ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);

        if (found) {
                ipmr_cache_resolve(net, mrt, uc, c);
                ipmr_cache_free(uc);
        }
        mroute_netlink_event(mrt, c, RTM_NEWROUTE);
        return 0;
}

/*
 *      Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
        int i;
        LIST_HEAD(list);
        struct mfc_cache *c, *next;

        /* Shut down all active vif entries */

        for (i = 0; i < mrt->maxvif; i++) {
                if (!(mrt->vif_table[i].flags & VIFF_STATIC))
                        vif_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);

        /* Wipe the cache */

        for (i = 0; i < MFC_LINES; i++) {
                list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
                        if (c->mfc_flags & MFC_STATIC)
                                continue;
                        list_del_rcu(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_cache_free(c);
                }
        }

        if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
                spin_lock_bh(&mfc_unres_lock);
                list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                        list_del(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_destroy_unres(mrt, c);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }
}
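
/* Note that static entries (VIFF_STATIC vifs and MFC_STATIC routes,
 * i.e. those added by something other than the mroute socket)
 * deliberately survive mroute_clean_tables().
 */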

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        rtnl_lock();
        ipmr_for_each_table(mrt, net) {
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
                        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                        RCU_INIT_POINTER(mrt->mroute_sk, NULL);
                        mroute_clean_tables(mrt);
                }
        }
        rtnl_unlock();
}

/*
 *      Socket options and virtual interface manipulation. The whole
 *      virtual interface system is a complete heap, but unfortunately
 *      that's how BSD mrouted happens to think. Maybe one day with a proper
 *      MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
        int ret, parent = 0;
        struct vifctl vif;
        struct mfcctl mfc;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_IGMP)
                return -EOPNOTSUPP;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        if (optname != MRT_INIT) {
                if (sk != rcu_access_pointer(mrt->mroute_sk) &&
                    !ns_capable(net->user_ns, CAP_NET_ADMIN))
                        return -EACCES;
        }

        switch (optname) {
        case MRT_INIT:
                if (optlen != sizeof(int))
                        return -EINVAL;

                rtnl_lock();
                if (rtnl_dereference(mrt->mroute_sk)) {
                        rtnl_unlock();
                        return -EADDRINUSE;
                }

                ret = ip_ra_control(sk, 1, mrtsock_destruct);
                if (ret == 0) {
                        rcu_assign_pointer(mrt->mroute_sk, sk);
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
                        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                }
                rtnl_unlock();
                return ret;
        case MRT_DONE:
                if (sk != rcu_access_pointer(mrt->mroute_sk))
                        return -EACCES;
                return ip_ra_control(sk, 0, NULL);
        case MRT_ADD_VIF:
        case MRT_DEL_VIF:
                if (optlen != sizeof(vif))
                        return -EINVAL;
                if (copy_from_user(&vif, optval, sizeof(vif)))
                        return -EFAULT;
                if (vif.vifc_vifi >= MAXVIFS)
                        return -ENFILE;
                rtnl_lock();
                if (optname == MRT_ADD_VIF) {
                        ret = vif_add(net, mrt, &vif,
                                      sk == rtnl_dereference(mrt->mroute_sk));
                } else {
                        ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
                }
                rtnl_unlock();
                return ret;

                /*
                 *      Manipulate the forwarding caches. These live
                 *      in a sort of kernel/user symbiosis.
                 */
        case MRT_ADD_MFC:
        case MRT_DEL_MFC:
                parent = -1;
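                /* fall through */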
        case MRT_ADD_MFC_PROXY:
        case MRT_DEL_MFC_PROXY:
                if (optlen != sizeof(mfc))
                        return -EINVAL;
                if (copy_from_user(&mfc, optval, sizeof(mfc)))
                        return -EFAULT;
                if (parent == 0)
                        parent = mfc.mfcc_parent;
                rtnl_lock();
                if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
                        ret = ipmr_mfc_delete(mrt, &mfc, parent);
                else
                        ret = ipmr_mfc_add(net, mrt, &mfc,
                                           sk == rtnl_dereference(mrt->mroute_sk),
                                           parent);
                rtnl_unlock();
                return ret;
                /*
                 *      Control PIM assert.
                 */
        case MRT_ASSERT:
        {
                int v;
                if (optlen != sizeof(v))
                        return -EINVAL;
                if (get_user(v, (int __user *)optval))
                        return -EFAULT;
                mrt->mroute_do_assert = v;
                return 0;
        }
#ifdef CONFIG_IP_PIMSM
        case MRT_PIM:
        {
                int v;

                if (optlen != sizeof(v))
                        return -EINVAL;
                if (get_user(v, (int __user *)optval))
                        return -EFAULT;
                v = !!v;

                rtnl_lock();
                ret = 0;
                if (v != mrt->mroute_do_pim) {
                        mrt->mroute_do_pim = v;
                        mrt->mroute_do_assert = v;
                }
                rtnl_unlock();
                return ret;
        }
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        case MRT_TABLE:
        {
                u32 v;

                if (optlen != sizeof(u32))
                        return -EINVAL;
                if (get_user(v, (u32 __user *)optval))
                        return -EFAULT;

                /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
                if (v != RT_TABLE_DEFAULT && v >= 1000000000)
                        return -EINVAL;

                rtnl_lock();
                ret = 0;
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        ret = -EBUSY;
                } else {
                        if (!ipmr_new_table(net, v))
                                ret = -ENOMEM;
                        else
                                raw_sk(sk)->ipmr_table = v;
                }
                rtnl_unlock();
                return ret;
        }
#endif
        /*
         *      Spurious command, or MRT_VERSION which you cannot
         *      set.
         */
        default:
                return -ENOPROTOOPT;
        }
}
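
/* Illustrative user-space sketch (not part of this file): a routing
 * daemon typically enables multicast routing with
 *
 *      int fd  = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *      int one = 1;
 *      setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 * and shuts it down again with MRT_DONE on the same socket.
 */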

/*
 *      Getsockopt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
        int olr;
        int val;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_IGMP)
                return -EOPNOTSUPP;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
           optname != MRT_PIM &&
#endif
           optname != MRT_ASSERT)
                return -ENOPROTOOPT;

        if (get_user(olr, optlen))
                return -EFAULT;
        if (olr < 0)
                return -EINVAL;

        olr = min_t(unsigned int, olr, sizeof(int));

        if (put_user(olr, optlen))
                return -EFAULT;
        if (optname == MRT_VERSION)
                val = 0x0305;
#ifdef CONFIG_IP_PIMSM
        else if (optname == MRT_PIM)
                val = mrt->mroute_do_pim;
#endif
        else
                val = mrt->mroute_do_assert;
        if (copy_to_user(optval, &val, olr))
                return -EFAULT;
        return 0;
}

/*
 *      The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
        struct sioc_sg_req sr;
        struct sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;

                rcu_read_lock();
                c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        rcu_read_unlock();

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                rcu_read_unlock();
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}
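
/* Illustrative user-space sketch (not part of this file): per-vif
 * counters are fetched with
 *
 *      struct sioc_vif_req vr = { .vifi = 0 };
 *      ioctl(fd, SIOCGETVIFCNT, &vr);
 *
 * and per-(S,G) counters analogously with SIOCGETSGCNT and
 * struct sioc_sg_req.
 */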
1545
1546 #ifdef CONFIG_COMPAT
1547 struct compat_sioc_sg_req {
1548         struct in_addr src;
1549         struct in_addr grp;
1550         compat_ulong_t pktcnt;
1551         compat_ulong_t bytecnt;
1552         compat_ulong_t wrong_if;
1553 };
1554
1555 struct compat_sioc_vif_req {
1556         vifi_t  vifi;           /* Which iface */
1557         compat_ulong_t icount;
1558         compat_ulong_t ocount;
1559         compat_ulong_t ibytes;
1560         compat_ulong_t obytes;
1561 };
1562
1563 int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1564 {
1565         struct compat_sioc_sg_req sr;
1566         struct compat_sioc_vif_req vr;
1567         struct vif_device *vif;
1568         struct mfc_cache *c;
1569         struct net *net = sock_net(sk);
1570         struct mr_table *mrt;
1571
1572         mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1573         if (!mrt)
1574                 return -ENOENT;
1575
1576         switch (cmd) {
1577         case SIOCGETVIFCNT:
1578                 if (copy_from_user(&vr, arg, sizeof(vr)))
1579                         return -EFAULT;
1580                 if (vr.vifi >= mrt->maxvif)
1581                         return -EINVAL;
1582                 read_lock(&mrt_lock);
1583                 vif = &mrt->vif_table[vr.vifi];
1584                 if (VIF_EXISTS(mrt, vr.vifi)) {
1585                         vr.icount = vif->pkt_in;
1586                         vr.ocount = vif->pkt_out;
1587                         vr.ibytes = vif->bytes_in;
1588                         vr.obytes = vif->bytes_out;
1589                         read_unlock(&mrt_lock);
1590
1591                         if (copy_to_user(arg, &vr, sizeof(vr)))
1592                                 return -EFAULT;
1593                         return 0;
1594                 }
1595                 read_unlock(&mrt_lock);
1596                 return -EADDRNOTAVAIL;
1597         case SIOCGETSGCNT:
1598                 if (copy_from_user(&sr, arg, sizeof(sr)))
1599                         return -EFAULT;
1600
1601                 rcu_read_lock();
1602                 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1603                 if (c) {
1604                         sr.pktcnt = c->mfc_un.res.pkt;
1605                         sr.bytecnt = c->mfc_un.res.bytes;
1606                         sr.wrong_if = c->mfc_un.res.wrong_if;
1607                         rcu_read_unlock();
1608
1609                         if (copy_to_user(arg, &sr, sizeof(sr)))
1610                                 return -EFAULT;
1611                         return 0;
1612                 }
1613                 rcu_read_unlock();
1614                 return -EADDRNOTAVAIL;
1615         default:
1616                 return -ENOIOCTLCMD;
1617         }
1618 }
1619 #endif
1620
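/* Why the compat copies above exist (illustrative note; sizes assume
 * the common 32-bit-userland-on-x86-64 case): struct sioc_vif_req uses
 * 'unsigned long' counters, so a 64-bit kernel sees a 40-byte struct
 * (2-byte vifi_t, 6 bytes of padding, four 8-byte longs), while 32-bit
 * userland passes a 20-byte one (2 + 2 padding + four 4-byte longs).
 * compat_ulong_t pins the fields back to the 32-bit layout, so
 * copy_from_user() reads exactly what the 32-bit caller wrote.
 */
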
1621
1622 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1623 {
1624         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1625         struct net *net = dev_net(dev);
1626         struct mr_table *mrt;
1627         struct vif_device *v;
1628         int ct;
1629
1630         if (event != NETDEV_UNREGISTER)
1631                 return NOTIFY_DONE;
1632
1633         ipmr_for_each_table(mrt, net) {
1634                 v = &mrt->vif_table[0];
1635                 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1636                         if (v->dev == dev)
1637                                 vif_delete(mrt, ct, 1, NULL);
1638                 }
1639         }
1640         return NOTIFY_DONE;
1641 }
1642
1643
1644 static struct notifier_block ip_mr_notifier = {
1645         .notifier_call = ipmr_device_event,
1646 };
1647
1648 /*
1649  *      Encapsulate a packet by attaching a valid IPIP header to it.
1650  *      This avoids tunnel drivers and other overhead, and gives us the
1651  *      speed that is so important for multicast video.
1652  */
1653
1654 static void ip_encap(struct net *net, struct sk_buff *skb,
1655                      __be32 saddr, __be32 daddr)
1656 {
1657         struct iphdr *iph;
1658         const struct iphdr *old_iph = ip_hdr(skb);
1659
1660         skb_push(skb, sizeof(struct iphdr));
1661         skb->transport_header = skb->network_header;
1662         skb_reset_network_header(skb);
1663         iph = ip_hdr(skb);
1664
1665         iph->version    =       4;
1666         iph->tos        =       old_iph->tos;
1667         iph->ttl        =       old_iph->ttl;
1668         iph->frag_off   =       0;
1669         iph->daddr      =       daddr;
1670         iph->saddr      =       saddr;
1671         iph->protocol   =       IPPROTO_IPIP;
1672         iph->ihl        =       5;
1673         iph->tot_len    =       htons(skb->len);
1674         ip_select_ident(net, skb, NULL);
1675         ip_send_check(iph);
1676
1677         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1678         nf_reset(skb);
1679 }
1680
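/* Resulting packet layout after ip_encap() (illustrative diagram):
 *
 *	+------------------+-------------------+-------------------+
 *	| outer IP header  | original IP       | original payload  |
 *	| proto=IPIP,ihl=5 | header            |                   |
 *	+------------------+-------------------+-------------------+
 *
 * The outer header copies tos/ttl from the inner one and carries the
 * tunnel endpoints as saddr/daddr; ip_send_check() fills its checksum.
 */
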
1681 static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
1682                                       struct sk_buff *skb)
1683 {
1684         struct ip_options *opt = &(IPCB(skb)->opt);
1685
1686         IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
1687         IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
1688
1689         if (unlikely(opt->optlen))
1690                 ip_forward_options(skb);
1691
1692         return dst_output(net, sk, skb);
1693 }
1694
1695 /*
1696  *      Processing handlers for ipmr_forward
1697  */
1698
1699 static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1700                             struct sk_buff *skb, struct mfc_cache *c, int vifi)
1701 {
1702         const struct iphdr *iph = ip_hdr(skb);
1703         struct vif_device *vif = &mrt->vif_table[vifi];
1704         struct net_device *dev;
1705         struct rtable *rt;
1706         struct flowi4 fl4;
1707         int    encap = 0;
1708
1709         if (!vif->dev)
1710                 goto out_free;
1711
1712 #ifdef CONFIG_IP_PIMSM
1713         if (vif->flags & VIFF_REGISTER) {
1714                 vif->pkt_out++;
1715                 vif->bytes_out += skb->len;
1716                 vif->dev->stats.tx_bytes += skb->len;
1717                 vif->dev->stats.tx_packets++;
1718                 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1719                 goto out_free;
1720         }
1721 #endif
1722
1723         if (vif->flags & VIFF_TUNNEL) {
1724                 rt = ip_route_output_ports(net, &fl4, NULL,
1725                                            vif->remote, vif->local,
1726                                            0, 0,
1727                                            IPPROTO_IPIP,
1728                                            RT_TOS(iph->tos), vif->link);
1729                 if (IS_ERR(rt))
1730                         goto out_free;
1731                 encap = sizeof(struct iphdr);
1732         } else {
1733                 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
1734                                            0, 0,
1735                                            IPPROTO_IPIP,
1736                                            RT_TOS(iph->tos), vif->link);
1737                 if (IS_ERR(rt))
1738                         goto out_free;
1739         }
1740
1741         dev = rt->dst.dev;
1742
1743         if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1744                 /* Do not fragment multicasts. Alas, IPv4 does not
1745                  * allow sending ICMP errors for multicast, so such
1746                  * packets will disappear into a black hole.
1747                  */
1748
1749                 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1750                 ip_rt_put(rt);
1751                 goto out_free;
1752         }
1753
1754         encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
1755
1756         if (skb_cow(skb, encap)) {
1757                 ip_rt_put(rt);
1758                 goto out_free;
1759         }
1760
1761         vif->pkt_out++;
1762         vif->bytes_out += skb->len;
1763
1764         skb_dst_drop(skb);
1765         skb_dst_set(skb, &rt->dst);
1766         ip_decrease_ttl(ip_hdr(skb));
1767
1768         /* FIXME: forward and output firewalls used to be called here.
1769          * What do we do with netfilter? -- RR
1770          */
1771         if (vif->flags & VIFF_TUNNEL) {
1772                 ip_encap(net, skb, vif->local, vif->remote);
1773                 /* FIXME: extra output firewall step used to be here. --RR */
1774                 vif->dev->stats.tx_packets++;
1775                 vif->dev->stats.tx_bytes += skb->len;
1776         }
1777
1778         IPCB(skb)->flags |= IPSKB_FORWARDED;
1779
1780         /*
1781          * RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
1782          * not only before forwarding, but also after forwarding on all output
1783          * interfaces. Clearly, if the mrouter runs a multicast
1784          * program, that program should receive packets regardless of the
1785          * interface on which it joined.
1786          * If we did not do this, the program would have to join on all
1787          * interfaces. On the other hand, a multihomed host (or router, but
1788          * not an mrouter) cannot join on more than one interface - that would
1789          * result in receiving duplicate packets.
1790          */
1791         NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
1792                 net, NULL, skb, skb->dev, dev,
1793                 ipmr_forward_finish);
1794         return;
1795
1796 out_free:
1797         kfree_skb(skb);
1798 }
1799
1800 static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1801 {
1802         int ct;
1803
1804         for (ct = mrt->maxvif-1; ct >= 0; ct--) {
1805                 if (mrt->vif_table[ct].dev == dev)
1806                         break;
1807         }
1808         return ct;
1809 }
1810
1811 /* "local" means that we should preserve one skb (for local delivery) */
1812
1813 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1814                           struct sk_buff *skb, struct mfc_cache *cache,
1815                           int local)
1816 {
1817         int psend = -1;
1818         int vif, ct;
1819         int true_vifi = ipmr_find_vif(mrt, skb->dev);
1820
1821         vif = cache->mfc_parent;
1822         cache->mfc_un.res.pkt++;
1823         cache->mfc_un.res.bytes += skb->len;
1824
1825         if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
1826                 struct mfc_cache *cache_proxy;
1827
1828                 /* For an (*,G) entry, we only check that the incoming
1829                  * interface is part of the static tree.
1830                  */
1831                 cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
1832                 if (cache_proxy &&
1833                     cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
1834                         goto forward;
1835         }
1836
1837         /*
1838          * Wrong interface: drop packet and (maybe) send PIM assert.
1839          */
1840         if (mrt->vif_table[vif].dev != skb->dev) {
1841                 if (rt_is_output_route(skb_rtable(skb))) {
1842                         /* It is our own packet, looped back.
1843                          * Very complicated situation...
1844                          *
1845                          * The best workaround until routing daemons are
1846                          * fixed is not to redistribute a packet if it was
1847                          * sent through the wrong interface. This means that
1848                          * multicast applications WILL NOT work for
1849                          * (S,G) entries whose default multicast route points
1850                          * to the wrong oif. In any case, it is not a good
1851                          * idea to run multicast applications on a router.
1852                          */
1853                         goto dont_forward;
1854                 }
1855
1856                 cache->mfc_un.res.wrong_if++;
1857
1858                 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1859                     /* PIM-SM uses asserts when switching from RPT to SPT,
1860                      * so we cannot check that the packet arrived on an oif.
1861                      * That is bad, but otherwise we would need to move a
1862                      * pretty large chunk of pimd into the kernel. Ough... --ANK
1863                      */
1864                     (mrt->mroute_do_pim ||
1865                      cache->mfc_un.res.ttls[true_vifi] < 255) &&
1866                     time_after(jiffies,
1867                                cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1868                         cache->mfc_un.res.last_assert = jiffies;
1869                         ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1870                 }
1871                 goto dont_forward;
1872         }
1873
1874 forward:
1875         mrt->vif_table[vif].pkt_in++;
1876         mrt->vif_table[vif].bytes_in += skb->len;
1877
1878         /*
1879          *      Forward the frame
1880          */
1881         if (cache->mfc_origin == htonl(INADDR_ANY) &&
1882             cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
1883                 if (true_vifi >= 0 &&
1884                     true_vifi != cache->mfc_parent &&
1885                     ip_hdr(skb)->ttl >
1886                                 cache->mfc_un.res.ttls[cache->mfc_parent]) {
1887                         /* It's an (*,*) entry and the packet is not coming from
1888                          * the upstream: forward the packet to the upstream
1889                          * only.
1890                          */
1891                         psend = cache->mfc_parent;
1892                         goto last_forward;
1893                 }
1894                 goto dont_forward;
1895         }
1896         for (ct = cache->mfc_un.res.maxvif - 1;
1897              ct >= cache->mfc_un.res.minvif; ct--) {
1898                 /* For a (*,G) entry, don't forward to the incoming interface */
1899                 if ((cache->mfc_origin != htonl(INADDR_ANY) ||
1900                      ct != true_vifi) &&
1901                     ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1902                         if (psend != -1) {
1903                                 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1904
1905                                 if (skb2)
1906                                         ipmr_queue_xmit(net, mrt, skb2, cache,
1907                                                         psend);
1908                         }
1909                         psend = ct;
1910                 }
1911         }
1912 last_forward:
1913         if (psend != -1) {
1914                 if (local) {
1915                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1916
1917                         if (skb2)
1918                                 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1919                 } else {
1920                         ipmr_queue_xmit(net, mrt, skb, cache, psend);
1921                         return;
1922                 }
1923         }
1924
1925 dont_forward:
1926         if (!local)
1927                 kfree_skb(skb);
1928 }
1929
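/* Worked example of the TTL-threshold checks above (illustrative): for
 * an entry with ttls[] = { 1, 64, 255, ... }, a packet arriving with
 * ip_hdr(skb)->ttl == 33 is forwarded on vif 0 (33 > 1) but not on
 * vif 1 (33 <= 64), and never on vif 2, since 255 marks a vif that is
 * not part of the entry's output set.
 */
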
1930 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
1931 {
1932         struct rtable *rt = skb_rtable(skb);
1933         struct iphdr *iph = ip_hdr(skb);
1934         struct flowi4 fl4 = {
1935                 .daddr = iph->daddr,
1936                 .saddr = iph->saddr,
1937                 .flowi4_tos = RT_TOS(iph->tos),
1938                 .flowi4_oif = (rt_is_output_route(rt) ?
1939                                skb->dev->ifindex : 0),
1940                 .flowi4_iif = (rt_is_output_route(rt) ?
1941                                LOOPBACK_IFINDEX :
1942                                skb->dev->ifindex),
1943                 .flowi4_mark = skb->mark,
1944         };
1945         struct mr_table *mrt;
1946         int err;
1947
1948         err = ipmr_fib_lookup(net, &fl4, &mrt);
1949         if (err)
1950                 return ERR_PTR(err);
1951         return mrt;
1952 }
1953
1954 /*
1955  *      Multicast packets for forwarding arrive here
1956  *      Called with rcu_read_lock() held.
1957  */
1958
1959 int ip_mr_input(struct sk_buff *skb)
1960 {
1961         struct mfc_cache *cache;
1962         struct net *net = dev_net(skb->dev);
1963         int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1964         struct mr_table *mrt;
1965
1966         /* Packet is looped back after forwarding; it must not be
1967          * forwarded a second time, but it can still be delivered locally.
1968          */
1969         if (IPCB(skb)->flags & IPSKB_FORWARDED)
1970                 goto dont_forward;
1971
1972         mrt = ipmr_rt_fib_lookup(net, skb);
1973         if (IS_ERR(mrt)) {
1974                 kfree_skb(skb);
1975                 return PTR_ERR(mrt);
1976         }
1977         if (!local) {
1978                 if (IPCB(skb)->opt.router_alert) {
1979                         if (ip_call_ra_chain(skb))
1980                                 return 0;
1981                 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
1982                         /* IGMPv1 (and broken IGMPv2 implementations such as
1983                          * Cisco IOS <= 11.2(8)) do not put the router alert
1984                          * option into IGMP packets destined for routable
1985                          * groups. This is very bad, because it means
1986                          * that we can forward NO IGMP messages.
1987                          */
1988                         struct sock *mroute_sk;
1989
1990                         mroute_sk = rcu_dereference(mrt->mroute_sk);
1991                         if (mroute_sk) {
1992                                 nf_reset(skb);
1993                                 raw_rcv(mroute_sk, skb);
1994                                 return 0;
1995                         }
1996                 }
1997         }
1998
1999         /* already under rcu_read_lock() */
2000         cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2001         if (!cache) {
2002                 int vif = ipmr_find_vif(mrt, skb->dev);
2003
2004                 if (vif >= 0)
2005                         cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2006                                                     vif);
2007         }
2008
2009         /*
2010          *      No usable cache entry
2011          */
2012         if (!cache) {
2013                 int vif;
2014
2015                 if (local) {
2016                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2017                         ip_local_deliver(skb);
2018                         if (!skb2)
2019                                 return -ENOBUFS;
2020                         skb = skb2;
2021                 }
2022
2023                 read_lock(&mrt_lock);
2024                 vif = ipmr_find_vif(mrt, skb->dev);
2025                 if (vif >= 0) {
2026                         int err2 = ipmr_cache_unresolved(mrt, vif, skb);
2027                         read_unlock(&mrt_lock);
2028
2029                         return err2;
2030                 }
2031                 read_unlock(&mrt_lock);
2032                 kfree_skb(skb);
2033                 return -ENODEV;
2034         }
2035
2036         read_lock(&mrt_lock);
2037         ip_mr_forward(net, mrt, skb, cache, local);
2038         read_unlock(&mrt_lock);
2039
2040         if (local)
2041                 return ip_local_deliver(skb);
2042
2043         return 0;
2044
2045 dont_forward:
2046         if (local)
2047                 return ip_local_deliver(skb);
2048         kfree_skb(skb);
2049         return 0;
2050 }
2051
2052 #ifdef CONFIG_IP_PIMSM
2053 /* called with rcu_read_lock() */
2054 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
2055                      unsigned int pimlen)
2056 {
2057         struct net_device *reg_dev = NULL;
2058         struct iphdr *encap;
2059
2060         encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
2061         /*
2062          * Check that:
2063          * a. packet is really sent to a multicast group
2064          * b. packet is not a NULL-REGISTER
2065          * c. packet is not truncated
2066          */
2067         if (!ipv4_is_multicast(encap->daddr) ||
2068             encap->tot_len == 0 ||
2069             ntohs(encap->tot_len) + pimlen > skb->len)
2070                 return 1;
2071
2072         read_lock(&mrt_lock);
2073         if (mrt->mroute_reg_vif_num >= 0)
2074                 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
2075         read_unlock(&mrt_lock);
2076
2077         if (!reg_dev)
2078                 return 1;
2079
2080         skb->mac_header = skb->network_header;
2081         skb_pull(skb, (u8 *)encap - skb->data);
2082         skb_reset_network_header(skb);
2083         skb->protocol = htons(ETH_P_IP);
2084         skb->ip_summed = CHECKSUM_NONE;
2085
2086         skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
2087
2088         netif_rx(skb);
2089
2090         return NET_RX_SUCCESS;
2091 }
2092 #endif
2093
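/* PIM REGISTER layout handled above (illustrative diagram):
 *
 *	| outer IP hdr | PIM hdr (pimlen bytes) | encapsulated IP pkt |
 *	                                        ^-- 'encap' points here
 *
 * skb_pull() drops everything up to the inner header, so the packet is
 * re-received on the register vif as if it had arrived there natively.
 */
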
2094 #ifdef CONFIG_IP_PIMSM_V1
2095 /*
2096  * Handle PIMv1 messages, which arrive as IGMP packets
2097  */
2098
2099 int pim_rcv_v1(struct sk_buff *skb)
2100 {
2101         struct igmphdr *pim;
2102         struct net *net = dev_net(skb->dev);
2103         struct mr_table *mrt;
2104
2105         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2106                 goto drop;
2107
2108         pim = igmp_hdr(skb);
2109
2110         mrt = ipmr_rt_fib_lookup(net, skb);
2111         if (IS_ERR(mrt))
2112                 goto drop;
2113         if (!mrt->mroute_do_pim ||
2114             pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2115                 goto drop;
2116
2117         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2118 drop:
2119                 kfree_skb(skb);
2120         }
2121         return 0;
2122 }
2123 #endif
2124
2125 #ifdef CONFIG_IP_PIMSM_V2
2126 static int pim_rcv(struct sk_buff *skb)
2127 {
2128         struct pimreghdr *pim;
2129         struct net *net = dev_net(skb->dev);
2130         struct mr_table *mrt;
2131
2132         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2133                 goto drop;
2134
2135         pim = (struct pimreghdr *)skb_transport_header(skb);
2136         if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
2137             (pim->flags & PIM_NULL_REGISTER) ||
2138             (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
2139              csum_fold(skb_checksum(skb, 0, skb->len, 0))))
2140                 goto drop;
2141
2142         mrt = ipmr_rt_fib_lookup(net, skb);
2143         if (IS_ERR(mrt))
2144                 goto drop;
2145         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2146 drop:
2147                 kfree_skb(skb);
2148         }
2149         return 0;
2150 }
2151 #endif
2152
2153 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2154                               struct mfc_cache *c, struct rtmsg *rtm)
2155 {
2156         int ct;
2157         struct rtnexthop *nhp;
2158         struct nlattr *mp_attr;
2159         struct rta_mfc_stats mfcs;
2160
2161         /* If cache is unresolved, don't try to parse IIF and OIF */
2162         if (c->mfc_parent >= MAXVIFS)
2163                 return -ENOENT;
2164
2165         if (VIF_EXISTS(mrt, c->mfc_parent) &&
2166             nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
2167                 return -EMSGSIZE;
2168
2169         if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
2170                 return -EMSGSIZE;
2171
2172         for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2173                 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2174                         if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
2175                                 nla_nest_cancel(skb, mp_attr);
2176                                 return -EMSGSIZE;
2177                         }
2178
2179                         nhp->rtnh_flags = 0;
2180                         nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2181                         nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
2182                         nhp->rtnh_len = sizeof(*nhp);
2183                 }
2184         }
2185
2186         nla_nest_end(skb, mp_attr);
2187
2188         mfcs.mfcs_packets = c->mfc_un.res.pkt;
2189         mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2190         mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2191         if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2192                 return -EMSGSIZE;
2193
2194         rtm->rtm_type = RTN_MULTICAST;
2195         return 1;
2196 }
2197
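/* Example (illustrative sketch): walking the RTA_MULTIPATH attribute
 * built above from userspace, using only the RTNH_* helpers from
 * <linux/rtnetlink.h>.  The function name is hypothetical; 'mp' is
 * assumed to point at a dumped route's RTA_MULTIPATH attribute.
 */
#if 0 /* userspace example */
#include <stdio.h>
#include <linux/rtnetlink.h>

static void print_oifs(struct rtattr *mp)
{
	struct rtnexthop *nhp = RTA_DATA(mp);
	int len = RTA_PAYLOAD(mp);

	while (RTNH_OK(nhp, len)) {
		/* rtnh_hops carries the vif's TTL threshold, see above */
		printf("oif %d ttl-threshold %u\n",
		       nhp->rtnh_ifindex, nhp->rtnh_hops);
		len -= RTNH_ALIGN(nhp->rtnh_len);
		nhp = RTNH_NEXT(nhp);
	}
}
#endif
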
2198 int ipmr_get_route(struct net *net, struct sk_buff *skb,
2199                    __be32 saddr, __be32 daddr,
2200                    struct rtmsg *rtm, int nowait)
2201 {
2202         struct mfc_cache *cache;
2203         struct mr_table *mrt;
2204         int err;
2205
2206         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2207         if (!mrt)
2208                 return -ENOENT;
2209
2210         rcu_read_lock();
2211         cache = ipmr_cache_find(mrt, saddr, daddr);
2212         if (!cache && skb->dev) {
2213                 int vif = ipmr_find_vif(mrt, skb->dev);
2214
2215                 if (vif >= 0)
2216                         cache = ipmr_cache_find_any(mrt, daddr, vif);
2217         }
2218         if (!cache) {
2219                 struct sk_buff *skb2;
2220                 struct iphdr *iph;
2221                 struct net_device *dev;
2222                 int vif = -1;
2223
2224                 if (nowait) {
2225                         rcu_read_unlock();
2226                         return -EAGAIN;
2227                 }
2228
2229                 dev = skb->dev;
2230                 read_lock(&mrt_lock);
2231                 if (dev)
2232                         vif = ipmr_find_vif(mrt, dev);
2233                 if (vif < 0) {
2234                         read_unlock(&mrt_lock);
2235                         rcu_read_unlock();
2236                         return -ENODEV;
2237                 }
2238                 skb2 = skb_clone(skb, GFP_ATOMIC);
2239                 if (!skb2) {
2240                         read_unlock(&mrt_lock);
2241                         rcu_read_unlock();
2242                         return -ENOMEM;
2243                 }
2244
2245                 skb_push(skb2, sizeof(struct iphdr));
2246                 skb_reset_network_header(skb2);
2247                 iph = ip_hdr(skb2);
2248                 iph->ihl = sizeof(struct iphdr) >> 2;
2249                 iph->saddr = saddr;
2250                 iph->daddr = daddr;
2251                 iph->version = 0;
2252                 err = ipmr_cache_unresolved(mrt, vif, skb2);
2253                 read_unlock(&mrt_lock);
2254                 rcu_read_unlock();
2255                 return err;
2256         }
2257
2258         read_lock(&mrt_lock);
2259         if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
2260                 cache->mfc_flags |= MFC_NOTIFY;
2261         err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
2262         read_unlock(&mrt_lock);
2263         rcu_read_unlock();
2264         return err;
2265 }
2266
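/* Note on the header synthesized above (illustrative): iph->version is
 * deliberately set to 0, a value no real packet can carry, so that the
 * cache-resolution code can later distinguish this netlink-initiated
 * request from a queued data packet and answer it via rtnetlink rather
 * than re-injecting it into the forwarding path.
 */
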
2267 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2268                             u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2269                             int flags)
2270 {
2271         struct nlmsghdr *nlh;
2272         struct rtmsg *rtm;
2273         int err;
2274
2275         nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2276         if (!nlh)
2277                 return -EMSGSIZE;
2278
2279         rtm = nlmsg_data(nlh);
2280         rtm->rtm_family   = RTNL_FAMILY_IPMR;
2281         rtm->rtm_dst_len  = 32;
2282         rtm->rtm_src_len  = 32;
2283         rtm->rtm_tos      = 0;
2284         rtm->rtm_table    = mrt->id;
2285         if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2286                 goto nla_put_failure;
2287         rtm->rtm_type     = RTN_MULTICAST;
2288         rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2289         if (c->mfc_flags & MFC_STATIC)
2290                 rtm->rtm_protocol = RTPROT_STATIC;
2291         else
2292                 rtm->rtm_protocol = RTPROT_MROUTED;
2293         rtm->rtm_flags    = 0;
2294
2295         if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2296             nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
2297                 goto nla_put_failure;
2298         err = __ipmr_fill_mroute(mrt, skb, c, rtm);
2299         /* do not break the dump if cache is unresolved */
2300         if (err < 0 && err != -ENOENT)
2301                 goto nla_put_failure;
2302
2303         nlmsg_end(skb, nlh);
2304         return 0;
2305
2306 nla_put_failure:
2307         nlmsg_cancel(skb, nlh);
2308         return -EMSGSIZE;
2309 }
2310
2311 static size_t mroute_msgsize(bool unresolved, int maxvif)
2312 {
2313         size_t len =
2314                 NLMSG_ALIGN(sizeof(struct rtmsg))
2315                 + nla_total_size(4)     /* RTA_TABLE */
2316                 + nla_total_size(4)     /* RTA_SRC */
2317                 + nla_total_size(4)     /* RTA_DST */
2318                 ;
2319
2320         if (!unresolved)
2321                 len = len
2322                       + nla_total_size(4)       /* RTA_IIF */
2323                       + nla_total_size(0)       /* RTA_MULTIPATH */
2324                       + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2325                                                 /* RTA_MFC_STATS */
2326                       + nla_total_size(sizeof(struct rta_mfc_stats))
2327                 ;
2328
2329         return len;
2330 }
2331
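/* Worked example (illustrative, x86-64 sizes): for a resolved entry
 * with maxvif == 4 the estimate above comes to
 *
 *	NLMSG_ALIGN(sizeof(struct rtmsg))             = 12
 *	3 * nla_total_size(4)  (TABLE, SRC, DST)      = 24
 *	nla_total_size(4)      (RTA_IIF)              =  8
 *	nla_total_size(0)      (RTA_MULTIPATH header) =  4
 *	4 * NLA_ALIGN(sizeof(struct rtnexthop))       = 32
 *	nla_total_size(sizeof(struct rta_mfc_stats))  = 28
 *	                                       total  = 108 bytes
 */
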
2332 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2333                                  int cmd)
2334 {
2335         struct net *net = read_pnet(&mrt->net);
2336         struct sk_buff *skb;
2337         int err = -ENOBUFS;
2338
2339         skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2340                         GFP_ATOMIC);
2341         if (!skb)
2342                 goto errout;
2343
2344         err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2345         if (err < 0)
2346                 goto errout;
2347
2348         rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2349         return;
2350
2351 errout:
2352         kfree_skb(skb);
2353         if (err < 0)
2354                 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2355 }
2356
2357 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2358 {
2359         struct net *net = sock_net(skb->sk);
2360         struct mr_table *mrt;
2361         struct mfc_cache *mfc;
2362         unsigned int t = 0, s_t;
2363         unsigned int h = 0, s_h;
2364         unsigned int e = 0, s_e;
2365
2366         s_t = cb->args[0];
2367         s_h = cb->args[1];
2368         s_e = cb->args[2];
2369
2370         rcu_read_lock();
2371         ipmr_for_each_table(mrt, net) {
2372                 if (t < s_t)
2373                         goto next_table;
2374                 if (t > s_t)
2375                         s_h = 0;
2376                 for (h = s_h; h < MFC_LINES; h++) {
2377                         list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
2378                                 if (e < s_e)
2379                                         goto next_entry;
2380                                 if (ipmr_fill_mroute(mrt, skb,
2381                                                      NETLINK_CB(cb->skb).portid,
2382                                                      cb->nlh->nlmsg_seq,
2383                                                      mfc, RTM_NEWROUTE,
2384                                                      NLM_F_MULTI) < 0)
2385                                         goto done;
2386 next_entry:
2387                                 e++;
2388                         }
2389                         e = s_e = 0;
2390                 }
2391                 spin_lock_bh(&mfc_unres_lock);
2392                 list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2393                         if (e < s_e)
2394                                 goto next_entry2;
2395                         if (ipmr_fill_mroute(mrt, skb,
2396                                              NETLINK_CB(cb->skb).portid,
2397                                              cb->nlh->nlmsg_seq,
2398                                              mfc, RTM_NEWROUTE,
2399                                              NLM_F_MULTI) < 0) {
2400                                 spin_unlock_bh(&mfc_unres_lock);
2401                                 goto done;
2402                         }
2403 next_entry2:
2404                         e++;
2405                 }
2406                 spin_unlock_bh(&mfc_unres_lock);
2407                 e = s_e = 0;
2408                 s_h = 0;
2409 next_table:
2410                 t++;
2411         }
2412 done:
2413         rcu_read_unlock();
2414
2415         cb->args[2] = e;
2416         cb->args[1] = h;
2417         cb->args[0] = t;
2418
2419         return skb->len;
2420 }
2421
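/* Example (illustrative sketch): requesting the dump served above from
 * userspace.  Error handling is omitted and the function name is
 * hypothetical; the constants come from <linux/rtnetlink.h>.
 */
#if 0 /* userspace example */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void request_ipmr_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = RTNL_FAMILY_IPMR;

	send(fd, &req, req.nlh.nlmsg_len, 0);
	/* ... recv() the RTM_NEWROUTE multipart replies here ... */
	close(fd);
}
#endif
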
2422 #ifdef CONFIG_PROC_FS
2423 /*
2424  *      The /proc interfaces to multicast routing:
2425  *      /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
2426  */
2427 struct ipmr_vif_iter {
2428         struct seq_net_private p;
2429         struct mr_table *mrt;
2430         int ct;
2431 };
2432
2433 static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2434                                            struct ipmr_vif_iter *iter,
2435                                            loff_t pos)
2436 {
2437         struct mr_table *mrt = iter->mrt;
2438
2439         for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2440                 if (!VIF_EXISTS(mrt, iter->ct))
2441                         continue;
2442                 if (pos-- == 0)
2443                         return &mrt->vif_table[iter->ct];
2444         }
2445         return NULL;
2446 }
2447
2448 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2449         __acquires(mrt_lock)
2450 {
2451         struct ipmr_vif_iter *iter = seq->private;
2452         struct net *net = seq_file_net(seq);
2453         struct mr_table *mrt;
2454
2455         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2456         if (!mrt)
2457                 return ERR_PTR(-ENOENT);
2458
2459         iter->mrt = mrt;
2460
2461         read_lock(&mrt_lock);
2462         return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
2463                 : SEQ_START_TOKEN;
2464 }
2465
2466 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2467 {
2468         struct ipmr_vif_iter *iter = seq->private;
2469         struct net *net = seq_file_net(seq);
2470         struct mr_table *mrt = iter->mrt;
2471
2472         ++*pos;
2473         if (v == SEQ_START_TOKEN)
2474                 return ipmr_vif_seq_idx(net, iter, 0);
2475
2476         while (++iter->ct < mrt->maxvif) {
2477                 if (!VIF_EXISTS(mrt, iter->ct))
2478                         continue;
2479                 return &mrt->vif_table[iter->ct];
2480         }
2481         return NULL;
2482 }
2483
2484 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2485         __releases(mrt_lock)
2486 {
2487         read_unlock(&mrt_lock);
2488 }
2489
2490 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2491 {
2492         struct ipmr_vif_iter *iter = seq->private;
2493         struct mr_table *mrt = iter->mrt;
2494
2495         if (v == SEQ_START_TOKEN) {
2496                 seq_puts(seq,
2497                          "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
2498         } else {
2499                 const struct vif_device *vif = v;
2500                 const char *name =  vif->dev ? vif->dev->name : "none";
2501
2502                 seq_printf(seq,
2503                            "%2zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
2504                            vif - mrt->vif_table,
2505                            name, vif->bytes_in, vif->pkt_in,
2506                            vif->bytes_out, vif->pkt_out,
2507                            vif->flags, vif->local, vif->remote);
2508         }
2509         return 0;
2510 }
2511
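/* Sample /proc/net/ip_mr_vif output produced above (illustrative,
 * values invented; Local/Remote are raw __be32 words, so 192.168.0.1
 * shows as 0100A8C0 on little-endian):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0          123456     789    654321    1011 00000 0100A8C0 00000000
 */
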
2512 static const struct seq_operations ipmr_vif_seq_ops = {
2513         .start = ipmr_vif_seq_start,
2514         .next  = ipmr_vif_seq_next,
2515         .stop  = ipmr_vif_seq_stop,
2516         .show  = ipmr_vif_seq_show,
2517 };
2518
2519 static int ipmr_vif_open(struct inode *inode, struct file *file)
2520 {
2521         return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2522                             sizeof(struct ipmr_vif_iter));
2523 }
2524
2525 static const struct file_operations ipmr_vif_fops = {
2526         .owner   = THIS_MODULE,
2527         .open    = ipmr_vif_open,
2528         .read    = seq_read,
2529         .llseek  = seq_lseek,
2530         .release = seq_release_net,
2531 };
2532
2533 struct ipmr_mfc_iter {
2534         struct seq_net_private p;
2535         struct mr_table *mrt;
2536         struct list_head *cache;
2537         int ct;
2538 };
2539
2540
2541 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2542                                           struct ipmr_mfc_iter *it, loff_t pos)
2543 {
2544         struct mr_table *mrt = it->mrt;
2545         struct mfc_cache *mfc;
2546
2547         rcu_read_lock();
2548         for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
2549                 it->cache = &mrt->mfc_cache_array[it->ct];
2550                 list_for_each_entry_rcu(mfc, it->cache, list)
2551                         if (pos-- == 0)
2552                                 return mfc;
2553         }
2554         rcu_read_unlock();
2555
2556         spin_lock_bh(&mfc_unres_lock);
2557         it->cache = &mrt->mfc_unres_queue;
2558         list_for_each_entry(mfc, it->cache, list)
2559                 if (pos-- == 0)
2560                         return mfc;
2561         spin_unlock_bh(&mfc_unres_lock);
2562
2563         it->cache = NULL;
2564         return NULL;
2565 }
2566
2567
2568 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2569 {
2570         struct ipmr_mfc_iter *it = seq->private;
2571         struct net *net = seq_file_net(seq);
2572         struct mr_table *mrt;
2573
2574         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2575         if (!mrt)
2576                 return ERR_PTR(-ENOENT);
2577
2578         it->mrt = mrt;
2579         it->cache = NULL;
2580         it->ct = 0;
2581         return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
2582                 : SEQ_START_TOKEN;
2583 }
2584
2585 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2586 {
2587         struct mfc_cache *mfc = v;
2588         struct ipmr_mfc_iter *it = seq->private;
2589         struct net *net = seq_file_net(seq);
2590         struct mr_table *mrt = it->mrt;
2591
2592         ++*pos;
2593
2594         if (v == SEQ_START_TOKEN)
2595                 return ipmr_mfc_seq_idx(net, seq->private, 0);
2596
2597         if (mfc->list.next != it->cache)
2598                 return list_entry(mfc->list.next, struct mfc_cache, list);
2599
2600         if (it->cache == &mrt->mfc_unres_queue)
2601                 goto end_of_list;
2602
2603         BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
2604
2605         while (++it->ct < MFC_LINES) {
2606                 it->cache = &mrt->mfc_cache_array[it->ct];
2607                 if (list_empty(it->cache))
2608                         continue;
2609                 return list_first_entry(it->cache, struct mfc_cache, list);
2610         }
2611
2612         /* exhausted cache_array, show unresolved */
2613         rcu_read_unlock();
2614         it->cache = &mrt->mfc_unres_queue;
2615         it->ct = 0;
2616
2617         spin_lock_bh(&mfc_unres_lock);
2618         if (!list_empty(it->cache))
2619                 return list_first_entry(it->cache, struct mfc_cache, list);
2620
2621 end_of_list:
2622         spin_unlock_bh(&mfc_unres_lock);
2623         it->cache = NULL;
2624
2625         return NULL;
2626 }
2627
2628 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2629 {
2630         struct ipmr_mfc_iter *it = seq->private;
2631         struct mr_table *mrt = it->mrt;
2632
2633         if (it->cache == &mrt->mfc_unres_queue)
2634                 spin_unlock_bh(&mfc_unres_lock);
2635         else if (it->cache == &mrt->mfc_cache_array[it->ct])
2636                 rcu_read_unlock();
2637 }
2638
2639 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2640 {
2641         int n;
2642
2643         if (v == SEQ_START_TOKEN) {
2644                 seq_puts(seq,
2645                  "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
2646         } else {
2647                 const struct mfc_cache *mfc = v;
2648                 const struct ipmr_mfc_iter *it = seq->private;
2649                 const struct mr_table *mrt = it->mrt;
2650
2651                 seq_printf(seq, "%08X %08X %-3hd",
2652                            (__force u32) mfc->mfc_mcastgrp,
2653                            (__force u32) mfc->mfc_origin,
2654                            mfc->mfc_parent);
2655
2656                 if (it->cache != &mrt->mfc_unres_queue) {
2657                         seq_printf(seq, " %8lu %8lu %8lu",
2658                                    mfc->mfc_un.res.pkt,
2659                                    mfc->mfc_un.res.bytes,
2660                                    mfc->mfc_un.res.wrong_if);
2661                         for (n = mfc->mfc_un.res.minvif;
2662                              n < mfc->mfc_un.res.maxvif; n++) {
2663                                 if (VIF_EXISTS(mrt, n) &&
2664                                     mfc->mfc_un.res.ttls[n] < 255)
2665                                         seq_printf(seq,
2666                                            " %2d:%-3d",
2667                                            n, mfc->mfc_un.res.ttls[n]);
2668                         }
2669                 } else {
2670                         /* unresolved mfc_caches don't contain
2671                          * pkt, bytes and wrong_if values
2672                          */
2673                         seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2674                 }
2675                 seq_putc(seq, '\n');
2676         }
2677         return 0;
2678 }
2679
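/* Sample /proc/net/ip_mr_cache output produced above (illustrative,
 * values invented; Group/Origin are raw __be32 hex words and each Oifs
 * column reads vif:ttl-threshold):
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	010000E0 0100A8C0 0           42    63000        0  1:1    2:64
 */
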
2680 static const struct seq_operations ipmr_mfc_seq_ops = {
2681         .start = ipmr_mfc_seq_start,
2682         .next  = ipmr_mfc_seq_next,
2683         .stop  = ipmr_mfc_seq_stop,
2684         .show  = ipmr_mfc_seq_show,
2685 };
2686
2687 static int ipmr_mfc_open(struct inode *inode, struct file *file)
2688 {
2689         return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2690                             sizeof(struct ipmr_mfc_iter));
2691 }
2692
2693 static const struct file_operations ipmr_mfc_fops = {
2694         .owner   = THIS_MODULE,
2695         .open    = ipmr_mfc_open,
2696         .read    = seq_read,
2697         .llseek  = seq_lseek,
2698         .release = seq_release_net,
2699 };
2700 #endif
2701
2702 #ifdef CONFIG_IP_PIMSM_V2
2703 static const struct net_protocol pim_protocol = {
2704         .handler        =       pim_rcv,
2705         .netns_ok       =       1,
2706 };
2707 #endif
2708
2709
2710 /*
2711  *      Setup for IP multicast routing
2712  */
2713 static int __net_init ipmr_net_init(struct net *net)
2714 {
2715         int err;
2716
2717         err = ipmr_rules_init(net);
2718         if (err < 0)
2719                 goto fail;
2720
2721 #ifdef CONFIG_PROC_FS
2722         err = -ENOMEM;
2723         if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
2724                 goto proc_vif_fail;
2725         if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
2726                 goto proc_cache_fail;
2727 #endif
2728         return 0;
2729
2730 #ifdef CONFIG_PROC_FS
2731 proc_cache_fail:
2732         remove_proc_entry("ip_mr_vif", net->proc_net);
2733 proc_vif_fail:
2734         ipmr_rules_exit(net);
2735 #endif
2736 fail:
2737         return err;
2738 }
2739
2740 static void __net_exit ipmr_net_exit(struct net *net)
2741 {
2742 #ifdef CONFIG_PROC_FS
2743         remove_proc_entry("ip_mr_cache", net->proc_net);
2744         remove_proc_entry("ip_mr_vif", net->proc_net);
2745 #endif
2746         ipmr_rules_exit(net);
2747 }
2748
2749 static struct pernet_operations ipmr_net_ops = {
2750         .init = ipmr_net_init,
2751         .exit = ipmr_net_exit,
2752 };
2753
2754 int __init ip_mr_init(void)
2755 {
2756         int err;
2757
2758         mrt_cachep = kmem_cache_create("ip_mrt_cache",
2759                                        sizeof(struct mfc_cache),
2760                                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
2761                                        NULL);
2762         if (!mrt_cachep)
2763                 return -ENOMEM;
2764
2765         err = register_pernet_subsys(&ipmr_net_ops);
2766         if (err)
2767                 goto reg_pernet_fail;
2768
2769         err = register_netdevice_notifier(&ip_mr_notifier);
2770         if (err)
2771                 goto reg_notif_fail;
2772 #ifdef CONFIG_IP_PIMSM_V2
2773         if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2774                 pr_err("%s: can't add PIM protocol\n", __func__);
2775                 err = -EAGAIN;
2776                 goto add_proto_fail;
2777         }
2778 #endif
2779         rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
2780                       NULL, ipmr_rtm_dumproute, NULL);
2781         return 0;
2782
2783 #ifdef CONFIG_IP_PIMSM_V2
2784 add_proto_fail:
2785         unregister_netdevice_notifier(&ip_mr_notifier);
2786 #endif
2787 reg_notif_fail:
2788         unregister_pernet_subsys(&ipmr_net_ops);
2789 reg_pernet_fail:
2790         kmem_cache_destroy(mrt_cachep);
2791         return err;
2792 }
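
/* Illustrative note: the error unwinding above follows the usual kernel
 * goto-ladder pattern - each failure label undoes, in reverse order,
 * exactly the registrations that succeeded before the failing step.
 */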