1 /*
2  *      Linux INET6 implementation
3  *      FIB front-end.
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13
14 /*      Changes:
15  *
16  *      YOSHIFUJI Hideaki @USAGI
17  *              reworked default router selection.
18  *              - respect outgoing interface
19  *              - select from (probably) reachable routers (i.e.
20  *              routers in REACHABLE, STALE, DELAY or PROBE states).
21  *              - always select the same router if it is (probably)
22  *              reachable.  otherwise, round-robin the list.
23  *      Ville Nuorvala
24  *              Fixed routing subtrees.
25  */
26
27 #define pr_fmt(fmt) "IPv6: " fmt
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/dst_metadata.h>
58 #include <net/xfrm.h>
59 #include <net/netevent.h>
60 #include <net/netlink.h>
61 #include <net/nexthop.h>
62 #include <net/lwtunnel.h>
63 #include <net/ip_tunnels.h>
64
65 #include <asm/uaccess.h>
66
67 #ifdef CONFIG_SYSCTL
68 #include <linux/sysctl.h>
69 #endif
70
71 enum rt6_nud_state {
72         RT6_NUD_FAIL_HARD = -3,
73         RT6_NUD_FAIL_PROBE = -2,
74         RT6_NUD_FAIL_DO_RR = -1,
75         RT6_NUD_SUCCEED = 1
76 };
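/* These values encode the neighbour-unreachability outcome produced by
 * rt6_check_neigh() and consumed by rt6_score_route()/find_match() below.
 * The negative values are increasingly hard failures: RT6_NUD_FAIL_DO_RR
 * makes find_match() request round-robin fallback in rt6_select(),
 * RT6_NUD_FAIL_PROBE keeps the route from being preferred, and
 * RT6_NUD_FAIL_HARD excludes it entirely.
 */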
77
78 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
79 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
80 static unsigned int      ip6_default_advmss(const struct dst_entry *dst);
81 static unsigned int      ip6_mtu(const struct dst_entry *dst);
82 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
83 static void             ip6_dst_destroy(struct dst_entry *);
84 static void             ip6_dst_ifdown(struct dst_entry *,
85                                        struct net_device *dev, int how);
86 static int               ip6_dst_gc(struct dst_ops *ops);
87
88 static int              ip6_pkt_discard(struct sk_buff *skb);
89 static int              ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
90 static int              ip6_pkt_prohibit(struct sk_buff *skb);
91 static int              ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
92 static void             ip6_link_failure(struct sk_buff *skb);
93 static void             ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
94                                            struct sk_buff *skb, u32 mtu);
95 static void             rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
96                                         struct sk_buff *skb);
97 static void             rt6_dst_from_metrics_check(struct rt6_info *rt);
98 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
99
100 #ifdef CONFIG_IPV6_ROUTE_INFO
101 static struct rt6_info *rt6_add_route_info(struct net *net,
102                                            const struct in6_addr *prefix, int prefixlen,
103                                            const struct in6_addr *gwaddr, int ifindex,
104                                            unsigned int pref);
105 static struct rt6_info *rt6_get_route_info(struct net *net,
106                                            const struct in6_addr *prefix, int prefixlen,
107                                            const struct in6_addr *gwaddr, int ifindex);
108 #endif
109
110 struct uncached_list {
111         spinlock_t              lock;
112         struct list_head        head;
113 };
114
115 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
116
117 static void rt6_uncached_list_add(struct rt6_info *rt)
118 {
119         struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
120
121         rt->dst.flags |= DST_NOCACHE;
122         rt->rt6i_uncached_list = ul;
123
124         spin_lock_bh(&ul->lock);
125         list_add_tail(&rt->rt6i_uncached, &ul->head);
126         spin_unlock_bh(&ul->lock);
127 }
128
129 static void rt6_uncached_list_del(struct rt6_info *rt)
130 {
131         if (!list_empty(&rt->rt6i_uncached)) {
132                 struct uncached_list *ul = rt->rt6i_uncached_list;
133
134                 spin_lock_bh(&ul->lock);
135                 list_del(&rt->rt6i_uncached);
136                 spin_unlock_bh(&ul->lock);
137         }
138 }
139
140 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
141 {
142         struct net_device *loopback_dev = net->loopback_dev;
143         int cpu;
144
145         for_each_possible_cpu(cpu) {
146                 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
147                 struct rt6_info *rt;
148
149                 spin_lock_bh(&ul->lock);
150                 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
151                         struct inet6_dev *rt_idev = rt->rt6i_idev;
152                         struct net_device *rt_dev = rt->dst.dev;
153
154                         if (rt_idev && (rt_idev->dev == dev || !dev) &&
155                             rt_idev->dev != loopback_dev) {
156                                 rt->rt6i_idev = in6_dev_get(loopback_dev);
157                                 in6_dev_put(rt_idev);
158                         }
159
160                         if (rt_dev && (rt_dev == dev || !dev) &&
161                             rt_dev != loopback_dev) {
162                                 rt->dst.dev = loopback_dev;
163                                 dev_hold(rt->dst.dev);
164                                 dev_put(rt_dev);
165                         }
166                 }
167                 spin_unlock_bh(&ul->lock);
168         }
169 }
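/* The uncached list exists so that RTF_CACHE clones which are not owned
 * by the fib6 tree (see ip6_pol_route() below) can still be found when
 * their device goes away: rt6_uncached_list_flush_dev() walks every
 * per-cpu list and re-points such routes at the loopback device, so the
 * original outgoing device can be unregistered safely.
 */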
170
171 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
172 {
173         return dst_metrics_write_ptr(rt->dst.from);
174 }
175
176 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
177 {
178         struct rt6_info *rt = (struct rt6_info *)dst;
179
180         if (rt->rt6i_flags & RTF_PCPU)
181                 return rt6_pcpu_cow_metrics(rt);
182         else if (rt->rt6i_flags & RTF_CACHE)
183                 return NULL;
184         else
185                 return dst_cow_metrics_generic(dst, old);
186 }
187
188 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
189                                              struct sk_buff *skb,
190                                              const void *daddr)
191 {
192         struct in6_addr *p = &rt->rt6i_gateway;
193
194         if (!ipv6_addr_any(p))
195                 return (const void *) p;
196         else if (skb)
197                 return &ipv6_hdr(skb)->daddr;
198         return daddr;
199 }
200
201 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
202                                           struct sk_buff *skb,
203                                           const void *daddr)
204 {
205         struct rt6_info *rt = (struct rt6_info *) dst;
206         struct neighbour *n;
207
208         daddr = choose_neigh_daddr(rt, skb, daddr);
209         n = __ipv6_neigh_lookup(dst->dev, daddr);
210         if (n)
211                 return n;
212         return neigh_create(&nd_tbl, daddr, dst->dev);
213 }
214
215 static struct dst_ops ip6_dst_ops_template = {
216         .family                 =       AF_INET6,
217         .gc                     =       ip6_dst_gc,
218         .gc_thresh              =       1024,
219         .check                  =       ip6_dst_check,
220         .default_advmss         =       ip6_default_advmss,
221         .mtu                    =       ip6_mtu,
222         .cow_metrics            =       ipv6_cow_metrics,
223         .destroy                =       ip6_dst_destroy,
224         .ifdown                 =       ip6_dst_ifdown,
225         .negative_advice        =       ip6_negative_advice,
226         .link_failure           =       ip6_link_failure,
227         .update_pmtu            =       ip6_rt_update_pmtu,
228         .redirect               =       rt6_do_redirect,
229         .local_out              =       __ip6_local_out,
230         .neigh_lookup           =       ip6_neigh_lookup,
231 };
232
233 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
234 {
235         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
236
237         return mtu ? : dst->dev->mtu;
238 }
239
240 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
241                                          struct sk_buff *skb, u32 mtu)
242 {
243 }
244
245 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
246                                       struct sk_buff *skb)
247 {
248 }
249
250 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
251                                          unsigned long old)
252 {
253         return NULL;
254 }
255
256 static struct dst_ops ip6_dst_blackhole_ops = {
257         .family                 =       AF_INET6,
258         .destroy                =       ip6_dst_destroy,
259         .check                  =       ip6_dst_check,
260         .mtu                    =       ip6_blackhole_mtu,
261         .default_advmss         =       ip6_default_advmss,
262         .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
263         .redirect               =       ip6_rt_blackhole_redirect,
264         .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
265         .neigh_lookup           =       ip6_neigh_lookup,
266 };
267
268 static const u32 ip6_template_metrics[RTAX_MAX] = {
269         [RTAX_HOPLIMIT - 1] = 0,
270 };
271
272 static const struct rt6_info ip6_null_entry_template = {
273         .dst = {
274                 .__refcnt       = ATOMIC_INIT(1),
275                 .__use          = 1,
276                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
277                 .error          = -ENETUNREACH,
278                 .input          = ip6_pkt_discard,
279                 .output         = ip6_pkt_discard_out,
280         },
281         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
282         .rt6i_protocol  = RTPROT_KERNEL,
283         .rt6i_metric    = ~(u32) 0,
284         .rt6i_ref       = ATOMIC_INIT(1),
285 };
286
287 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
288
289 static const struct rt6_info ip6_prohibit_entry_template = {
290         .dst = {
291                 .__refcnt       = ATOMIC_INIT(1),
292                 .__use          = 1,
293                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
294                 .error          = -EACCES,
295                 .input          = ip6_pkt_prohibit,
296                 .output         = ip6_pkt_prohibit_out,
297         },
298         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
299         .rt6i_protocol  = RTPROT_KERNEL,
300         .rt6i_metric    = ~(u32) 0,
301         .rt6i_ref       = ATOMIC_INIT(1),
302 };
303
304 static const struct rt6_info ip6_blk_hole_entry_template = {
305         .dst = {
306                 .__refcnt       = ATOMIC_INIT(1),
307                 .__use          = 1,
308                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
309                 .error          = -EINVAL,
310                 .input          = dst_discard,
311                 .output         = dst_discard_sk,
312         },
313         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
314         .rt6i_protocol  = RTPROT_KERNEL,
315         .rt6i_metric    = ~(u32) 0,
316         .rt6i_ref       = ATOMIC_INIT(1),
317 };
318
319 #endif
320
321 /* allocate dst with ip6_dst_ops */
322 static struct rt6_info *__ip6_dst_alloc(struct net *net,
323                                         struct net_device *dev,
324                                         int flags)
325 {
326         struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
327                                         0, DST_OBSOLETE_FORCE_CHK, flags);
328
329         if (rt) {
330                 struct dst_entry *dst = &rt->dst;
331
332                 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
333                 INIT_LIST_HEAD(&rt->rt6i_siblings);
334                 INIT_LIST_HEAD(&rt->rt6i_uncached);
335         }
336         return rt;
337 }
338
339 static struct rt6_info *ip6_dst_alloc(struct net *net,
340                                       struct net_device *dev,
341                                       int flags)
342 {
343         struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
344
345         if (rt) {
346                 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
347                 if (rt->rt6i_pcpu) {
348                         int cpu;
349
350                         for_each_possible_cpu(cpu) {
351                                 struct rt6_info **p;
352
353                                 p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
354                                 /* no one shares rt */
355                                 *p =  NULL;
356                         }
357                 } else {
358                         dst_destroy((struct dst_entry *)rt);
359                         return NULL;
360                 }
361         }
362
363         return rt;
364 }
365
366 static void ip6_dst_destroy(struct dst_entry *dst)
367 {
368         struct rt6_info *rt = (struct rt6_info *)dst;
369         struct dst_entry *from = dst->from;
370         struct inet6_dev *idev;
371
372         dst_destroy_metrics_generic(dst);
373         free_percpu(rt->rt6i_pcpu);
374         rt6_uncached_list_del(rt);
375
376         idev = rt->rt6i_idev;
377         if (idev) {
378                 rt->rt6i_idev = NULL;
379                 in6_dev_put(idev);
380         }
381
382         dst->from = NULL;
383         dst_release(from);
384 }
385
386 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
387                            int how)
388 {
389         struct rt6_info *rt = (struct rt6_info *)dst;
390         struct inet6_dev *idev = rt->rt6i_idev;
391         struct net_device *loopback_dev =
392                 dev_net(dev)->loopback_dev;
393
394         if (dev != loopback_dev) {
395                 if (idev && idev->dev == dev) {
396                         struct inet6_dev *loopback_idev =
397                                 in6_dev_get(loopback_dev);
398                         if (loopback_idev) {
399                                 rt->rt6i_idev = loopback_idev;
400                                 in6_dev_put(idev);
401                         }
402                 }
403         }
404 }
405
406 static bool rt6_check_expired(const struct rt6_info *rt)
407 {
408         if (rt->rt6i_flags & RTF_EXPIRES) {
409                 if (time_after(jiffies, rt->dst.expires))
410                         return true;
411         } else if (rt->dst.from) {
412                 return rt6_check_expired((struct rt6_info *) rt->dst.from);
413         }
414         return false;
415 }
416
417 /* Multipath route selection:
418  *   Hash-based function using the packet header and flow label.
419  * Adapted from fib_info_hashfn().
420  */
421 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
422                                const struct flowi6 *fl6)
423 {
424         unsigned int val = fl6->flowi6_proto;
425
426         val ^= ipv6_addr_hash(&fl6->daddr);
427         val ^= ipv6_addr_hash(&fl6->saddr);
428
429         /* Works only if this is not encapsulated */
430         switch (fl6->flowi6_proto) {
431         case IPPROTO_UDP:
432         case IPPROTO_TCP:
433         case IPPROTO_SCTP:
434                 val ^= (__force u16)fl6->fl6_sport;
435                 val ^= (__force u16)fl6->fl6_dport;
436                 break;
437
438         case IPPROTO_ICMPV6:
439                 val ^= (__force u16)fl6->fl6_icmp_type;
440                 val ^= (__force u16)fl6->fl6_icmp_code;
441                 break;
442         }
443         /* RFC 6438 recommends using the flow label */
444         val ^= (__force u32)fl6->flowlabel;
445
446         /* Perhaps we need to tune this function? */
447         val = val ^ (val >> 7) ^ (val >> 12);
448         return val % candidate_count;
449 }
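/* A worked example (illustrative values only): for a TCP or UDP flow the
 * hash mixes the protocol, both addresses, both ports and the flow label,
 * folds the result and reduces it modulo candidate_count.  With one route
 * plus three siblings (candidate_count == 4) the result is in [0, 3];
 * rt6_multipath_select() below keeps the current match when the result is
 * 0 and otherwise walks that many entries along the sibling list.
 */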
450
451 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
452                                              struct flowi6 *fl6, int oif,
453                                              int strict)
454 {
455         struct rt6_info *sibling, *next_sibling;
456         int route_choosen;
457
458         route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
459         /* Don't change the route if route_choosen == 0
460          * (the siblings list does not include this route itself)
461          */
462         if (route_choosen)
463                 list_for_each_entry_safe(sibling, next_sibling,
464                                 &match->rt6i_siblings, rt6i_siblings) {
465                         route_choosen--;
466                         if (route_choosen == 0) {
467                                 if (rt6_score_route(sibling, oif, strict) < 0)
468                                         break;
469                                 match = sibling;
470                                 break;
471                         }
472                 }
473         return match;
474 }
475
476 /*
477  *      Route lookup. The caller is assumed to hold the relevant table->tb6_lock.
478  */
479
480 static inline struct rt6_info *rt6_device_match(struct net *net,
481                                                     struct rt6_info *rt,
482                                                     const struct in6_addr *saddr,
483                                                     int oif,
484                                                     int flags)
485 {
486         struct rt6_info *local = NULL;
487         struct rt6_info *sprt;
488
489         if (!oif && ipv6_addr_any(saddr))
490                 goto out;
491
492         for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
493                 struct net_device *dev = sprt->dst.dev;
494
495                 if (oif) {
496                         if (dev->ifindex == oif)
497                                 return sprt;
498                         if (dev->flags & IFF_LOOPBACK) {
499                                 if (!sprt->rt6i_idev ||
500                                     sprt->rt6i_idev->dev->ifindex != oif) {
501                                         if (flags & RT6_LOOKUP_F_IFACE && oif)
502                                                 continue;
503                                         if (local && (!oif ||
504                                                       local->rt6i_idev->dev->ifindex == oif))
505                                                 continue;
506                                 }
507                                 local = sprt;
508                         }
509                 } else {
510                         if (ipv6_chk_addr(net, saddr, dev,
511                                           flags & RT6_LOOKUP_F_IFACE))
512                                 return sprt;
513                 }
514         }
515
516         if (oif) {
517                 if (local)
518                         return local;
519
520                 if (flags & RT6_LOOKUP_F_IFACE)
521                         return net->ipv6.ip6_null_entry;
522         }
523 out:
524         return rt;
525 }
526
527 #ifdef CONFIG_IPV6_ROUTER_PREF
528 struct __rt6_probe_work {
529         struct work_struct work;
530         struct in6_addr target;
531         struct net_device *dev;
532 };
533
534 static void rt6_probe_deferred(struct work_struct *w)
535 {
536         struct in6_addr mcaddr;
537         struct __rt6_probe_work *work =
538                 container_of(w, struct __rt6_probe_work, work);
539
540         addrconf_addr_solict_mult(&work->target, &mcaddr);
541         ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
542         dev_put(work->dev);
543         kfree(work);
544 }
545
546 static void rt6_probe(struct rt6_info *rt)
547 {
548         struct __rt6_probe_work *work;
549         struct neighbour *neigh;
550         /*
551          * Okay, this does not seem to be appropriate for now;
552          * however, we need to check whether it really is, aka
553          * Router Reachability Probing.
554          *
555          * A Router Reachability Probe MUST be rate-limited
556          * to no more than one per minute.
557          */
558         if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
559                 return;
560         rcu_read_lock_bh();
561         neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
562         if (neigh) {
563                 if (neigh->nud_state & NUD_VALID)
564                         goto out;
565
566                 work = NULL;
567                 write_lock(&neigh->lock);
568                 if (!(neigh->nud_state & NUD_VALID) &&
569                     time_after(jiffies,
570                                neigh->updated +
571                                rt->rt6i_idev->cnf.rtr_probe_interval)) {
572                         work = kmalloc(sizeof(*work), GFP_ATOMIC);
573                         if (work)
574                                 __neigh_set_probe_once(neigh);
575                 }
576                 write_unlock(&neigh->lock);
577         } else {
578                 work = kmalloc(sizeof(*work), GFP_ATOMIC);
579         }
580
581         if (work) {
582                 INIT_WORK(&work->work, rt6_probe_deferred);
583                 work->target = rt->rt6i_gateway;
584                 dev_hold(rt->dst.dev);
585                 work->dev = rt->dst.dev;
586                 schedule_work(&work->work);
587         }
588
589 out:
590         rcu_read_unlock_bh();
591 }
592 #else
593 static inline void rt6_probe(struct rt6_info *rt)
594 {
595 }
596 #endif
597
598 /*
599  * Default Router Selection (RFC 2461 6.3.6)
600  */
601 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
602 {
603         struct net_device *dev = rt->dst.dev;
604         if (!oif || dev->ifindex == oif)
605                 return 2;
606         if ((dev->flags & IFF_LOOPBACK) &&
607             rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
608                 return 1;
609         return 0;
610 }
611
612 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
613 {
614         struct neighbour *neigh;
615         enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
616
617         if (rt->rt6i_flags & RTF_NONEXTHOP ||
618             !(rt->rt6i_flags & RTF_GATEWAY))
619                 return RT6_NUD_SUCCEED;
620
621         rcu_read_lock_bh();
622         neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
623         if (neigh) {
624                 read_lock(&neigh->lock);
625                 if (neigh->nud_state & NUD_VALID)
626                         ret = RT6_NUD_SUCCEED;
627 #ifdef CONFIG_IPV6_ROUTER_PREF
628                 else if (!(neigh->nud_state & NUD_FAILED))
629                         ret = RT6_NUD_SUCCEED;
630                 else
631                         ret = RT6_NUD_FAIL_PROBE;
632 #endif
633                 read_unlock(&neigh->lock);
634         } else {
635                 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
636                       RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
637         }
638         rcu_read_unlock_bh();
639
640         return ret;
641 }
642
643 static int rt6_score_route(struct rt6_info *rt, int oif,
644                            int strict)
645 {
646         int m;
647
648         m = rt6_check_dev(rt, oif);
649         if (!m && (strict & RT6_LOOKUP_F_IFACE))
650                 return RT6_NUD_FAIL_HARD;
651 #ifdef CONFIG_IPV6_ROUTER_PREF
652         m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
653 #endif
654         if (strict & RT6_LOOKUP_F_REACHABLE) {
655                 int n = rt6_check_neigh(rt);
656                 if (n < 0)
657                         return n;
658         }
659         return m;
660 }
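/* The score packs two components: bits 0-1 hold the interface match from
 * rt6_check_dev() (2 = exact oif match, 1 = loopback with a matching
 * rt6i_idev, 0 = no match), and, with CONFIG_IPV6_ROUTER_PREF, the decoded
 * router preference from the RA is shifted into bits 2 and up, so a
 * preferred router on the requested interface scores highest.
 */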
661
662 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
663                                    int *mpri, struct rt6_info *match,
664                                    bool *do_rr)
665 {
666         int m;
667         bool match_do_rr = false;
668         struct inet6_dev *idev = rt->rt6i_idev;
669         struct net_device *dev = rt->dst.dev;
670
671         if (dev && !netif_carrier_ok(dev) &&
672             idev->cnf.ignore_routes_with_linkdown)
673                 goto out;
674
675         if (rt6_check_expired(rt))
676                 goto out;
677
678         m = rt6_score_route(rt, oif, strict);
679         if (m == RT6_NUD_FAIL_DO_RR) {
680                 match_do_rr = true;
681                 m = 0; /* lowest valid score */
682         } else if (m == RT6_NUD_FAIL_HARD) {
683                 goto out;
684         }
685
686         if (strict & RT6_LOOKUP_F_REACHABLE)
687                 rt6_probe(rt);
688
689         /* note that m can be RT6_NUD_FAIL_PROBE at this point */
690         if (m > *mpri) {
691                 *do_rr = match_do_rr;
692                 *mpri = m;
693                 match = rt;
694         }
695 out:
696         return match;
697 }
698
699 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
700                                      struct rt6_info *rr_head,
701                                      u32 metric, int oif, int strict,
702                                      bool *do_rr)
703 {
704         struct rt6_info *rt, *match, *cont;
705         int mpri = -1;
706
707         match = NULL;
708         cont = NULL;
709         for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
710                 if (rt->rt6i_metric != metric) {
711                         cont = rt;
712                         break;
713                 }
714
715                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
716         }
717
718         for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
719                 if (rt->rt6i_metric != metric) {
720                         cont = rt;
721                         break;
722                 }
723
724                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
725         }
726
727         if (match || !cont)
728                 return match;
729
730         for (rt = cont; rt; rt = rt->dst.rt6_next)
731                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
732
733         return match;
734 }
735
736 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
737 {
738         struct rt6_info *match, *rt0;
739         struct net *net;
740         bool do_rr = false;
741
742         rt0 = fn->rr_ptr;
743         if (!rt0)
744                 fn->rr_ptr = rt0 = fn->leaf;
745
746         match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
747                              &do_rr);
748
749         if (do_rr) {
750                 struct rt6_info *next = rt0->dst.rt6_next;
751
752                 /* no entries matched; do round-robin */
753                 if (!next || next->rt6i_metric != rt0->rt6i_metric)
754                         next = fn->leaf;
755
756                 if (next != rt0)
757                         fn->rr_ptr = next;
758         }
759
760         net = dev_net(rt0->dst.dev);
761         return match ? match : net->ipv6.ip6_null_entry;
762 }
763
764 static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
765 {
766         return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
767 }
768
769 #ifdef CONFIG_IPV6_ROUTE_INFO
770 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
771                   const struct in6_addr *gwaddr)
772 {
773         struct net *net = dev_net(dev);
774         struct route_info *rinfo = (struct route_info *) opt;
775         struct in6_addr prefix_buf, *prefix;
776         unsigned int pref;
777         unsigned long lifetime;
778         struct rt6_info *rt;
779
780         if (len < sizeof(struct route_info)) {
781                 return -EINVAL;
782         }
783
784         /* Sanity check for prefix_len and length */
785         if (rinfo->length > 3) {
786                 return -EINVAL;
787         } else if (rinfo->prefix_len > 128) {
788                 return -EINVAL;
789         } else if (rinfo->prefix_len > 64) {
790                 if (rinfo->length < 2) {
791                         return -EINVAL;
792                 }
793         } else if (rinfo->prefix_len > 0) {
794                 if (rinfo->length < 1) {
795                         return -EINVAL;
796                 }
797         }
798
799         pref = rinfo->route_pref;
800         if (pref == ICMPV6_ROUTER_PREF_INVALID)
801                 return -EINVAL;
802
803         lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
804
805         if (rinfo->length == 3)
806                 prefix = (struct in6_addr *)rinfo->prefix;
807         else {
808                 /* this function is safe */
809                 ipv6_addr_prefix(&prefix_buf,
810                                  (struct in6_addr *)rinfo->prefix,
811                                  rinfo->prefix_len);
812                 prefix = &prefix_buf;
813         }
814
815         if (rinfo->prefix_len == 0)
816                 rt = rt6_get_dflt_router(gwaddr, dev);
817         else
818                 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
819                                         gwaddr, dev->ifindex);
820
821         if (rt && !lifetime) {
822                 ip6_del_rt(rt);
823                 rt = NULL;
824         }
825
826         if (!rt && lifetime)
827                 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
828                                         pref);
829         else if (rt)
830                 rt->rt6i_flags = RTF_ROUTEINFO |
831                                  (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
832
833         if (rt) {
834                 if (!addrconf_finite_timeout(lifetime))
835                         rt6_clean_expires(rt);
836                 else
837                         rt6_set_expires(rt, jiffies + HZ * lifetime);
838
839                 ip6_rt_put(rt);
840         }
841         return 0;
842 }
843 #endif
844
845 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
846                                         struct in6_addr *saddr)
847 {
848         struct fib6_node *pn;
849         while (1) {
850                 if (fn->fn_flags & RTN_TL_ROOT)
851                         return NULL;
852                 pn = fn->parent;
853                 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
854                         fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
855                 else
856                         fn = pn;
857                 if (fn->fn_flags & RTN_RTINFO)
858                         return fn;
859         }
860 }
861
862 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
863                                              struct fib6_table *table,
864                                              struct flowi6 *fl6, int flags)
865 {
866         struct fib6_node *fn;
867         struct rt6_info *rt;
868
869         read_lock_bh(&table->tb6_lock);
870         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
871 restart:
872         rt = fn->leaf;
873         rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
874         if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
875                 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
876         if (rt == net->ipv6.ip6_null_entry) {
877                 fn = fib6_backtrack(fn, &fl6->saddr);
878                 if (fn)
879                         goto restart;
880         }
881         dst_use(&rt->dst, jiffies);
882         read_unlock_bh(&table->tb6_lock);
883         return rt;
884
885 }
886
887 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
888                                     int flags)
889 {
890         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
891 }
892 EXPORT_SYMBOL_GPL(ip6_route_lookup);
893
894 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
895                             const struct in6_addr *saddr, int oif, int strict)
896 {
897         struct flowi6 fl6 = {
898                 .flowi6_oif = oif,
899                 .daddr = *daddr,
900         };
901         struct dst_entry *dst;
902         int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
903
904         if (saddr) {
905                 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
906                 flags |= RT6_LOOKUP_F_HAS_SADDR;
907         }
908
909         dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
910         if (dst->error == 0)
911                 return (struct rt6_info *) dst;
912
913         dst_release(dst);
914
915         return NULL;
916 }
917 EXPORT_SYMBOL(rt6_lookup);
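/* Typical use (a sketch, not taken from a specific caller; "net", "daddr"
 * and "dev" stand in for the caller's own context): resolve a destination
 * with strict interface matching and drop the reference when done.
 *
 *	struct rt6_info *rt;
 *
 *	rt = rt6_lookup(net, &daddr, NULL, dev->ifindex, 1);
 *	if (rt) {
 *		... use rt->dst here ...
 *		ip6_rt_put(rt);
 *	}
 *
 * rt6_lookup() returns a held entry or NULL on error, so every successful
 * lookup must be balanced by ip6_rt_put().
 */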
918
919 /* ip6_ins_rt is called with the table->tb6_lock NOT held.
920    It takes a new route entry; if the addition fails for any reason,
921    the route is freed. In any case, if the caller does not hold a
922    reference to it, it may be destroyed.
923  */
924
925 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
926                         struct mx6_config *mxc)
927 {
928         int err;
929         struct fib6_table *table;
930
931         table = rt->rt6i_table;
932         write_lock_bh(&table->tb6_lock);
933         err = fib6_add(&table->tb6_root, rt, info, mxc);
934         write_unlock_bh(&table->tb6_lock);
935
936         return err;
937 }
938
939 int ip6_ins_rt(struct rt6_info *rt)
940 {
941         struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
942         struct mx6_config mxc = { .mx = NULL, };
943
944         return __ip6_ins_rt(rt, &info, &mxc);
945 }
946
947 static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
948                                            const struct in6_addr *daddr,
949                                            const struct in6_addr *saddr)
950 {
951         struct rt6_info *rt;
952
953         /*
954          *      Clone the route.
955          */
956
957         if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
958                 ort = (struct rt6_info *)ort->dst.from;
959
960         rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
961
962         if (!rt)
963                 return NULL;
964
965         ip6_rt_copy_init(rt, ort);
966         rt->rt6i_flags |= RTF_CACHE;
967         rt->rt6i_metric = 0;
968         rt->dst.flags |= DST_HOST;
969         rt->rt6i_dst.addr = *daddr;
970         rt->rt6i_dst.plen = 128;
971
972         if (!rt6_is_gw_or_nonexthop(ort)) {
973                 if (ort->rt6i_dst.plen != 128 &&
974                     ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
975                         rt->rt6i_flags |= RTF_ANYCAST;
976 #ifdef CONFIG_IPV6_SUBTREES
977                 if (rt->rt6i_src.plen && saddr) {
978                         rt->rt6i_src.addr = *saddr;
979                         rt->rt6i_src.plen = 128;
980                 }
981 #endif
982         }
983
984         return rt;
985 }
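/* The clone produced here is a host route for a single destination:
 * rt6i_dst is set to the exact daddr with a /128 prefix length, RTF_CACHE
 * is set and rt6i_metric is cleared.  Callers either insert it into the
 * tree (__ip6_rt_update_pmtu()) or keep it on the uncached list
 * (ip6_pol_route()) when it must not be owned by the fib6 tree.
 */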
986
987 static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
988 {
989         struct rt6_info *pcpu_rt;
990
991         pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
992                                   rt->dst.dev, rt->dst.flags);
993
994         if (!pcpu_rt)
995                 return NULL;
996         ip6_rt_copy_init(pcpu_rt, rt);
997         pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
998         pcpu_rt->rt6i_flags |= RTF_PCPU;
999         return pcpu_rt;
1000 }
1001
1002 /* It should be called with read_lock_bh(&tb6_lock) acquired */
1003 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
1004 {
1005         struct rt6_info *pcpu_rt, **p;
1006
1007         p = this_cpu_ptr(rt->rt6i_pcpu);
1008         pcpu_rt = *p;
1009
1010         if (pcpu_rt) {
1011                 dst_hold(&pcpu_rt->dst);
1012                 rt6_dst_from_metrics_check(pcpu_rt);
1013         }
1014         return pcpu_rt;
1015 }
1016
1017 static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1018 {
1019         struct fib6_table *table = rt->rt6i_table;
1020         struct rt6_info *pcpu_rt, *prev, **p;
1021
1022         pcpu_rt = ip6_rt_pcpu_alloc(rt);
1023         if (!pcpu_rt) {
1024                 struct net *net = dev_net(rt->dst.dev);
1025
1026                 dst_hold(&net->ipv6.ip6_null_entry->dst);
1027                 return net->ipv6.ip6_null_entry;
1028         }
1029
1030         read_lock_bh(&table->tb6_lock);
1031         if (rt->rt6i_pcpu) {
1032                 p = this_cpu_ptr(rt->rt6i_pcpu);
1033                 prev = cmpxchg(p, NULL, pcpu_rt);
1034                 if (prev) {
1035                         /* If someone did it before us, return prev instead */
1036                         dst_destroy(&pcpu_rt->dst);
1037                         pcpu_rt = prev;
1038                 }
1039         } else {
1040                 /* rt has been removed from the fib6 tree
1041                  * before we have a chance to acquire the read_lock.
1042                  * In this case, don't bother to create a pcpu rt
1043                  * since rt is going away anyway.  The next
1044                  * dst_check() will trigger a re-lookup.
1045                  */
1046                 dst_destroy(&pcpu_rt->dst);
1047                 pcpu_rt = rt;
1048         }
1049         dst_hold(&pcpu_rt->dst);
1050         rt6_dst_from_metrics_check(pcpu_rt);
1051         read_unlock_bh(&table->tb6_lock);
1052         return pcpu_rt;
1053 }
1054
1055 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
1056                                       struct flowi6 *fl6, int flags)
1057 {
1058         struct fib6_node *fn, *saved_fn;
1059         struct rt6_info *rt;
1060         int strict = 0;
1061
1062         strict |= flags & RT6_LOOKUP_F_IFACE;
1063         if (net->ipv6.devconf_all->forwarding == 0)
1064                 strict |= RT6_LOOKUP_F_REACHABLE;
1065
1066         read_lock_bh(&table->tb6_lock);
1067
1068         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1069         saved_fn = fn;
1070
1071 redo_rt6_select:
1072         rt = rt6_select(fn, oif, strict);
1073         if (rt->rt6i_nsiblings)
1074                 rt = rt6_multipath_select(rt, fl6, oif, strict);
1075         if (rt == net->ipv6.ip6_null_entry) {
1076                 fn = fib6_backtrack(fn, &fl6->saddr);
1077                 if (fn)
1078                         goto redo_rt6_select;
1079                 else if (strict & RT6_LOOKUP_F_REACHABLE) {
1080                         /* also consider unreachable route */
1081                         strict &= ~RT6_LOOKUP_F_REACHABLE;
1082                         fn = saved_fn;
1083                         goto redo_rt6_select;
1084                 }
1085         }
1086
1087
1088         if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
1089                 dst_use(&rt->dst, jiffies);
1090                 read_unlock_bh(&table->tb6_lock);
1091
1092                 rt6_dst_from_metrics_check(rt);
1093                 return rt;
1094         } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1095                             !(rt->rt6i_flags & RTF_GATEWAY))) {
1096                 /* Create an RTF_CACHE clone which will not be
1097                  * owned by the fib6 tree.  It is for the special case where
1098                  * the daddr in the skb during the neighbor look-up is
1099                  * different from the fl6->daddr used to look up the route here.
1100                  */
1101
1102                 struct rt6_info *uncached_rt;
1103
1104                 dst_use(&rt->dst, jiffies);
1105                 read_unlock_bh(&table->tb6_lock);
1106
1107                 uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
1108                 dst_release(&rt->dst);
1109
1110                 if (uncached_rt)
1111                         rt6_uncached_list_add(uncached_rt);
1112                 else
1113                         uncached_rt = net->ipv6.ip6_null_entry;
1114
1115                 dst_hold(&uncached_rt->dst);
1116                 return uncached_rt;
1117
1118         } else {
1119                 /* Get a percpu copy */
1120
1121                 struct rt6_info *pcpu_rt;
1122
1123                 rt->dst.lastuse = jiffies;
1124                 rt->dst.__use++;
1125                 pcpu_rt = rt6_get_pcpu_route(rt);
1126
1127                 if (pcpu_rt) {
1128                         read_unlock_bh(&table->tb6_lock);
1129                 } else {
1130                         /* We have to do the read_unlock first
1131                          * because rt6_make_pcpu_route() may trigger
1132                          * ip6_dst_gc() which will take the write_lock.
1133                          */
1134                         dst_hold(&rt->dst);
1135                         read_unlock_bh(&table->tb6_lock);
1136                         pcpu_rt = rt6_make_pcpu_route(rt);
1137                         dst_release(&rt->dst);
1138                 }
1139
1140                 return pcpu_rt;
1141
1142         }
1143 }
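/* ip6_pol_route() therefore has three ways out: the null entry and
 * RTF_CACHE entries are returned as-is; a FLOWI_FLAG_KNOWN_NH lookup on a
 * non-gateway route gets a fresh RTF_CACHE clone that lives on the
 * uncached list rather than in the tree; everything else is served from a
 * per-cpu copy created on first use by rt6_make_pcpu_route().
 */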
1144
1145 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
1146                                             struct flowi6 *fl6, int flags)
1147 {
1148         return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
1149 }
1150
1151 static struct dst_entry *ip6_route_input_lookup(struct net *net,
1152                                                 struct net_device *dev,
1153                                                 struct flowi6 *fl6, int flags)
1154 {
1155         if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1156                 flags |= RT6_LOOKUP_F_IFACE;
1157
1158         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1159 }
1160
1161 void ip6_route_input(struct sk_buff *skb)
1162 {
1163         const struct ipv6hdr *iph = ipv6_hdr(skb);
1164         struct net *net = dev_net(skb->dev);
1165         int flags = RT6_LOOKUP_F_HAS_SADDR;
1166         struct ip_tunnel_info *tun_info;
1167         struct flowi6 fl6 = {
1168                 .flowi6_iif = skb->dev->ifindex,
1169                 .daddr = iph->daddr,
1170                 .saddr = iph->saddr,
1171                 .flowlabel = ip6_flowinfo(iph),
1172                 .flowi6_mark = skb->mark,
1173                 .flowi6_proto = iph->nexthdr,
1174         };
1175
1176         tun_info = skb_tunnel_info(skb);
1177         if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1178                 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
1179         skb_dst_drop(skb);
1180         skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1181 }
1182
1183 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1184                                              struct flowi6 *fl6, int flags)
1185 {
1186         return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1187 }
1188
1189 struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
1190                                     struct flowi6 *fl6)
1191 {
1192         int flags = 0;
1193
1194         fl6->flowi6_iif = LOOPBACK_IFINDEX;
1195
1196         if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
1197                 flags |= RT6_LOOKUP_F_IFACE;
1198
1199         if (!ipv6_addr_any(&fl6->saddr))
1200                 flags |= RT6_LOOKUP_F_HAS_SADDR;
1201         else if (sk)
1202                 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1203
1204         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1205 }
1206 EXPORT_SYMBOL(ip6_route_output);
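/* Typical use (a sketch modelled on ip6_update_pmtu() below, with "net",
 * "sk" and "fl6" standing in for the caller's own context):
 *
 *	struct dst_entry *dst;
 *
 *	dst = ip6_route_output(net, sk, &fl6);
 *	if (!dst->error)
 *		... use dst, e.g. to update PMTU state ...
 *	dst_release(dst);
 *
 * Unlike rt6_lookup(), failure is reported through dst->error on the
 * returned dst, which still holds a reference the caller must release.
 */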
1207
1208 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1209 {
1210         struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1211         struct dst_entry *new = NULL;
1212
1213         rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1214         if (rt) {
1215                 new = &rt->dst;
1216
1217                 memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1218
1219                 new->__use = 1;
1220                 new->input = dst_discard;
1221                 new->output = dst_discard_sk;
1222
1223                 if (dst_metrics_read_only(&ort->dst))
1224                         new->_metrics = ort->dst._metrics;
1225                 else
1226                         dst_copy_metrics(new, &ort->dst);
1227                 rt->rt6i_idev = ort->rt6i_idev;
1228                 if (rt->rt6i_idev)
1229                         in6_dev_hold(rt->rt6i_idev);
1230
1231                 rt->rt6i_gateway = ort->rt6i_gateway;
1232                 rt->rt6i_flags = ort->rt6i_flags;
1233                 rt->rt6i_metric = 0;
1234
1235                 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1236 #ifdef CONFIG_IPV6_SUBTREES
1237                 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1238 #endif
1239
1240                 dst_free(new);
1241         }
1242
1243         dst_release(dst_orig);
1244         return new ? new : ERR_PTR(-ENOMEM);
1245 }
1246
1247 /*
1248  *      Destination cache support functions
1249  */
1250
1251 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
1252 {
1253         if (rt->dst.from &&
1254             dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
1255                 dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
1256 }
1257
1258 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1259 {
1260         if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1261                 return NULL;
1262
1263         if (rt6_check_expired(rt))
1264                 return NULL;
1265
1266         return &rt->dst;
1267 }
1268
1269 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1270 {
1271         if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1272             rt6_check((struct rt6_info *)(rt->dst.from), cookie))
1273                 return &rt->dst;
1274         else
1275                 return NULL;
1276 }
1277
1278 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1279 {
1280         struct rt6_info *rt;
1281
1282         rt = (struct rt6_info *) dst;
1283
1284         /* All IPv6 dsts are created with ->obsolete set to
1285          * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
1286          * down into this function.
1287          */
1288
1289         rt6_dst_from_metrics_check(rt);
1290
1291         if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
1292                 return rt6_dst_from_check(rt, cookie);
1293         else
1294                 return rt6_check(rt, cookie);
1295 }
1296
1297 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1298 {
1299         struct rt6_info *rt = (struct rt6_info *) dst;
1300
1301         if (rt) {
1302                 if (rt->rt6i_flags & RTF_CACHE) {
1303                         if (rt6_check_expired(rt)) {
1304                                 ip6_del_rt(rt);
1305                                 dst = NULL;
1306                         }
1307                 } else {
1308                         dst_release(dst);
1309                         dst = NULL;
1310                 }
1311         }
1312         return dst;
1313 }
1314
1315 static void ip6_link_failure(struct sk_buff *skb)
1316 {
1317         struct rt6_info *rt;
1318
1319         icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1320
1321         rt = (struct rt6_info *) skb_dst(skb);
1322         if (rt) {
1323                 if (rt->rt6i_flags & RTF_CACHE) {
1324                         dst_hold(&rt->dst);
1325                         if (ip6_del_rt(rt))
1326                                 dst_free(&rt->dst);
1327                 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1328                         rt->rt6i_node->fn_sernum = -1;
1329                 }
1330         }
1331 }
1332
1333 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
1334 {
1335         struct net *net = dev_net(rt->dst.dev);
1336
1337         rt->rt6i_flags |= RTF_MODIFIED;
1338         rt->rt6i_pmtu = mtu;
1339         rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1340 }
1341
1342 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1343                                  const struct ipv6hdr *iph, u32 mtu)
1344 {
1345         struct rt6_info *rt6 = (struct rt6_info *)dst;
1346
1347         if (rt6->rt6i_flags & RTF_LOCAL)
1348                 return;
1349
1350         dst_confirm(dst);
1351         mtu = max_t(u32, mtu, IPV6_MIN_MTU);
1352         if (mtu >= dst_mtu(dst))
1353                 return;
1354
1355         if (rt6->rt6i_flags & RTF_CACHE) {
1356                 rt6_do_update_pmtu(rt6, mtu);
1357         } else {
1358                 const struct in6_addr *daddr, *saddr;
1359                 struct rt6_info *nrt6;
1360
1361                 if (iph) {
1362                         daddr = &iph->daddr;
1363                         saddr = &iph->saddr;
1364                 } else if (sk) {
1365                         daddr = &sk->sk_v6_daddr;
1366                         saddr = &inet6_sk(sk)->saddr;
1367                 } else {
1368                         return;
1369                 }
1370                 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
1371                 if (nrt6) {
1372                         rt6_do_update_pmtu(nrt6, mtu);
1373
1374                         /* ip6_ins_rt(nrt6) will bump the
1375                          * rt6->rt6i_node->fn_sernum
1376                          * which will fail the next rt6_check() and
1377                          * invalidate the sk->sk_dst_cache.
1378                          */
1379                         ip6_ins_rt(nrt6);
1380                 }
1381         }
1382 }
1383
1384 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1385                                struct sk_buff *skb, u32 mtu)
1386 {
1387         __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
1388 }
1389
1390 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1391                      int oif, u32 mark)
1392 {
1393         const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1394         struct dst_entry *dst;
1395         struct flowi6 fl6;
1396
1397         memset(&fl6, 0, sizeof(fl6));
1398         fl6.flowi6_oif = oif;
1399         fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1400         fl6.daddr = iph->daddr;
1401         fl6.saddr = iph->saddr;
1402         fl6.flowlabel = ip6_flowinfo(iph);
1403
1404         dst = ip6_route_output(net, NULL, &fl6);
1405         if (!dst->error)
1406                 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
1407         dst_release(dst);
1408 }
1409 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1410
1411 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1412 {
1413         ip6_update_pmtu(skb, sock_net(sk), mtu,
1414                         sk->sk_bound_dev_if, sk->sk_mark);
1415 }
1416 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1417
1418 /* Handle redirects */
1419 struct ip6rd_flowi {
1420         struct flowi6 fl6;
1421         struct in6_addr gateway;
1422 };
1423
1424 static struct rt6_info *__ip6_route_redirect(struct net *net,
1425                                              struct fib6_table *table,
1426                                              struct flowi6 *fl6,
1427                                              int flags)
1428 {
1429         struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1430         struct rt6_info *rt;
1431         struct fib6_node *fn;
1432
1433         /* Get the "current" route for this destination and
1434          * check if the redirect has come from the appropriate router.
1435          *
1436          * RFC 4861 specifies that redirects should only be
1437          * accepted if they come from the nexthop to the target.
1438          * Due to the way the routes are chosen, this notion
1439          * is a bit fuzzy and one might need to check all possible
1440          * routes.
1441          */
1442
1443         read_lock_bh(&table->tb6_lock);
1444         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1445 restart:
1446         for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1447                 if (rt6_check_expired(rt))
1448                         continue;
1449                 if (rt->dst.error)
1450                         break;
1451                 if (!(rt->rt6i_flags & RTF_GATEWAY))
1452                         continue;
1453                 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1454                         continue;
1455                 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1456                         continue;
1457                 break;
1458         }
1459
1460         if (!rt)
1461                 rt = net->ipv6.ip6_null_entry;
1462         else if (rt->dst.error) {
1463                 rt = net->ipv6.ip6_null_entry;
1464                 goto out;
1465         }
1466
1467         if (rt == net->ipv6.ip6_null_entry) {
1468                 fn = fib6_backtrack(fn, &fl6->saddr);
1469                 if (fn)
1470                         goto restart;
1471         }
1472
1473 out:
1474         dst_hold(&rt->dst);
1475
1476         read_unlock_bh(&table->tb6_lock);
1477
1478         return rt;
1479 };
1480
1481 static struct dst_entry *ip6_route_redirect(struct net *net,
1482                                         const struct flowi6 *fl6,
1483                                         const struct in6_addr *gateway)
1484 {
1485         int flags = RT6_LOOKUP_F_HAS_SADDR;
1486         struct ip6rd_flowi rdfl;
1487
1488         rdfl.fl6 = *fl6;
1489         rdfl.gateway = *gateway;
1490
1491         return fib6_rule_lookup(net, &rdfl.fl6,
1492                                 flags, __ip6_route_redirect);
1493 }
1494
1495 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1496 {
1497         const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1498         struct dst_entry *dst;
1499         struct flowi6 fl6;
1500
1501         memset(&fl6, 0, sizeof(fl6));
1502         fl6.flowi6_iif = LOOPBACK_IFINDEX;
1503         fl6.flowi6_oif = oif;
1504         fl6.flowi6_mark = mark;
1505         fl6.daddr = iph->daddr;
1506         fl6.saddr = iph->saddr;
1507         fl6.flowlabel = ip6_flowinfo(iph);
1508
1509         dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1510         rt6_do_redirect(dst, NULL, skb);
1511         dst_release(dst);
1512 }
1513 EXPORT_SYMBOL_GPL(ip6_redirect);
1514
1515 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1516                             u32 mark)
1517 {
1518         const struct ipv6hdr *iph = ipv6_hdr(skb);
1519         const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1520         struct dst_entry *dst;
1521         struct flowi6 fl6;
1522
1523         memset(&fl6, 0, sizeof(fl6));
1524         fl6.flowi6_iif = LOOPBACK_IFINDEX;
1525         fl6.flowi6_oif = oif;
1526         fl6.flowi6_mark = mark;
1527         fl6.daddr = msg->dest;
1528         fl6.saddr = iph->daddr;
1529
1530         dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1531         rt6_do_redirect(dst, NULL, skb);
1532         dst_release(dst);
1533 }
1534
1535 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1536 {
1537         ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1538 }
1539 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1540
1541 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1542 {
1543         struct net_device *dev = dst->dev;
1544         unsigned int mtu = dst_mtu(dst);
1545         struct net *net = dev_net(dev);
1546
1547         mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1548
1549         if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1550                 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1551
1552         /*
1553          * The maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and the
1554          * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1555          * IPV6_MAXPLEN is also a valid value and means: "any MSS,
1556          * rely only on PMTU discovery"
1557          */
1558         if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1559                 mtu = IPV6_MAXPLEN;
1560         return mtu;
1561 }
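/* Worked example for ip6_default_advmss() above: on a link with a
 * 1500-byte MTU, the advertised MSS comes out to 1500 - 40 (IPv6
 * header) - 20 (base TCP header) = 1440 bytes, assuming the
 * per-namespace ip6_rt_min_advmss sysctl is below that value.
 */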
1562
1563 static unsigned int ip6_mtu(const struct dst_entry *dst)
1564 {
1565         const struct rt6_info *rt = (const struct rt6_info *)dst;
1566         unsigned int mtu = rt->rt6i_pmtu;
1567         struct inet6_dev *idev;
1568
1569         if (mtu)
1570                 goto out;
1571
1572         mtu = dst_metric_raw(dst, RTAX_MTU);
1573         if (mtu)
1574                 goto out;
1575
1576         mtu = IPV6_MIN_MTU;
1577
1578         rcu_read_lock();
1579         idev = __in6_dev_get(dst->dev);
1580         if (idev)
1581                 mtu = idev->cnf.mtu6;
1582         rcu_read_unlock();
1583
1584 out:
1585         return min_t(unsigned int, mtu, IP6_MAX_MTU);
1586 }
1587
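/* dst entries created by icmp6_dst_alloc() below (used for locally
 * generated ICMPv6/ndisc traffic) are never inserted into the FIB.
 * They are chained on this private list, reaped by icmp6_dst_gc()
 * once their refcount drops to zero and flushed by icmp6_clean_all()
 * when a device goes down (see rt6_ifdown()).
 */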
1588 static struct dst_entry *icmp6_dst_gc_list;
1589 static DEFINE_SPINLOCK(icmp6_dst_lock);
1590
1591 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1592                                   struct flowi6 *fl6)
1593 {
1594         struct dst_entry *dst;
1595         struct rt6_info *rt;
1596         struct inet6_dev *idev = in6_dev_get(dev);
1597         struct net *net = dev_net(dev);
1598
1599         if (unlikely(!idev))
1600                 return ERR_PTR(-ENODEV);
1601
1602         rt = ip6_dst_alloc(net, dev, 0);
1603         if (unlikely(!rt)) {
1604                 in6_dev_put(idev);
1605                 dst = ERR_PTR(-ENOMEM);
1606                 goto out;
1607         }
1608
1609         rt->dst.flags |= DST_HOST;
1610         rt->dst.output  = ip6_output;
1611         atomic_set(&rt->dst.__refcnt, 1);
1612         rt->rt6i_gateway  = fl6->daddr;
1613         rt->rt6i_dst.addr = fl6->daddr;
1614         rt->rt6i_dst.plen = 128;
1615         rt->rt6i_idev     = idev;
1616         dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1617
1618         spin_lock_bh(&icmp6_dst_lock);
1619         rt->dst.next = icmp6_dst_gc_list;
1620         icmp6_dst_gc_list = &rt->dst;
1621         spin_unlock_bh(&icmp6_dst_lock);
1622
1623         fib6_force_start_gc(net);
1624
1625         dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1626
1627 out:
1628         return dst;
1629 }
1630
1631 int icmp6_dst_gc(void)
1632 {
1633         struct dst_entry *dst, **pprev;
1634         int more = 0;
1635
1636         spin_lock_bh(&icmp6_dst_lock);
1637         pprev = &icmp6_dst_gc_list;
1638
1639         while ((dst = *pprev) != NULL) {
1640                 if (!atomic_read(&dst->__refcnt)) {
1641                         *pprev = dst->next;
1642                         dst_free(dst);
1643                 } else {
1644                         pprev = &dst->next;
1645                         ++more;
1646                 }
1647         }
1648
1649         spin_unlock_bh(&icmp6_dst_lock);
1650
1651         return more;
1652 }
1653
1654 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1655                             void *arg)
1656 {
1657         struct dst_entry *dst, **pprev;
1658
1659         spin_lock_bh(&icmp6_dst_lock);
1660         pprev = &icmp6_dst_gc_list;
1661         while ((dst = *pprev) != NULL) {
1662                 struct rt6_info *rt = (struct rt6_info *) dst;
1663                 if (func(rt, arg)) {
1664                         *pprev = dst->next;
1665                         dst_free(dst);
1666                 } else {
1667                         pprev = &dst->next;
1668                 }
1669         }
1670         spin_unlock_bh(&icmp6_dst_lock);
1671 }
1672
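/* Garbage collection for IPv6 dst entries, invoked by the dst core once
 * the number of entries crosses gc_thresh.  The sweep is skipped when the
 * previous run was less than ip6_rt_gc_min_interval ago and the table is
 * still under ip6_rt_max_size; otherwise fib6_run_gc() runs with the
 * adaptive ip6_rt_gc_expire timeout, which shrinks under sustained
 * pressure and is reset to half of ip6_rt_gc_timeout once the entry count
 * falls below gc_thresh.  A non-zero return tells the caller the table is
 * still over ip6_rt_max_size.
 */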
1673 static int ip6_dst_gc(struct dst_ops *ops)
1674 {
1675         struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1676         int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1677         int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1678         int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1679         int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1680         unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1681         int entries;
1682
1683         entries = dst_entries_get_fast(ops);
1684         if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1685             entries <= rt_max_size)
1686                 goto out;
1687
1688         net->ipv6.ip6_rt_gc_expire++;
1689         fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1690         entries = dst_entries_get_slow(ops);
1691         if (entries < ops->gc_thresh)
1692                 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1693 out:
1694         net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1695         return entries > rt_max_size;
1696 }
1697
1698 static int ip6_convert_metrics(struct mx6_config *mxc,
1699                                const struct fib6_config *cfg)
1700 {
1701         bool ecn_ca = false;
1702         struct nlattr *nla;
1703         int remaining;
1704         u32 *mp;
1705
1706         if (!cfg->fc_mx)
1707                 return 0;
1708
1709         mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1710         if (unlikely(!mp))
1711                 return -ENOMEM;
1712
1713         nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1714                 int type = nla_type(nla);
1715                 u32 val;
1716
1717                 if (!type)
1718                         continue;
1719                 if (unlikely(type > RTAX_MAX))
1720                         goto err;
1721
1722                 if (type == RTAX_CC_ALGO) {
1723                         char tmp[TCP_CA_NAME_MAX];
1724
1725                         nla_strlcpy(tmp, nla, sizeof(tmp));
1726                         val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
1727                         if (val == TCP_CA_UNSPEC)
1728                                 goto err;
1729                 } else {
1730                         val = nla_get_u32(nla);
1731                 }
1732                 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
1733                         goto err;
1734
1735                 mp[type - 1] = val;
1736                 __set_bit(type - 1, mxc->mx_valid);
1737         }
1738
1739         if (ecn_ca) {
1740                 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
1741                 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
1742         }
1743
1744         mxc->mx = mp;
1745         return 0;
1746  err:
1747         kfree(mp);
1748         return -EINVAL;
1749 }
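/* Rough usage sketch for ip6_convert_metrics() above: a request such as
 * "ip -6 route add 2001:db8::/64 dev eth0 mtu 1400" (prefix and device
 * are examples only) carries an RTA_METRICS nest with an RTAX_MTU
 * attribute, so mp[RTAX_MTU - 1] becomes 1400 and the matching bit in
 * mxc->mx_valid is set before the array is handed to __ip6_ins_rt().
 */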
1750
1751 int ip6_route_add(struct fib6_config *cfg)
1752 {
1753         int err;
1754         struct net *net = cfg->fc_nlinfo.nl_net;
1755         struct rt6_info *rt = NULL;
1756         struct net_device *dev = NULL;
1757         struct inet6_dev *idev = NULL;
1758         struct fib6_table *table;
1759         struct mx6_config mxc = { .mx = NULL, };
1760         int addr_type;
1761
1762         if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1763                 return -EINVAL;
1764 #ifndef CONFIG_IPV6_SUBTREES
1765         if (cfg->fc_src_len)
1766                 return -EINVAL;
1767 #endif
1768         if (cfg->fc_ifindex) {
1769                 err = -ENODEV;
1770                 dev = dev_get_by_index(net, cfg->fc_ifindex);
1771                 if (!dev)
1772                         goto out;
1773                 idev = in6_dev_get(dev);
1774                 if (!idev)
1775                         goto out;
1776         }
1777
1778         if (cfg->fc_metric == 0)
1779                 cfg->fc_metric = IP6_RT_PRIO_USER;
1780
1781         err = -ENOBUFS;
1782         if (cfg->fc_nlinfo.nlh &&
1783             !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1784                 table = fib6_get_table(net, cfg->fc_table);
1785                 if (!table) {
1786                         pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1787                         table = fib6_new_table(net, cfg->fc_table);
1788                 }
1789         } else {
1790                 table = fib6_new_table(net, cfg->fc_table);
1791         }
1792
1793         if (!table)
1794                 goto out;
1795
1796         rt = ip6_dst_alloc(net, NULL,
1797                            (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
1798
1799         if (!rt) {
1800                 err = -ENOMEM;
1801                 goto out;
1802         }
1803
1804         if (cfg->fc_flags & RTF_EXPIRES)
1805                 rt6_set_expires(rt, jiffies +
1806                                 clock_t_to_jiffies(cfg->fc_expires));
1807         else
1808                 rt6_clean_expires(rt);
1809
1810         if (cfg->fc_protocol == RTPROT_UNSPEC)
1811                 cfg->fc_protocol = RTPROT_BOOT;
1812         rt->rt6i_protocol = cfg->fc_protocol;
1813
1814         addr_type = ipv6_addr_type(&cfg->fc_dst);
1815
1816         if (addr_type & IPV6_ADDR_MULTICAST)
1817                 rt->dst.input = ip6_mc_input;
1818         else if (cfg->fc_flags & RTF_LOCAL)
1819                 rt->dst.input = ip6_input;
1820         else
1821                 rt->dst.input = ip6_forward;
1822
1823         rt->dst.output = ip6_output;
1824
1825         if (cfg->fc_encap) {
1826                 struct lwtunnel_state *lwtstate;
1827
1828                 err = lwtunnel_build_state(dev, cfg->fc_encap_type,
1829                                            cfg->fc_encap, AF_INET6, cfg,
1830                                            &lwtstate);
1831                 if (err)
1832                         goto out;
1833                 rt->dst.lwtstate = lwtstate_get(lwtstate);
1834                 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
1835                         rt->dst.lwtstate->orig_output = rt->dst.output;
1836                         rt->dst.output = lwtunnel_output;
1837                 }
1838                 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
1839                         rt->dst.lwtstate->orig_input = rt->dst.input;
1840                         rt->dst.input = lwtunnel_input;
1841                 }
1842         }
1843
1844         ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1845         rt->rt6i_dst.plen = cfg->fc_dst_len;
1846         if (rt->rt6i_dst.plen == 128)
1847                 rt->dst.flags |= DST_HOST;
1848
1849 #ifdef CONFIG_IPV6_SUBTREES
1850         ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1851         rt->rt6i_src.plen = cfg->fc_src_len;
1852 #endif
1853
1854         rt->rt6i_metric = cfg->fc_metric;
1855
1856         /* We cannot add true routes via loopback here;
1857            they would result in kernel looping. Promote them to reject routes.
1858          */
1859         if ((cfg->fc_flags & RTF_REJECT) ||
1860             (dev && (dev->flags & IFF_LOOPBACK) &&
1861              !(addr_type & IPV6_ADDR_LOOPBACK) &&
1862              !(cfg->fc_flags & RTF_LOCAL))) {
1863                 /* hold loopback dev/idev if we haven't done so. */
1864                 if (dev != net->loopback_dev) {
1865                         if (dev) {
1866                                 dev_put(dev);
1867                                 in6_dev_put(idev);
1868                         }
1869                         dev = net->loopback_dev;
1870                         dev_hold(dev);
1871                         idev = in6_dev_get(dev);
1872                         if (!idev) {
1873                                 err = -ENODEV;
1874                                 goto out;
1875                         }
1876                 }
1877                 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1878                 switch (cfg->fc_type) {
1879                 case RTN_BLACKHOLE:
1880                         rt->dst.error = -EINVAL;
1881                         rt->dst.output = dst_discard_sk;
1882                         rt->dst.input = dst_discard;
1883                         break;
1884                 case RTN_PROHIBIT:
1885                         rt->dst.error = -EACCES;
1886                         rt->dst.output = ip6_pkt_prohibit_out;
1887                         rt->dst.input = ip6_pkt_prohibit;
1888                         break;
1889                 case RTN_THROW:
1890                 default:
1891                         rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1892                                         : -ENETUNREACH;
1893                         rt->dst.output = ip6_pkt_discard_out;
1894                         rt->dst.input = ip6_pkt_discard;
1895                         break;
1896                 }
1897                 goto install_route;
1898         }
1899
1900         if (cfg->fc_flags & RTF_GATEWAY) {
1901                 const struct in6_addr *gw_addr;
1902                 int gwa_type;
1903
1904                 gw_addr = &cfg->fc_gateway;
1905                 gwa_type = ipv6_addr_type(gw_addr);
1906
1907                 /* If gw_addr is a local address, we may fail to detect this when
1908                  * the address is still TENTATIVE (DAD in progress): rt6_lookup()
1909                  * will return the already-added prefix route via the interface the
1910                  * prefix route was assigned to, which might be non-loopback.
1911                  */
1912                 err = -EINVAL;
1913                 if (ipv6_chk_addr_and_flags(net, gw_addr,
1914                                             gwa_type & IPV6_ADDR_LINKLOCAL ?
1915                                             dev : NULL, 0, 0))
1916                         goto out;
1917
1918                 rt->rt6i_gateway = *gw_addr;
1919
1920                 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1921                         struct rt6_info *grt;
1922
1923                         /* IPv6 strictly prohibits using non-link-local
1924                            addresses as nexthop addresses.
1925                            Otherwise, a router will not be able to send redirects.
1926                            It is very good, but in some (rare!) circumstances
1927                            (SIT, PtP, NBMA NOARP links) it is handy to allow
1928                            some exceptions. --ANK
1929                          */
1930                         if (!(gwa_type & IPV6_ADDR_UNICAST))
1931                                 goto out;
1932
1933                         grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1934
1935                         err = -EHOSTUNREACH;
1936                         if (!grt)
1937                                 goto out;
1938                         if (dev) {
1939                                 if (dev != grt->dst.dev) {
1940                                         ip6_rt_put(grt);
1941                                         goto out;
1942                                 }
1943                         } else {
1944                                 dev = grt->dst.dev;
1945                                 idev = grt->rt6i_idev;
1946                                 dev_hold(dev);
1947                                 in6_dev_hold(grt->rt6i_idev);
1948                         }
1949                         if (!(grt->rt6i_flags & RTF_GATEWAY))
1950                                 err = 0;
1951                         ip6_rt_put(grt);
1952
1953                         if (err)
1954                                 goto out;
1955                 }
1956                 err = -EINVAL;
1957                 if (!dev || (dev->flags & IFF_LOOPBACK))
1958                         goto out;
1959         }
1960
1961         err = -ENODEV;
1962         if (!dev)
1963                 goto out;
1964
1965         if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1966                 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1967                         err = -EINVAL;
1968                         goto out;
1969                 }
1970                 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1971                 rt->rt6i_prefsrc.plen = 128;
1972         } else
1973                 rt->rt6i_prefsrc.plen = 0;
1974
1975         rt->rt6i_flags = cfg->fc_flags;
1976
1977 install_route:
1978         rt->dst.dev = dev;
1979         rt->rt6i_idev = idev;
1980         rt->rt6i_table = table;
1981
1982         cfg->fc_nlinfo.nl_net = dev_net(dev);
1983
1984         err = ip6_convert_metrics(&mxc, cfg);
1985         if (err)
1986                 goto out;
1987
1988         err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
1989
1990         kfree(mxc.mx);
1991         return err;
1992 out:
1993         if (dev)
1994                 dev_put(dev);
1995         if (idev)
1996                 in6_dev_put(idev);
1997         if (rt)
1998                 dst_free(&rt->dst);
1999         return err;
2000 }
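/* Illustration of the reject-route promotion in ip6_route_add() above
 * (the commands and prefix are examples only):
 * "ip -6 route add blackhole 2001:db8::/32" yields RTN_BLACKHOLE and
 * dst.error == -EINVAL, "prohibit" yields -EACCES, "throw" yields
 * -EAGAIN, and plain "unreachable" falls through to -ENETUNREACH; all
 * of them are installed as RTF_REJECT|RTF_NONEXTHOP routes bound to the
 * loopback device.
 */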
2001
2002 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2003 {
2004         int err;
2005         struct fib6_table *table;
2006         struct net *net = dev_net(rt->dst.dev);
2007
2008         if (rt == net->ipv6.ip6_null_entry) {
2009                 err = -ENOENT;
2010                 goto out;
2011         }
2012
2013         table = rt->rt6i_table;
2014         write_lock_bh(&table->tb6_lock);
2015         err = fib6_del(rt, info);
2016         write_unlock_bh(&table->tb6_lock);
2017
2018 out:
2019         ip6_rt_put(rt);
2020         return err;
2021 }
2022
2023 int ip6_del_rt(struct rt6_info *rt)
2024 {
2025         struct nl_info info = {
2026                 .nl_net = dev_net(rt->dst.dev),
2027         };
2028         return __ip6_del_rt(rt, &info);
2029 }
2030
2031 static int ip6_route_del(struct fib6_config *cfg)
2032 {
2033         struct fib6_table *table;
2034         struct fib6_node *fn;
2035         struct rt6_info *rt;
2036         int err = -ESRCH;
2037
2038         table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2039         if (!table)
2040                 return err;
2041
2042         read_lock_bh(&table->tb6_lock);
2043
2044         fn = fib6_locate(&table->tb6_root,
2045                          &cfg->fc_dst, cfg->fc_dst_len,
2046                          &cfg->fc_src, cfg->fc_src_len);
2047
2048         if (fn) {
2049                 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2050                         if ((rt->rt6i_flags & RTF_CACHE) &&
2051                             !(cfg->fc_flags & RTF_CACHE))
2052                                 continue;
2053                         if (cfg->fc_ifindex &&
2054                             (!rt->dst.dev ||
2055                              rt->dst.dev->ifindex != cfg->fc_ifindex))
2056                                 continue;
2057                         if (cfg->fc_flags & RTF_GATEWAY &&
2058                             !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2059                                 continue;
2060                         if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2061                                 continue;
2062                         dst_hold(&rt->dst);
2063                         read_unlock_bh(&table->tb6_lock);
2064
2065                         return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2066                 }
2067         }
2068         read_unlock_bh(&table->tb6_lock);
2069
2070         return err;
2071 }
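/* ip6_route_del() above matches on the destination (and, with subtrees,
 * source) prefix plus whichever of ifindex, gateway and metric were
 * supplied; e.g. "ip -6 route del 2001:db8::/64 via fe80::1 dev eth0"
 * (addresses and device are examples only) removes only the entry with
 * that gateway and interface.
 */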
2072
2073 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2074 {
2075         struct net *net = dev_net(skb->dev);
2076         struct netevent_redirect netevent;
2077         struct rt6_info *rt, *nrt = NULL;
2078         struct ndisc_options ndopts;
2079         struct inet6_dev *in6_dev;
2080         struct neighbour *neigh;
2081         struct rd_msg *msg;
2082         int optlen, on_link;
2083         u8 *lladdr;
2084
2085         optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2086         optlen -= sizeof(*msg);
2087
2088         if (optlen < 0) {
2089                 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2090                 return;
2091         }
2092
2093         msg = (struct rd_msg *)icmp6_hdr(skb);
2094
2095         if (ipv6_addr_is_multicast(&msg->dest)) {
2096                 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2097                 return;
2098         }
2099
2100         on_link = 0;
2101         if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2102                 on_link = 1;
2103         } else if (ipv6_addr_type(&msg->target) !=
2104                    (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2105                 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2106                 return;
2107         }
2108
2109         in6_dev = __in6_dev_get(skb->dev);
2110         if (!in6_dev)
2111                 return;
2112         if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
2113                 return;
2114
2115         /* RFC2461 8.1:
2116          *      The IP source address of the Redirect MUST be the same as the current
2117          *      first-hop router for the specified ICMP Destination Address.
2118          */
2119
2120         if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
2121                 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2122                 return;
2123         }
2124
2125         lladdr = NULL;
2126         if (ndopts.nd_opts_tgt_lladdr) {
2127                 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
2128                                              skb->dev);
2129                 if (!lladdr) {
2130                         net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2131                         return;
2132                 }
2133         }
2134
2135         rt = (struct rt6_info *) dst;
2136         if (rt == net->ipv6.ip6_null_entry) {
2137                 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2138                 return;
2139         }
2140
2141         /* Redirect received -> path was valid.
2142          * Look, redirects are sent only in response to data packets,
2143          * so this nexthop is apparently reachable. --ANK
2144          */
2145         dst_confirm(&rt->dst);
2146
2147         neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
2148         if (!neigh)
2149                 return;
2150
2151         /*
2152          *      We have finally decided to accept it.
2153          */
2154
2155         neigh_update(neigh, lladdr, NUD_STALE,
2156                      NEIGH_UPDATE_F_WEAK_OVERRIDE|
2157                      NEIGH_UPDATE_F_OVERRIDE|
2158                      (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2159                                      NEIGH_UPDATE_F_ISROUTER))
2160                      );
2161
2162         nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
2163         if (!nrt)
2164                 goto out;
2165
2166         nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
2167         if (on_link)
2168                 nrt->rt6i_flags &= ~RTF_GATEWAY;
2169
2170         nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
2171
2172         if (ip6_ins_rt(nrt))
2173                 goto out;
2174
2175         netevent.old = &rt->dst;
2176         netevent.new = &nrt->dst;
2177         netevent.daddr = &msg->dest;
2178         netevent.neigh = neigh;
2179         call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
2180
2181         if (rt->rt6i_flags & RTF_CACHE) {
2182                 rt = (struct rt6_info *) dst_clone(&rt->dst);
2183                 ip6_del_rt(rt);
2184         }
2185
2186 out:
2187         neigh_release(neigh);
2188 }
2189
2190 /*
2191  *      Misc support functions
2192  */
2193
2194 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
2195 {
2196         BUG_ON(from->dst.from);
2197
2198         rt->rt6i_flags &= ~RTF_EXPIRES;
2199         dst_hold(&from->dst);
2200         rt->dst.from = &from->dst;
2201         dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
2202 }
2203
2204 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2205 {
2206         rt->dst.input = ort->dst.input;
2207         rt->dst.output = ort->dst.output;
2208         rt->rt6i_dst = ort->rt6i_dst;
2209         rt->dst.error = ort->dst.error;
2210         rt->rt6i_idev = ort->rt6i_idev;
2211         if (rt->rt6i_idev)
2212                 in6_dev_hold(rt->rt6i_idev);
2213         rt->dst.lastuse = jiffies;
2214         rt->rt6i_gateway = ort->rt6i_gateway;
2215         rt->rt6i_flags = ort->rt6i_flags;
2216         rt6_set_from(rt, ort);
2217         rt->rt6i_metric = ort->rt6i_metric;
2218 #ifdef CONFIG_IPV6_SUBTREES
2219         rt->rt6i_src = ort->rt6i_src;
2220 #endif
2221         rt->rt6i_prefsrc = ort->rt6i_prefsrc;
2222         rt->rt6i_table = ort->rt6i_table;
2223         rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
2224 }
2225
2226 #ifdef CONFIG_IPV6_ROUTE_INFO
2227 static struct rt6_info *rt6_get_route_info(struct net *net,
2228                                            const struct in6_addr *prefix, int prefixlen,
2229                                            const struct in6_addr *gwaddr, int ifindex)
2230 {
2231         struct fib6_node *fn;
2232         struct rt6_info *rt = NULL;
2233         struct fib6_table *table;
2234
2235         table = fib6_get_table(net, RT6_TABLE_INFO);
2236         if (!table)
2237                 return NULL;
2238
2239         read_lock_bh(&table->tb6_lock);
2240         fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2241         if (!fn)
2242                 goto out;
2243
2244         for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2245                 if (rt->dst.dev->ifindex != ifindex)
2246                         continue;
2247                 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2248                         continue;
2249                 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2250                         continue;
2251                 dst_hold(&rt->dst);
2252                 break;
2253         }
2254 out:
2255         read_unlock_bh(&table->tb6_lock);
2256         return rt;
2257 }
2258
2259 static struct rt6_info *rt6_add_route_info(struct net *net,
2260                                            const struct in6_addr *prefix, int prefixlen,
2261                                            const struct in6_addr *gwaddr, int ifindex,
2262                                            unsigned int pref)
2263 {
2264         struct fib6_config cfg = {
2265                 .fc_table       = RT6_TABLE_INFO,
2266                 .fc_metric      = IP6_RT_PRIO_USER,
2267                 .fc_ifindex     = ifindex,
2268                 .fc_dst_len     = prefixlen,
2269                 .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2270                                   RTF_UP | RTF_PREF(pref),
2271                 .fc_nlinfo.portid = 0,
2272                 .fc_nlinfo.nlh = NULL,
2273                 .fc_nlinfo.nl_net = net,
2274         };
2275
2276         cfg.fc_dst = *prefix;
2277         cfg.fc_gateway = *gwaddr;
2278
2279         /* We should treat it as a default route if prefix length is 0. */
2280         if (!prefixlen)
2281                 cfg.fc_flags |= RTF_DEFAULT;
2282
2283         ip6_route_add(&cfg);
2284
2285         return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
2286 }
2287 #endif
2288
2289 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2290 {
2291         struct rt6_info *rt;
2292         struct fib6_table *table;
2293
2294         table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
2295         if (!table)
2296                 return NULL;
2297
2298         read_lock_bh(&table->tb6_lock);
2299         for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2300                 if (dev == rt->dst.dev &&
2301                     ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2302                     ipv6_addr_equal(&rt->rt6i_gateway, addr))
2303                         break;
2304         }
2305         if (rt)
2306                 dst_hold(&rt->dst);
2307         read_unlock_bh(&table->tb6_lock);
2308         return rt;
2309 }
2310
2311 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2312                                      struct net_device *dev,
2313                                      unsigned int pref)
2314 {
2315         struct fib6_config cfg = {
2316                 .fc_table       = RT6_TABLE_DFLT,
2317                 .fc_metric      = IP6_RT_PRIO_USER,
2318                 .fc_ifindex     = dev->ifindex,
2319                 .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2320                                   RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2321                 .fc_nlinfo.portid = 0,
2322                 .fc_nlinfo.nlh = NULL,
2323                 .fc_nlinfo.nl_net = dev_net(dev),
2324         };
2325
2326         cfg.fc_gateway = *gwaddr;
2327
2328         ip6_route_add(&cfg);
2329
2330         return rt6_get_dflt_router(gwaddr, dev);
2331 }
2332
2333 void rt6_purge_dflt_routers(struct net *net)
2334 {
2335         struct rt6_info *rt;
2336         struct fib6_table *table;
2337
2338         /* NOTE: Keep consistent with rt6_get_dflt_router */
2339         table = fib6_get_table(net, RT6_TABLE_DFLT);
2340         if (!table)
2341                 return;
2342
2343 restart:
2344         read_lock_bh(&table->tb6_lock);
2345         for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2346                 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2347                     (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2348                         dst_hold(&rt->dst);
2349                         read_unlock_bh(&table->tb6_lock);
2350                         ip6_del_rt(rt);
2351                         goto restart;
2352                 }
2353         }
2354         read_unlock_bh(&table->tb6_lock);
2355 }
2356
2357 static void rtmsg_to_fib6_config(struct net *net,
2358                                  struct in6_rtmsg *rtmsg,
2359                                  struct fib6_config *cfg)
2360 {
2361         memset(cfg, 0, sizeof(*cfg));
2362
2363         cfg->fc_table = RT6_TABLE_MAIN;
2364         cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2365         cfg->fc_metric = rtmsg->rtmsg_metric;
2366         cfg->fc_expires = rtmsg->rtmsg_info;
2367         cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2368         cfg->fc_src_len = rtmsg->rtmsg_src_len;
2369         cfg->fc_flags = rtmsg->rtmsg_flags;
2370
2371         cfg->fc_nlinfo.nl_net = net;
2372
2373         cfg->fc_dst = rtmsg->rtmsg_dst;
2374         cfg->fc_src = rtmsg->rtmsg_src;
2375         cfg->fc_gateway = rtmsg->rtmsg_gateway;
2376 }
2377
2378 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2379 {
2380         struct fib6_config cfg;
2381         struct in6_rtmsg rtmsg;
2382         int err;
2383
2384         switch (cmd) {
2385         case SIOCADDRT:         /* Add a route */
2386         case SIOCDELRT:         /* Delete a route */
2387                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2388                         return -EPERM;
2389                 err = copy_from_user(&rtmsg, arg,
2390                                      sizeof(struct in6_rtmsg));
2391                 if (err)
2392                         return -EFAULT;
2393
2394                 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2395
2396                 rtnl_lock();
2397                 switch (cmd) {
2398                 case SIOCADDRT:
2399                         err = ip6_route_add(&cfg);
2400                         break;
2401                 case SIOCDELRT:
2402                         err = ip6_route_del(&cfg);
2403                         break;
2404                 default:
2405                         err = -EINVAL;
2406                 }
2407                 rtnl_unlock();
2408
2409                 return err;
2410         }
2411
2412         return -EINVAL;
2413 }
2414
2415 /*
2416  *      Drop the packet on the floor
2417  */
2418
2419 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2420 {
2421         int type;
2422         struct dst_entry *dst = skb_dst(skb);
2423         switch (ipstats_mib_noroutes) {
2424         case IPSTATS_MIB_INNOROUTES:
2425                 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2426                 if (type == IPV6_ADDR_ANY) {
2427                         IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2428                                       IPSTATS_MIB_INADDRERRORS);
2429                         break;
2430                 }
2431                 /* FALLTHROUGH */
2432         case IPSTATS_MIB_OUTNOROUTES:
2433                 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2434                               ipstats_mib_noroutes);
2435                 break;
2436         }
2437         icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2438         kfree_skb(skb);
2439         return 0;
2440 }
2441
2442 static int ip6_pkt_discard(struct sk_buff *skb)
2443 {
2444         return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2445 }
2446
2447 static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
2448 {
2449         skb->dev = skb_dst(skb)->dev;
2450         return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2451 }
2452
2453 static int ip6_pkt_prohibit(struct sk_buff *skb)
2454 {
2455         return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2456 }
2457
2458 static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
2459 {
2460         skb->dev = skb_dst(skb)->dev;
2461         return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2462 }
2463
2464 /*
2465  *      Allocate a dst for local (unicast / anycast) address.
2466  */
2467
2468 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2469                                     const struct in6_addr *addr,
2470                                     bool anycast)
2471 {
2472         struct net *net = dev_net(idev->dev);
2473         struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2474                                             DST_NOCOUNT);
2475         if (!rt)
2476                 return ERR_PTR(-ENOMEM);
2477
2478         in6_dev_hold(idev);
2479
2480         rt->dst.flags |= DST_HOST;
2481         rt->dst.input = ip6_input;
2482         rt->dst.output = ip6_output;
2483         rt->rt6i_idev = idev;
2484
2485         rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2486         if (anycast)
2487                 rt->rt6i_flags |= RTF_ANYCAST;
2488         else
2489                 rt->rt6i_flags |= RTF_LOCAL;
2490
2491         rt->rt6i_gateway  = *addr;
2492         rt->rt6i_dst.addr = *addr;
2493         rt->rt6i_dst.plen = 128;
2494         rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2495
2496         atomic_set(&rt->dst.__refcnt, 1);
2497
2498         return rt;
2499 }
2500
2501 int ip6_route_get_saddr(struct net *net,
2502                         struct rt6_info *rt,
2503                         const struct in6_addr *daddr,
2504                         unsigned int prefs,
2505                         struct in6_addr *saddr)
2506 {
2507         struct inet6_dev *idev =
2508                 rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
2509         int err = 0;
2510         if (rt && rt->rt6i_prefsrc.plen)
2511                 *saddr = rt->rt6i_prefsrc.addr;
2512         else
2513                 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2514                                          daddr, prefs, saddr);
2515         return err;
2516 }
2517
2518 /* remove deleted ip from prefsrc entries */
2519 struct arg_dev_net_ip {
2520         struct net_device *dev;
2521         struct net *net;
2522         struct in6_addr *addr;
2523 };
2524
2525 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2526 {
2527         struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2528         struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2529         struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2530
2531         if (((void *)rt->dst.dev == dev || !dev) &&
2532             rt != net->ipv6.ip6_null_entry &&
2533             ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2534                 /* remove prefsrc entry */
2535                 rt->rt6i_prefsrc.plen = 0;
2536         }
2537         return 0;
2538 }
2539
2540 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2541 {
2542         struct net *net = dev_net(ifp->idev->dev);
2543         struct arg_dev_net_ip adni = {
2544                 .dev = ifp->idev->dev,
2545                 .net = net,
2546                 .addr = &ifp->addr,
2547         };
2548         fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2549 }
2550
2551 #define RTF_RA_ROUTER           (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2552 #define RTF_CACHE_GATEWAY       (RTF_GATEWAY | RTF_CACHE)
2553
2554 /* Remove routers and update dst entries when a gateway turns into a host. */
2555 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2556 {
2557         struct in6_addr *gateway = (struct in6_addr *)arg;
2558
2559         if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2560              ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2561              ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2562                 return -1;
2563         }
2564         return 0;
2565 }
2566
2567 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2568 {
2569         fib6_clean_all(net, fib6_clean_tohost, gateway);
2570 }
2571
2572 struct arg_dev_net {
2573         struct net_device *dev;
2574         struct net *net;
2575 };
2576
2577 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2578 {
2579         const struct arg_dev_net *adn = arg;
2580         const struct net_device *dev = adn->dev;
2581
2582         if ((rt->dst.dev == dev || !dev) &&
2583             rt != adn->net->ipv6.ip6_null_entry)
2584                 return -1;
2585
2586         return 0;
2587 }
2588
2589 void rt6_ifdown(struct net *net, struct net_device *dev)
2590 {
2591         struct arg_dev_net adn = {
2592                 .dev = dev,
2593                 .net = net,
2594         };
2595
2596         fib6_clean_all(net, fib6_ifdown, &adn);
2597         icmp6_clean_all(fib6_ifdown, &adn);
2598         rt6_uncached_list_flush_dev(net, dev);
2599 }
2600
2601 struct rt6_mtu_change_arg {
2602         struct net_device *dev;
2603         unsigned int mtu;
2604 };
2605
2606 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2607 {
2608         struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2609         struct inet6_dev *idev;
2610
2611         /* In IPv6, PMTU discovery is not optional,
2612            so the RTAX_MTU lock cannot disable it.
2613            We still use this lock to block changes
2614            caused by addrconf/ndisc.
2615         */
2616
2617         idev = __in6_dev_get(arg->dev);
2618         if (!idev)
2619                 return 0;
2620
2621         /* For an administrative MTU increase, there is no way to discover
2622            an IPv6 PMTU increase, so the PMTU should also be raised here.
2623            Since RFC 1981 does not cover administrative MTU increases,
2624            updating the PMTU on such an increase is a MUST (e.g. for jumbo frames).
2625          */
2626         /*
2627            If the new MTU is less than the route PMTU, the new MTU will be the
2628            lowest MTU in the path; update the route PMTU to reflect the
2629            decrease. If the new MTU is greater than the route PMTU, and the
2630            old MTU was the lowest MTU in the path, update the route PMTU
2631            to reflect the increase. In that case, if another node on the path
2632            has the lowest MTU, a Packet Too Big message will trigger
2633            PMTU discovery again.
2634          */
2635         if (rt->dst.dev == arg->dev &&
2636             !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2637                 if (rt->rt6i_flags & RTF_CACHE) {
2638                         /* For RTF_CACHE with rt6i_pmtu == 0
2639                          * (i.e. a redirected route),
2640                          * the metrics of its rt->dst.from has already
2641                          * been updated.
2642                          */
2643                         if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
2644                                 rt->rt6i_pmtu = arg->mtu;
2645                 } else if (dst_mtu(&rt->dst) >= arg->mtu ||
2646                            (dst_mtu(&rt->dst) < arg->mtu &&
2647                             dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
2648                         dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2649                 }
2650         }
2651         return 0;
2652 }
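/* Example for rt6_mtu_change_route() above: lowering a device MTU (say
 * from 1500 to 1400) clamps every unlocked, non-cached route on that
 * device whose MTU is still at or above the new value, while raising it
 * only lifts routes whose MTU currently equals the interface's mtu6, so
 * a deliberately smaller administratively set RTAX_MTU is left alone.
 */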
2653
2654 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2655 {
2656         struct rt6_mtu_change_arg arg = {
2657                 .dev = dev,
2658                 .mtu = mtu,
2659         };
2660
2661         fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2662 }
2663
2664 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2665         [RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
2666         [RTA_OIF]               = { .type = NLA_U32 },
2667         [RTA_IIF]               = { .type = NLA_U32 },
2668         [RTA_PRIORITY]          = { .type = NLA_U32 },
2669         [RTA_METRICS]           = { .type = NLA_NESTED },
2670         [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
2671         [RTA_PREF]              = { .type = NLA_U8 },
2672         [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
2673         [RTA_ENCAP]             = { .type = NLA_NESTED },
2674 };
2675
2676 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2677                               struct fib6_config *cfg)
2678 {
2679         struct rtmsg *rtm;
2680         struct nlattr *tb[RTA_MAX+1];
2681         unsigned int pref;
2682         int err;
2683
2684         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2685         if (err < 0)
2686                 goto errout;
2687
2688         err = -EINVAL;
2689         rtm = nlmsg_data(nlh);
2690         memset(cfg, 0, sizeof(*cfg));
2691
2692         cfg->fc_table = rtm->rtm_table;
2693         cfg->fc_dst_len = rtm->rtm_dst_len;
2694         cfg->fc_src_len = rtm->rtm_src_len;
2695         cfg->fc_flags = RTF_UP;
2696         cfg->fc_protocol = rtm->rtm_protocol;
2697         cfg->fc_type = rtm->rtm_type;
2698
2699         if (rtm->rtm_type == RTN_UNREACHABLE ||
2700             rtm->rtm_type == RTN_BLACKHOLE ||
2701             rtm->rtm_type == RTN_PROHIBIT ||
2702             rtm->rtm_type == RTN_THROW)
2703                 cfg->fc_flags |= RTF_REJECT;
2704
2705         if (rtm->rtm_type == RTN_LOCAL)
2706                 cfg->fc_flags |= RTF_LOCAL;
2707
2708         if (rtm->rtm_flags & RTM_F_CLONED)
2709                 cfg->fc_flags |= RTF_CACHE;
2710
2711         cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2712         cfg->fc_nlinfo.nlh = nlh;
2713         cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2714
2715         if (tb[RTA_GATEWAY]) {
2716                 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
2717                 cfg->fc_flags |= RTF_GATEWAY;
2718         }
2719
2720         if (tb[RTA_DST]) {
2721                 int plen = (rtm->rtm_dst_len + 7) >> 3;
2722
2723                 if (nla_len(tb[RTA_DST]) < plen)
2724                         goto errout;
2725
2726                 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2727         }
2728
2729         if (tb[RTA_SRC]) {
2730                 int plen = (rtm->rtm_src_len + 7) >> 3;
2731
2732                 if (nla_len(tb[RTA_SRC]) < plen)
2733                         goto errout;
2734
2735                 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2736         }
2737
2738         if (tb[RTA_PREFSRC])
2739                 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
2740
2741         if (tb[RTA_OIF])
2742                 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2743
2744         if (tb[RTA_PRIORITY])
2745                 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2746
2747         if (tb[RTA_METRICS]) {
2748                 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2749                 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2750         }
2751
2752         if (tb[RTA_TABLE])
2753                 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2754
2755         if (tb[RTA_MULTIPATH]) {
2756                 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2757                 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2758         }
2759
2760         if (tb[RTA_PREF]) {
2761                 pref = nla_get_u8(tb[RTA_PREF]);
2762                 if (pref != ICMPV6_ROUTER_PREF_LOW &&
2763                     pref != ICMPV6_ROUTER_PREF_HIGH)
2764                         pref = ICMPV6_ROUTER_PREF_MEDIUM;
2765                 cfg->fc_flags |= RTF_PREF(pref);
2766         }
2767
2768         if (tb[RTA_ENCAP])
2769                 cfg->fc_encap = tb[RTA_ENCAP];
2770
2771         if (tb[RTA_ENCAP_TYPE])
2772                 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
2773
2774         err = 0;
2775 errout:
2776         return err;
2777 }
2778
2779 static int ip6_route_multipath(struct fib6_config *cfg, int add)
2780 {
2781         struct fib6_config r_cfg;
2782         struct rtnexthop *rtnh;
2783         int remaining;
2784         int attrlen;
2785         int err = 0, last_err = 0;
2786
2787         remaining = cfg->fc_mp_len;
2788 beginning:
2789         rtnh = (struct rtnexthop *)cfg->fc_mp;
2790
2791         /* Parse a Multipath Entry */
2792         while (rtnh_ok(rtnh, remaining)) {
2793                 memcpy(&r_cfg, cfg, sizeof(*cfg));
2794                 if (rtnh->rtnh_ifindex)
2795                         r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2796
2797                 attrlen = rtnh_attrlen(rtnh);
2798                 if (attrlen > 0) {
2799                         struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2800
2801                         nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2802                         if (nla) {
2803                                 r_cfg.fc_gateway = nla_get_in6_addr(nla);
2804                                 r_cfg.fc_flags |= RTF_GATEWAY;
2805                         }
2806                         r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
2807                         nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
2808                         if (nla)
2809                                 r_cfg.fc_encap_type = nla_get_u16(nla);
2810                 }
2811                 err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2812                 if (err) {
2813                         last_err = err;
2814                         /* If we are trying to remove a route, do not stop the
2815                          * loop when ip6_route_del() fails (because next hop is
2816                          * already gone); we should try to remove all next hops.
2817                          */
2818                         if (add) {
2819                                 /* If add fails, we should try to delete all
2820                                  * next hops that have been already added.
2821                                  */
2822                                 add = 0;
2823                                 remaining = cfg->fc_mp_len - remaining;
2824                                 goto beginning;
2825                         }
2826                 }
2827                 /* Because each route is added as a single route we remove
2828                  * these flags after the first nexthop: if there is a collision,
2829                  * we have already failed to add the first nexthop
2830                  * (fib6_add_rt2node() has rejected it); when replacing, the old
2831                  * nexthops have been replaced by the first new one, and the rest
2832                  * should be added to it.
2833                  */
2834                 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
2835                                                      NLM_F_REPLACE);
2836                 rtnh = rtnh_next(rtnh, &remaining);
2837         }
2838
2839         return last_err;
2840 }
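/* Multipath sketch for ip6_route_multipath() above: a request such as
 * "ip -6 route add 2001:db8::/64 nexthop via fe80::1 dev eth0
 *  nexthop via fe80::2 dev eth1" (addresses and devices are examples
 * only) carries one rtnexthop per hop in RTA_MULTIPATH; each one is
 * added as an individual route, and if a later add fails the
 * already-processed part of the list is walked again with add == 0 so
 * the nexthops that did get installed are deleted.
 */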
2841
2842 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2843 {
2844         struct fib6_config cfg;
2845         int err;
2846
2847         err = rtm_to_fib6_config(skb, nlh, &cfg);
2848         if (err < 0)
2849                 return err;
2850
2851         if (cfg.fc_mp)
2852                 return ip6_route_multipath(&cfg, 0);
2853         else
2854                 return ip6_route_del(&cfg);
2855 }
2856
2857 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2858 {
2859         struct fib6_config cfg;
2860         int err;
2861
2862         err = rtm_to_fib6_config(skb, nlh, &cfg);
2863         if (err < 0)
2864                 return err;
2865
2866         if (cfg.fc_mp)
2867                 return ip6_route_multipath(&cfg, 1);
2868         else
2869                 return ip6_route_add(&cfg);
2870 }
2871
2872 static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
2873 {
2874         return NLMSG_ALIGN(sizeof(struct rtmsg))
2875                + nla_total_size(16) /* RTA_SRC */
2876                + nla_total_size(16) /* RTA_DST */
2877                + nla_total_size(16) /* RTA_GATEWAY */
2878                + nla_total_size(16) /* RTA_PREFSRC */
2879                + nla_total_size(4) /* RTA_TABLE */
2880                + nla_total_size(4) /* RTA_IIF */
2881                + nla_total_size(4) /* RTA_OIF */
2882                + nla_total_size(4) /* RTA_PRIORITY */
2883                + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2884                + nla_total_size(sizeof(struct rta_cacheinfo))
2885                + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
2886                + nla_total_size(1) /* RTA_PREF */
2887                + lwtunnel_get_encap_size(rt->dst.lwtstate);
2888 }
2889
2890 static int rt6_fill_node(struct net *net,
2891                          struct sk_buff *skb, struct rt6_info *rt,
2892                          struct in6_addr *dst, struct in6_addr *src,
2893                          int iif, int type, u32 portid, u32 seq,
2894                          int prefix, int nowait, unsigned int flags)
2895 {
2896         u32 metrics[RTAX_MAX];
2897         struct rtmsg *rtm;
2898         struct nlmsghdr *nlh;
2899         long expires;
2900         u32 table;
2901
2902         if (prefix) {   /* user wants prefix routes only */
2903                 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2904                         /* success since this is not a prefix route */
2905                         return 1;
2906                 }
2907         }
2908
2909         nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2910         if (!nlh)
2911                 return -EMSGSIZE;
2912
2913         rtm = nlmsg_data(nlh);
2914         rtm->rtm_family = AF_INET6;
2915         rtm->rtm_dst_len = rt->rt6i_dst.plen;
2916         rtm->rtm_src_len = rt->rt6i_src.plen;
2917         rtm->rtm_tos = 0;
2918         if (rt->rt6i_table)
2919                 table = rt->rt6i_table->tb6_id;
2920         else
2921                 table = RT6_TABLE_UNSPEC;
2922         rtm->rtm_table = table;
2923         if (nla_put_u32(skb, RTA_TABLE, table))
2924                 goto nla_put_failure;
2925         if (rt->rt6i_flags & RTF_REJECT) {
2926                 switch (rt->dst.error) {
2927                 case -EINVAL:
2928                         rtm->rtm_type = RTN_BLACKHOLE;
2929                         break;
2930                 case -EACCES:
2931                         rtm->rtm_type = RTN_PROHIBIT;
2932                         break;
2933                 case -EAGAIN:
2934                         rtm->rtm_type = RTN_THROW;
2935                         break;
2936                 default:
2937                         rtm->rtm_type = RTN_UNREACHABLE;
2938                         break;
2939                 }
2940         }
2941         else if (rt->rt6i_flags & RTF_LOCAL)
2942                 rtm->rtm_type = RTN_LOCAL;
2943         else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2944                 rtm->rtm_type = RTN_LOCAL;
2945         else
2946                 rtm->rtm_type = RTN_UNICAST;
2947         rtm->rtm_flags = 0;
2948         if (!netif_carrier_ok(rt->dst.dev)) {
2949                 rtm->rtm_flags |= RTNH_F_LINKDOWN;
2950                 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
2951                         rtm->rtm_flags |= RTNH_F_DEAD;
2952         }
2953         rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2954         rtm->rtm_protocol = rt->rt6i_protocol;
2955         if (rt->rt6i_flags & RTF_DYNAMIC)
2956                 rtm->rtm_protocol = RTPROT_REDIRECT;
2957         else if (rt->rt6i_flags & RTF_ADDRCONF) {
2958                 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2959                         rtm->rtm_protocol = RTPROT_RA;
2960                 else
2961                         rtm->rtm_protocol = RTPROT_KERNEL;
2962         }
2963
2964         if (rt->rt6i_flags & RTF_CACHE)
2965                 rtm->rtm_flags |= RTM_F_CLONED;
2966
2967         if (dst) {
2968                 if (nla_put_in6_addr(skb, RTA_DST, dst))
2969                         goto nla_put_failure;
2970                 rtm->rtm_dst_len = 128;
2971         } else if (rtm->rtm_dst_len)
2972                 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
2973                         goto nla_put_failure;
2974 #ifdef CONFIG_IPV6_SUBTREES
2975         if (src) {
2976                 if (nla_put_in6_addr(skb, RTA_SRC, src))
2977                         goto nla_put_failure;
2978                 rtm->rtm_src_len = 128;
2979         } else if (rtm->rtm_src_len &&
2980                    nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
2981                 goto nla_put_failure;
2982 #endif
2983         if (iif) {
2984 #ifdef CONFIG_IPV6_MROUTE
2985                 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2986                         int err = ip6mr_get_route(net, skb, rtm, nowait);
2987                         if (err <= 0) {
2988                                 if (!nowait) {
2989                                         if (err == 0)
2990                                                 return 0;
2991                                         goto nla_put_failure;
2992                                 } else {
2993                                         if (err == -EMSGSIZE)
2994                                                 goto nla_put_failure;
2995                                 }
2996                         }
2997                 } else
2998 #endif
2999                         if (nla_put_u32(skb, RTA_IIF, iif))
3000                                 goto nla_put_failure;
3001         } else if (dst) {
3002                 struct in6_addr saddr_buf;
3003                 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
3004                     nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3005                         goto nla_put_failure;
3006         }
3007
3008         if (rt->rt6i_prefsrc.plen) {
3009                 struct in6_addr saddr_buf;
3010                 saddr_buf = rt->rt6i_prefsrc.addr;
3011                 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3012                         goto nla_put_failure;
3013         }
3014
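        /* Copy the route's metrics, overriding RTAX_MTU with a cached
         * per-destination PMTU when one has been learned.
         */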
3015         memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3016         if (rt->rt6i_pmtu)
3017                 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
3018         if (rtnetlink_put_metrics(skb, metrics) < 0)
3019                 goto nla_put_failure;
3020
3021         if (rt->rt6i_flags & RTF_GATEWAY) {
3022                 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
3023                         goto nla_put_failure;
3024         }
3025
3026         if (rt->dst.dev &&
3027             nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
3028                 goto nla_put_failure;
3029         if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
3030                 goto nla_put_failure;
3031
3032         expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
3033
3034         if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
3035                 goto nla_put_failure;
3036
3037         if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3038                 goto nla_put_failure;
3039
3040         lwtunnel_fill_encap(skb, rt->dst.lwtstate);
3041
3042         nlmsg_end(skb, nlh);
3043         return 0;
3044
3045 nla_put_failure:
3046         nlmsg_cancel(skb, nlh);
3047         return -EMSGSIZE;
3048 }
3049
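/* fib6 tree walker callback for RTM_GETROUTE dumps: serialize one
 * rt6_info into the dump skb via rt6_fill_node(), honouring the
 * RTM_F_PREFIX filter from the request.
 */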
3050 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
3051 {
3052         struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
3053         int prefix;
3054
3055         if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
3056                 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
3057                 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
3058         } else
3059                 prefix = 0;
3060
3061         return rt6_fill_node(arg->net,
3062                      arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
3063                      NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
3064                      prefix, 0, NLM_F_MULTI);
3065 }
3066
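/* RTM_GETROUTE handler: resolve a route for the addresses supplied in
 * the request (input lookup if RTA_IIF is given, output lookup
 * otherwise) and unicast the serialized result back to the requester.
 */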
3067 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
3068 {
3069         struct net *net = sock_net(in_skb->sk);
3070         struct nlattr *tb[RTA_MAX+1];
3071         struct rt6_info *rt;
3072         struct sk_buff *skb;
3073         struct rtmsg *rtm;
3074         struct flowi6 fl6;
3075         int err, iif = 0, oif = 0;
3076
3077         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
3078         if (err < 0)
3079                 goto errout;
3080
3081         err = -EINVAL;
3082         memset(&fl6, 0, sizeof(fl6));
3083
3084         if (tb[RTA_SRC]) {
3085                 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
3086                         goto errout;
3087
3088                 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
3089         }
3090
3091         if (tb[RTA_DST]) {
3092                 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
3093                         goto errout;
3094
3095                 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
3096         }
3097
3098         if (tb[RTA_IIF])
3099                 iif = nla_get_u32(tb[RTA_IIF]);
3100
3101         if (tb[RTA_OIF])
3102                 oif = nla_get_u32(tb[RTA_OIF]);
3103
3104         if (tb[RTA_MARK])
3105                 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
3106
3107         if (iif) {
3108                 struct net_device *dev;
3109                 int flags = 0;
3110
3111                 dev = __dev_get_by_index(net, iif);
3112                 if (!dev) {
3113                         err = -ENODEV;
3114                         goto errout;
3115                 }
3116
3117                 fl6.flowi6_iif = iif;
3118
3119                 if (!ipv6_addr_any(&fl6.saddr))
3120                         flags |= RT6_LOOKUP_F_HAS_SADDR;
3121
3122                 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
3123                                                                flags);
3124         } else {
3125                 fl6.flowi6_oif = oif;
3126
3127                 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
3128         }
3129
3130         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3131         if (!skb) {
3132                 ip6_rt_put(rt);
3133                 err = -ENOBUFS;
3134                 goto errout;
3135         }
3136
3137         /* Reserve room for dummy headers; this skb can pass
3138          * through a good chunk of the routing engine.
3139          */
3140         skb_reset_mac_header(skb);
3141         skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
3142
3143         skb_dst_set(skb, &rt->dst);
3144
3145         err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
3146                             RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3147                             nlh->nlmsg_seq, 0, 0, 0);
3148         if (err < 0) {
3149                 kfree_skb(skb);
3150                 goto errout;
3151         }
3152
3153         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3154 errout:
3155         return err;
3156 }
3157
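/* Broadcast an RTM_NEWROUTE/RTM_DELROUTE notification for @rt to
 * RTNLGRP_IPV6_ROUTE listeners.
 */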
3158 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
3159 {
3160         struct sk_buff *skb;
3161         struct net *net = info->nl_net;
3162         u32 seq;
3163         int err;
3164
3165         err = -ENOBUFS;
3166         seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3167
3168         skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3169         if (!skb)
3170                 goto errout;
3171
3172         err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3173                                 event, info->portid, seq, 0, 0, 0);
3174         if (err < 0) {
3175                 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3176                 WARN_ON(err == -EMSGSIZE);
3177                 kfree_skb(skb);
3178                 goto errout;
3179         }
3180         rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3181                     info->nlh, gfp_any());
3182         return;
3183 errout:
3184         if (err < 0)
3185                 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
3186 }
3187
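/* When a namespace's loopback device is registered, bind the special
 * null entry (and, with policy routing, the prohibit and blackhole
 * entries) to it so they carry a valid device and inet6_dev.
 */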
3188 static int ip6_route_dev_notify(struct notifier_block *this,
3189                                 unsigned long event, void *ptr)
3190 {
3191         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3192         struct net *net = dev_net(dev);
3193
3194         if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
3195                 net->ipv6.ip6_null_entry->dst.dev = dev;
3196                 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
3197 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3198                 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
3199                 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
3200                 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
3201                 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
3202 #endif
3203         }
3204
3205         return NOTIFY_OK;
3206 }
3207
3208 /*
3209  *      /proc
3210  */
3211
3212 #ifdef CONFIG_PROC_FS
3213
3214 static const struct file_operations ipv6_route_proc_fops = {
3215         .owner          = THIS_MODULE,
3216         .open           = ipv6_route_open,
3217         .read           = seq_read,
3218         .llseek         = seq_lseek,
3219         .release        = seq_release_net,
3220 };
3221
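/* /proc/net/rt6_stats: fib nodes, route nodes, route allocations,
 * route entries, cached routes, dst cache entries and discarded
 * routes, all in hex.
 */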
3222 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
3223 {
3224         struct net *net = (struct net *)seq->private;
3225         seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
3226                    net->ipv6.rt6_stats->fib_nodes,
3227                    net->ipv6.rt6_stats->fib_route_nodes,
3228                    net->ipv6.rt6_stats->fib_rt_alloc,
3229                    net->ipv6.rt6_stats->fib_rt_entries,
3230                    net->ipv6.rt6_stats->fib_rt_cache,
3231                    dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
3232                    net->ipv6.rt6_stats->fib_discarded_routes);
3233
3234         return 0;
3235 }
3236
3237 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
3238 {
3239         return single_open_net(inode, file, rt6_stats_seq_show);
3240 }
3241
3242 static const struct file_operations rt6_stats_seq_fops = {
3243         .owner   = THIS_MODULE,
3244         .open    = rt6_stats_seq_open,
3245         .read    = seq_read,
3246         .llseek  = seq_lseek,
3247         .release = single_release_net,
3248 };
3249 #endif  /* CONFIG_PROC_FS */
3250
3251 #ifdef CONFIG_SYSCTL
3252
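/* Write-only "flush" sysctl: writing a value triggers an immediate
 * fib6 garbage-collection run for this namespace.
 */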
3253 static
3254 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
3255                               void __user *buffer, size_t *lenp, loff_t *ppos)
3256 {
3257         struct net *net;
3258         int delay;
3259         if (!write)
3260                 return -EINVAL;
3261
3262         net = (struct net *)ctl->extra1;
3263         delay = net->ipv6.sysctl.flush_delay;
3264         proc_dointvec(ctl, write, buffer, lenp, ppos);
3265         fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
3266         return 0;
3267 }
3268
3269 struct ctl_table ipv6_route_table_template[] = {
3270         {
3271                 .procname       =       "flush",
3272                 .data           =       &init_net.ipv6.sysctl.flush_delay,
3273                 .maxlen         =       sizeof(int),
3274                 .mode           =       0200,
3275                 .proc_handler   =       ipv6_sysctl_rtcache_flush
3276         },
3277         {
3278                 .procname       =       "gc_thresh",
3279                 .data           =       &ip6_dst_ops_template.gc_thresh,
3280                 .maxlen         =       sizeof(int),
3281                 .mode           =       0644,
3282                 .proc_handler   =       proc_dointvec,
3283         },
3284         {
3285                 .procname       =       "max_size",
3286                 .data           =       &init_net.ipv6.sysctl.ip6_rt_max_size,
3287                 .maxlen         =       sizeof(int),
3288                 .mode           =       0644,
3289                 .proc_handler   =       proc_dointvec,
3290         },
3291         {
3292                 .procname       =       "gc_min_interval",
3293                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3294                 .maxlen         =       sizeof(int),
3295                 .mode           =       0644,
3296                 .proc_handler   =       proc_dointvec_jiffies,
3297         },
3298         {
3299                 .procname       =       "gc_timeout",
3300                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
3301                 .maxlen         =       sizeof(int),
3302                 .mode           =       0644,
3303                 .proc_handler   =       proc_dointvec_jiffies,
3304         },
3305         {
3306                 .procname       =       "gc_interval",
3307                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_interval,
3308                 .maxlen         =       sizeof(int),
3309                 .mode           =       0644,
3310                 .proc_handler   =       proc_dointvec_jiffies,
3311         },
3312         {
3313                 .procname       =       "gc_elasticity",
3314                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
3315                 .maxlen         =       sizeof(int),
3316                 .mode           =       0644,
3317                 .proc_handler   =       proc_dointvec,
3318         },
3319         {
3320                 .procname       =       "mtu_expires",
3321                 .data           =       &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
3322                 .maxlen         =       sizeof(int),
3323                 .mode           =       0644,
3324                 .proc_handler   =       proc_dointvec_jiffies,
3325         },
3326         {
3327                 .procname       =       "min_adv_mss",
3328                 .data           =       &init_net.ipv6.sysctl.ip6_rt_min_advmss,
3329                 .maxlen         =       sizeof(int),
3330                 .mode           =       0644,
3331                 .proc_handler   =       proc_dointvec,
3332         },
3333         {
3334                 .procname       =       "gc_min_interval_ms",
3335                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3336                 .maxlen         =       sizeof(int),
3337                 .mode           =       0644,
3338                 .proc_handler   =       proc_dointvec_ms_jiffies,
3339         },
3340         { }
3341 };
3342
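/* Duplicate the sysctl template for a namespace and point each entry
 * at that namespace's private copy of the tunables; the "flush" entry
 * is hidden from non-init user namespaces.
 */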
3343 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
3344 {
3345         struct ctl_table *table;
3346
3347         table = kmemdup(ipv6_route_table_template,
3348                         sizeof(ipv6_route_table_template),
3349                         GFP_KERNEL);
3350
3351         if (table) {
3352                 table[0].data = &net->ipv6.sysctl.flush_delay;
3353                 table[0].extra1 = net;
3354                 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
3355                 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
3356                 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3357                 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
3358                 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
3359                 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
3360                 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
3361                 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
3362                 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3363
3364                 /* Don't export sysctls to unprivileged users */
3365                 if (net->user_ns != &init_user_ns)
3366                         table[0].procname = NULL;
3367         }
3368
3369         return table;
3370 }
3371 #endif
3372
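/* Per-namespace initialisation: set up the dst ops, allocate the
 * special route entries (null, plus prohibit/blackhole with policy
 * routing) and install the default routing tunables.
 */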
3373 static int __net_init ip6_route_net_init(struct net *net)
3374 {
3375         int ret = -ENOMEM;
3376
3377         memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3378                sizeof(net->ipv6.ip6_dst_ops));
3379
3380         if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3381                 goto out_ip6_dst_ops;
3382
3383         net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3384                                            sizeof(*net->ipv6.ip6_null_entry),
3385                                            GFP_KERNEL);
3386         if (!net->ipv6.ip6_null_entry)
3387                 goto out_ip6_dst_entries;
3388         net->ipv6.ip6_null_entry->dst.path =
3389                 (struct dst_entry *)net->ipv6.ip6_null_entry;
3390         net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3391         dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3392                          ip6_template_metrics, true);
3393
3394 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3395         net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3396                                                sizeof(*net->ipv6.ip6_prohibit_entry),
3397                                                GFP_KERNEL);
3398         if (!net->ipv6.ip6_prohibit_entry)
3399                 goto out_ip6_null_entry;
3400         net->ipv6.ip6_prohibit_entry->dst.path =
3401                 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3402         net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3403         dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3404                          ip6_template_metrics, true);
3405
3406         net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3407                                                sizeof(*net->ipv6.ip6_blk_hole_entry),
3408                                                GFP_KERNEL);
3409         if (!net->ipv6.ip6_blk_hole_entry)
3410                 goto out_ip6_prohibit_entry;
3411         net->ipv6.ip6_blk_hole_entry->dst.path =
3412                 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3413         net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3414         dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3415                          ip6_template_metrics, true);
3416 #endif
3417
3418         net->ipv6.sysctl.flush_delay = 0;
3419         net->ipv6.sysctl.ip6_rt_max_size = 4096;
3420         net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3421         net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3422         net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3423         net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3424         net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3425         net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3426
3427         net->ipv6.ip6_rt_gc_expire = 30*HZ;
3428
3429         ret = 0;
3430 out:
3431         return ret;
3432
3433 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3434 out_ip6_prohibit_entry:
3435         kfree(net->ipv6.ip6_prohibit_entry);
3436 out_ip6_null_entry:
3437         kfree(net->ipv6.ip6_null_entry);
3438 #endif
3439 out_ip6_dst_entries:
3440         dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3441 out_ip6_dst_ops:
3442         goto out;
3443 }
3444
3445 static void __net_exit ip6_route_net_exit(struct net *net)
3446 {
3447         kfree(net->ipv6.ip6_null_entry);
3448 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3449         kfree(net->ipv6.ip6_prohibit_entry);
3450         kfree(net->ipv6.ip6_blk_hole_entry);
3451 #endif
3452         dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3453 }
3454
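/* Late per-namespace init/exit: create and remove the /proc/net
 * entries once the rest of the routing state is ready.
 */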
3455 static int __net_init ip6_route_net_init_late(struct net *net)
3456 {
3457 #ifdef CONFIG_PROC_FS
3458         proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3459         proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3460 #endif
3461         return 0;
3462 }
3463
3464 static void __net_exit ip6_route_net_exit_late(struct net *net)
3465 {
3466 #ifdef CONFIG_PROC_FS
3467         remove_proc_entry("ipv6_route", net->proc_net);
3468         remove_proc_entry("rt6_stats", net->proc_net);
3469 #endif
3470 }
3471
3472 static struct pernet_operations ip6_route_net_ops = {
3473         .init = ip6_route_net_init,
3474         .exit = ip6_route_net_exit,
3475 };
3476
3477 static int __net_init ipv6_inetpeer_init(struct net *net)
3478 {
3479         struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3480
3481         if (!bp)
3482                 return -ENOMEM;
3483         inet_peer_base_init(bp);
3484         net->ipv6.peers = bp;
3485         return 0;
3486 }
3487
3488 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3489 {
3490         struct inet_peer_base *bp = net->ipv6.peers;
3491
3492         net->ipv6.peers = NULL;
3493         inetpeer_invalidate_tree(bp);
3494         kfree(bp);
3495 }
3496
3497 static struct pernet_operations ipv6_inetpeer_ops = {
3498         .init   =       ipv6_inetpeer_init,
3499         .exit   =       ipv6_inetpeer_exit,
3500 };
3501
3502 static struct pernet_operations ip6_route_net_late_ops = {
3503         .init = ip6_route_net_init_late,
3504         .exit = ip6_route_net_exit_late,
3505 };
3506
3507 static struct notifier_block ip6_route_dev_notifier = {
3508         .notifier_call = ip6_route_dev_notify,
3509         .priority = 0,
3510 };
3511
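/* Subsystem init: create the rt6_info slab cache, register the
 * per-namespace operations, hook init_net's special entries to its
 * loopback device, bring up the fib6/xfrm6/rules machinery, register
 * the rtnetlink route handlers and the netdevice notifier, and
 * initialise the per-cpu uncached-route lists.
 */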
3512 int __init ip6_route_init(void)
3513 {
3514         int ret;
3515         int cpu;
3516
3517         ret = -ENOMEM;
3518         ip6_dst_ops_template.kmem_cachep =
3519                 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3520                                   SLAB_HWCACHE_ALIGN, NULL);
3521         if (!ip6_dst_ops_template.kmem_cachep)
3522                 goto out;
3523
3524         ret = dst_entries_init(&ip6_dst_blackhole_ops);
3525         if (ret)
3526                 goto out_kmem_cache;
3527
3528         ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3529         if (ret)
3530                 goto out_dst_entries;
3531
3532         ret = register_pernet_subsys(&ip6_route_net_ops);
3533         if (ret)
3534                 goto out_register_inetpeer;
3535
3536         ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3537
3538         /* The loopback device is registered before this point, so the
3539          * loopback reference in rt6_info is not taken automatically;
3540          * do it manually for init_net. */
3541         init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3542         init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3543 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3544         init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3545         init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3546         init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3547         init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3548 #endif
3549         ret = fib6_init();
3550         if (ret)
3551                 goto out_register_subsys;
3552
3553         ret = xfrm6_init();
3554         if (ret)
3555                 goto out_fib6_init;
3556
3557         ret = fib6_rules_init();
3558         if (ret)
3559                 goto xfrm6_init;
3560
3561         ret = register_pernet_subsys(&ip6_route_net_late_ops);
3562         if (ret)
3563                 goto fib6_rules_init;
3564
3565         ret = -ENOBUFS;
3566         if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3567             __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3568             __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3569                 goto out_register_late_subsys;
3570
3571         ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3572         if (ret)
3573                 goto out_register_late_subsys;
3574
3575         for_each_possible_cpu(cpu) {
3576                 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
3577
3578                 INIT_LIST_HEAD(&ul->head);
3579                 spin_lock_init(&ul->lock);
3580         }
3581
3582 out:
3583         return ret;
3584
3585 out_register_late_subsys:
3586         unregister_pernet_subsys(&ip6_route_net_late_ops);
3587 fib6_rules_init:
3588         fib6_rules_cleanup();
3589 xfrm6_init:
3590         xfrm6_fini();
3591 out_fib6_init:
3592         fib6_gc_cleanup();
3593 out_register_subsys:
3594         unregister_pernet_subsys(&ip6_route_net_ops);
3595 out_register_inetpeer:
3596         unregister_pernet_subsys(&ipv6_inetpeer_ops);
3597 out_dst_entries:
3598         dst_entries_destroy(&ip6_dst_blackhole_ops);
3599 out_kmem_cache:
3600         kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3601         goto out;
3602 }
3603
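/* Tear everything down in the reverse order of ip6_route_init(). */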
3604 void ip6_route_cleanup(void)
3605 {
3606         unregister_netdevice_notifier(&ip6_route_dev_notifier);
3607         unregister_pernet_subsys(&ip6_route_net_late_ops);
3608         fib6_rules_cleanup();
3609         xfrm6_fini();
3610         fib6_gc_cleanup();
3611         unregister_pernet_subsys(&ipv6_inetpeer_ops);
3612         unregister_pernet_subsys(&ip6_route_net_ops);
3613         dst_entries_destroy(&ip6_dst_blackhole_ops);
3614         kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3615 }