ipv4: add reference counting to metrics
[karo-tx-linux.git] / net / ipv4 / route.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              ROUTE - implementation of the IP router.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12  *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  * Fixes:
15  *              Alan Cox        :       Verify area fixes.
16  *              Alan Cox        :       cli() protects routing changes
17  *              Rui Oliveira    :       ICMP routing table updates
18  *              (rco@di.uminho.pt)      Routing table insertion and update
19  *              Linus Torvalds  :       Rewrote bits to be sensible
20  *              Alan Cox        :       Added BSD route gw semantics
21  *              Alan Cox        :       Super /proc >4K
22  *              Alan Cox        :       MTU in route table
23  *              Alan Cox        :       MSS actually. Also added the window
24  *                                      clamper.
25  *              Sam Lantinga    :       Fixed route matching in rt_del()
26  *              Alan Cox        :       Routing cache support.
27  *              Alan Cox        :       Removed compatibility cruft.
28  *              Alan Cox        :       RTF_REJECT support.
29  *              Alan Cox        :       TCP irtt support.
30  *              Jonathan Naylor :       Added Metric support.
31  *      Miquel van Smoorenburg  :       BSD API fixes.
32  *      Miquel van Smoorenburg  :       Metrics.
33  *              Alan Cox        :       Use __u32 properly
34  *              Alan Cox        :       Aligned routing errors more closely with BSD,
35  *                                      though our system is still very different.
36  *              Alan Cox        :       Faster /proc handling
37  *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
38  *                                      routing caches and better behaviour.
39  *
40  *              Olaf Erb        :       irtt wasn't being copied right.
41  *              Bjorn Ekwall    :       Kerneld route support.
42  *              Alan Cox        :       Multicast fixed (I hope)
43  *              Pavel Krauz     :       Limited broadcast fixed
44  *              Mike McLagan    :       Routing by source
45  *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
46  *                                      route.c and rewritten from scratch.
47  *              Andi Kleen      :       Load-limit warning messages.
48  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
49  *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
50  *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
51  *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
52  *              Marc Boucher    :       routing by fwmark
53  *      Robert Olsson           :       Added rt_cache statistics
54  *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
55  *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
56  *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
57  *      Ilia Sotnikov           :       Removed TOS from hash calculations
58  *
59  *              This program is free software; you can redistribute it and/or
60  *              modify it under the terms of the GNU General Public License
61  *              as published by the Free Software Foundation; either version
62  *              2 of the License, or (at your option) any later version.
63  */
64
65 #define pr_fmt(fmt) "IPv4: " fmt
66
67 #include <linux/module.h>
68 #include <linux/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
72 #include <linux/mm.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
77 #include <linux/in.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/skbuff.h>
83 #include <linux/inetdevice.h>
84 #include <linux/igmp.h>
85 #include <linux/pkt_sched.h>
86 #include <linux/mroute.h>
87 #include <linux/netfilter_ipv4.h>
88 #include <linux/random.h>
89 #include <linux/rcupdate.h>
90 #include <linux/times.h>
91 #include <linux/slab.h>
92 #include <linux/jhash.h>
93 #include <net/dst.h>
94 #include <net/dst_metadata.h>
95 #include <net/net_namespace.h>
96 #include <net/protocol.h>
97 #include <net/ip.h>
98 #include <net/route.h>
99 #include <net/inetpeer.h>
100 #include <net/sock.h>
101 #include <net/ip_fib.h>
102 #include <net/arp.h>
103 #include <net/tcp.h>
104 #include <net/icmp.h>
105 #include <net/xfrm.h>
106 #include <net/lwtunnel.h>
107 #include <net/netevent.h>
108 #include <net/rtnetlink.h>
109 #ifdef CONFIG_SYSCTL
110 #include <linux/sysctl.h>
111 #include <linux/kmemleak.h>
112 #endif
113 #include <net/secure_seq.h>
114 #include <net/ip_tunnels.h>
115 #include <net/l3mdev.h>
116
117 #define RT_FL_TOS(oldflp4) \
118         ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
119
120 #define RT_GC_TIMEOUT (300*HZ)
121
122 static int ip_rt_max_size;
123 static int ip_rt_redirect_number __read_mostly  = 9;
124 static int ip_rt_redirect_load __read_mostly    = HZ / 50;
125 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
126 static int ip_rt_error_cost __read_mostly       = HZ;
127 static int ip_rt_error_burst __read_mostly      = 5 * HZ;
128 static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
129 static int ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
130 static int ip_rt_min_advmss __read_mostly       = 256;
131
132 static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
133 /*
134  *      Interface to generic destination cache.
135  */
136
137 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
138 static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
139 static unsigned int      ipv4_mtu(const struct dst_entry *dst);
140 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
141 static void              ipv4_link_failure(struct sk_buff *skb);
142 static void              ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
143                                            struct sk_buff *skb, u32 mtu);
144 static void              ip_do_redirect(struct dst_entry *dst, struct sock *sk,
145                                         struct sk_buff *skb);
146 static void             ipv4_dst_destroy(struct dst_entry *dst);
147
148 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
149 {
150         WARN_ON(1);
151         return NULL;
152 }
153
154 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
155                                            struct sk_buff *skb,
156                                            const void *daddr);
157 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
158
159 static struct dst_ops ipv4_dst_ops = {
160         .family =               AF_INET,
161         .check =                ipv4_dst_check,
162         .default_advmss =       ipv4_default_advmss,
163         .mtu =                  ipv4_mtu,
164         .cow_metrics =          ipv4_cow_metrics,
165         .destroy =              ipv4_dst_destroy,
166         .negative_advice =      ipv4_negative_advice,
167         .link_failure =         ipv4_link_failure,
168         .update_pmtu =          ip_rt_update_pmtu,
169         .redirect =             ip_do_redirect,
170         .local_out =            __ip_local_out,
171         .neigh_lookup =         ipv4_neigh_lookup,
172         .confirm_neigh =        ipv4_confirm_neigh,
173 };
174
175 #define ECN_OR_COST(class)      TC_PRIO_##class
176
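/*
 * ip_tos2prio maps the four TOS bits of the IPv4 header to a traffic-control
 * priority band (rt_tos2priority() in <net/route.h> indexes it with the TOS
 * field shifted right by one).  ECN_OR_COST() expands to the same TC_PRIO_*
 * value as the base class, so the low bit of the index never changes the band.
 */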
177 const __u8 ip_tos2prio[16] = {
178         TC_PRIO_BESTEFFORT,
179         ECN_OR_COST(BESTEFFORT),
180         TC_PRIO_BESTEFFORT,
181         ECN_OR_COST(BESTEFFORT),
182         TC_PRIO_BULK,
183         ECN_OR_COST(BULK),
184         TC_PRIO_BULK,
185         ECN_OR_COST(BULK),
186         TC_PRIO_INTERACTIVE,
187         ECN_OR_COST(INTERACTIVE),
188         TC_PRIO_INTERACTIVE,
189         ECN_OR_COST(INTERACTIVE),
190         TC_PRIO_INTERACTIVE_BULK,
191         ECN_OR_COST(INTERACTIVE_BULK),
192         TC_PRIO_INTERACTIVE_BULK,
193         ECN_OR_COST(INTERACTIVE_BULK)
194 };
195 EXPORT_SYMBOL(ip_tos2prio);
196
197 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
198 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
199
200 #ifdef CONFIG_PROC_FS
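/*
 * The per-route IPv4 routing cache is gone; /proc/net/rt_cache is kept only
 * for compatibility.  The seq_file below therefore never yields an entry and
 * only the header line is printed.
 */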
201 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
202 {
203         if (*pos)
204                 return NULL;
205         return SEQ_START_TOKEN;
206 }
207
208 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
209 {
210         ++*pos;
211         return NULL;
212 }
213
214 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
215 {
216 }
217
218 static int rt_cache_seq_show(struct seq_file *seq, void *v)
219 {
220         if (v == SEQ_START_TOKEN)
221                 seq_printf(seq, "%-127s\n",
222                            "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
223                            "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
224                            "HHUptod\tSpecDst");
225         return 0;
226 }
227
228 static const struct seq_operations rt_cache_seq_ops = {
229         .start  = rt_cache_seq_start,
230         .next   = rt_cache_seq_next,
231         .stop   = rt_cache_seq_stop,
232         .show   = rt_cache_seq_show,
233 };
234
235 static int rt_cache_seq_open(struct inode *inode, struct file *file)
236 {
237         return seq_open(file, &rt_cache_seq_ops);
238 }
239
240 static const struct file_operations rt_cache_seq_fops = {
241         .owner   = THIS_MODULE,
242         .open    = rt_cache_seq_open,
243         .read    = seq_read,
244         .llseek  = seq_lseek,
245         .release = seq_release,
246 };
247
248
249 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
250 {
251         int cpu;
252
253         if (*pos == 0)
254                 return SEQ_START_TOKEN;
255
256         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
257                 if (!cpu_possible(cpu))
258                         continue;
259                 *pos = cpu+1;
260                 return &per_cpu(rt_cache_stat, cpu);
261         }
262         return NULL;
263 }
264
265 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
266 {
267         int cpu;
268
269         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
270                 if (!cpu_possible(cpu))
271                         continue;
272                 *pos = cpu+1;
273                 return &per_cpu(rt_cache_stat, cpu);
274         }
275         return NULL;
276
277 }
278
279 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
280 {
281
282 }
283
284 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
285 {
286         struct rt_cache_stat *st = v;
287
288         if (v == SEQ_START_TOKEN) {
289                 seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
290                 return 0;
291         }
292
293         seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
294                    " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
295                    dst_entries_get_slow(&ipv4_dst_ops),
296                    0, /* st->in_hit */
297                    st->in_slow_tot,
298                    st->in_slow_mc,
299                    st->in_no_route,
300                    st->in_brd,
301                    st->in_martian_dst,
302                    st->in_martian_src,
303
304                    0, /* st->out_hit */
305                    st->out_slow_tot,
306                    st->out_slow_mc,
307
308                    0, /* st->gc_total */
309                    0, /* st->gc_ignored */
310                    0, /* st->gc_goal_miss */
311                    0, /* st->gc_dst_overflow */
312                    0, /* st->in_hlist_search */
313                    0  /* st->out_hlist_search */
314                 );
315         return 0;
316 }
317
318 static const struct seq_operations rt_cpu_seq_ops = {
319         .start  = rt_cpu_seq_start,
320         .next   = rt_cpu_seq_next,
321         .stop   = rt_cpu_seq_stop,
322         .show   = rt_cpu_seq_show,
323 };
324
325
326 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
327 {
328         return seq_open(file, &rt_cpu_seq_ops);
329 }
330
331 static const struct file_operations rt_cpu_seq_fops = {
332         .owner   = THIS_MODULE,
333         .open    = rt_cpu_seq_open,
334         .read    = seq_read,
335         .llseek  = seq_lseek,
336         .release = seq_release,
337 };
338
339 #ifdef CONFIG_IP_ROUTE_CLASSID
340 static int rt_acct_proc_show(struct seq_file *m, void *v)
341 {
342         struct ip_rt_acct *dst, *src;
343         unsigned int i, j;
344
345         dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
346         if (!dst)
347                 return -ENOMEM;
348
349         for_each_possible_cpu(i) {
350                 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
351                 for (j = 0; j < 256; j++) {
352                         dst[j].o_bytes   += src[j].o_bytes;
353                         dst[j].o_packets += src[j].o_packets;
354                         dst[j].i_bytes   += src[j].i_bytes;
355                         dst[j].i_packets += src[j].i_packets;
356                 }
357         }
358
359         seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
360         kfree(dst);
361         return 0;
362 }
363
364 static int rt_acct_proc_open(struct inode *inode, struct file *file)
365 {
366         return single_open(file, rt_acct_proc_show, NULL);
367 }
368
369 static const struct file_operations rt_acct_proc_fops = {
370         .owner          = THIS_MODULE,
371         .open           = rt_acct_proc_open,
372         .read           = seq_read,
373         .llseek         = seq_lseek,
374         .release        = single_release,
375 };
376 #endif
377
378 static int __net_init ip_rt_do_proc_init(struct net *net)
379 {
380         struct proc_dir_entry *pde;
381
382         pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
383                           &rt_cache_seq_fops);
384         if (!pde)
385                 goto err1;
386
387         pde = proc_create("rt_cache", S_IRUGO,
388                           net->proc_net_stat, &rt_cpu_seq_fops);
389         if (!pde)
390                 goto err2;
391
392 #ifdef CONFIG_IP_ROUTE_CLASSID
393         pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
394         if (!pde)
395                 goto err3;
396 #endif
397         return 0;
398
399 #ifdef CONFIG_IP_ROUTE_CLASSID
400 err3:
401         remove_proc_entry("rt_cache", net->proc_net_stat);
402 #endif
403 err2:
404         remove_proc_entry("rt_cache", net->proc_net);
405 err1:
406         return -ENOMEM;
407 }
408
409 static void __net_exit ip_rt_do_proc_exit(struct net *net)
410 {
411         remove_proc_entry("rt_cache", net->proc_net_stat);
412         remove_proc_entry("rt_cache", net->proc_net);
413 #ifdef CONFIG_IP_ROUTE_CLASSID
414         remove_proc_entry("rt_acct", net->proc_net);
415 #endif
416 }
417
418 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
419         .init = ip_rt_do_proc_init,
420         .exit = ip_rt_do_proc_exit,
421 };
422
423 static int __init ip_rt_proc_init(void)
424 {
425         return register_pernet_subsys(&ip_rt_proc_ops);
426 }
427
428 #else
429 static inline int ip_rt_proc_init(void)
430 {
431         return 0;
432 }
433 #endif /* CONFIG_PROC_FS */
434
435 static inline bool rt_is_expired(const struct rtable *rth)
436 {
437         return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
438 }
439
440 void rt_cache_flush(struct net *net)
441 {
442         rt_genid_bump_ipv4(net);
443 }
444
445 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
446                                            struct sk_buff *skb,
447                                            const void *daddr)
448 {
449         struct net_device *dev = dst->dev;
450         const __be32 *pkey = daddr;
451         const struct rtable *rt;
452         struct neighbour *n;
453
454         rt = (const struct rtable *) dst;
455         if (rt->rt_gateway)
456                 pkey = (const __be32 *) &rt->rt_gateway;
457         else if (skb)
458                 pkey = &ip_hdr(skb)->daddr;
459
460         n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
461         if (n)
462                 return n;
463         return neigh_create(&arp_tbl, pkey, dev);
464 }
465
466 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
467 {
468         struct net_device *dev = dst->dev;
469         const __be32 *pkey = daddr;
470         const struct rtable *rt;
471
472         rt = (const struct rtable *)dst;
473         if (rt->rt_gateway)
474                 pkey = (const __be32 *)&rt->rt_gateway;
475         else if (!daddr ||
476                  (rt->rt_flags &
477                   (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
478                 return;
479
480         __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
481 }
482
483 #define IP_IDENTS_SZ 2048u
484
485 static atomic_t *ip_idents __read_mostly;
486 static u32 *ip_tstamps __read_mostly;
487
488 /* In order to protect privacy, we add a perturbation to identifiers
489  * if one generator is seldom used. This makes it hard for an attacker
490  * to infer how many packets were sent between two points in time.
491  */
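/* Each of the IP_IDENTS_SZ buckets also records the jiffies value of its last
 * use; when a bucket has been idle, a random offset of up to the idle time is
 * added before the next "segs" consecutive IDs are reserved.
 */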
492 u32 ip_idents_reserve(u32 hash, int segs)
493 {
494         u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
495         atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
496         u32 old = ACCESS_ONCE(*p_tstamp);
497         u32 now = (u32)jiffies;
498         u32 new, delta = 0;
499
500         if (old != now && cmpxchg(p_tstamp, old, now) == old)
501                 delta = prandom_u32_max(now - old);
502
503         /* Do not use atomic_add_return() as it makes UBSAN unhappy */
504         do {
505                 old = (u32)atomic_read(p_id);
506                 new = old + delta + segs;
507         } while (atomic_cmpxchg(p_id, old, new) != old);
508
509         return new - segs;
510 }
511 EXPORT_SYMBOL(ip_idents_reserve);
512
513 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
514 {
515         static u32 ip_idents_hashrnd __read_mostly;
516         u32 hash, id;
517
518         net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
519
520         hash = jhash_3words((__force u32)iph->daddr,
521                             (__force u32)iph->saddr,
522                             iph->protocol ^ net_hash_mix(net),
523                             ip_idents_hashrnd);
524         id = ip_idents_reserve(hash, segs);
525         iph->id = htons(id);
526 }
527 EXPORT_SYMBOL(__ip_select_ident);
528
529 static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
530                              const struct sock *sk,
531                              const struct iphdr *iph,
532                              int oif, u8 tos,
533                              u8 prot, u32 mark, int flow_flags)
534 {
535         if (sk) {
536                 const struct inet_sock *inet = inet_sk(sk);
537
538                 oif = sk->sk_bound_dev_if;
539                 mark = sk->sk_mark;
540                 tos = RT_CONN_FLAGS(sk);
541                 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
542         }
543         flowi4_init_output(fl4, oif, mark, tos,
544                            RT_SCOPE_UNIVERSE, prot,
545                            flow_flags,
546                            iph->daddr, iph->saddr, 0, 0,
547                            sock_net_uid(net, sk));
548 }
549
550 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
551                                const struct sock *sk)
552 {
553         const struct net *net = dev_net(skb->dev);
554         const struct iphdr *iph = ip_hdr(skb);
555         int oif = skb->dev->ifindex;
556         u8 tos = RT_TOS(iph->tos);
557         u8 prot = iph->protocol;
558         u32 mark = skb->mark;
559
560         __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
561 }
562
563 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
564 {
565         const struct inet_sock *inet = inet_sk(sk);
566         const struct ip_options_rcu *inet_opt;
567         __be32 daddr = inet->inet_daddr;
568
569         rcu_read_lock();
570         inet_opt = rcu_dereference(inet->inet_opt);
571         if (inet_opt && inet_opt->opt.srr)
572                 daddr = inet_opt->opt.faddr;
573         flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
574                            RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
575                            inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
576                            inet_sk_flowi_flags(sk),
577                            daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
578         rcu_read_unlock();
579 }
580
581 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
582                                  const struct sk_buff *skb)
583 {
584         if (skb)
585                 build_skb_flow_key(fl4, skb, sk);
586         else
587                 build_sk_flow_key(fl4, sk);
588 }
589
590 static inline void rt_free(struct rtable *rt)
591 {
592         call_rcu(&rt->dst.rcu_head, dst_rcu_free);
593 }
594
595 static DEFINE_SPINLOCK(fnhe_lock);
596
597 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
598 {
599         struct rtable *rt;
600
601         rt = rcu_dereference(fnhe->fnhe_rth_input);
602         if (rt) {
603                 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
604                 rt_free(rt);
605         }
606         rt = rcu_dereference(fnhe->fnhe_rth_output);
607         if (rt) {
608                 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
609                 rt_free(rt);
610         }
611 }
612
613 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
614 {
615         struct fib_nh_exception *fnhe, *oldest;
616
617         oldest = rcu_dereference(hash->chain);
618         for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
619              fnhe = rcu_dereference(fnhe->fnhe_next)) {
620                 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
621                         oldest = fnhe;
622         }
623         fnhe_flush_routes(oldest);
624         return oldest;
625 }
626
627 static inline u32 fnhe_hashfun(__be32 daddr)
628 {
629         static u32 fnhe_hashrnd __read_mostly;
630         u32 hval;
631
632         net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
633         hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
634         return hash_32(hval, FNHE_HASH_SHIFT);
635 }
636
637 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
638 {
639         rt->rt_pmtu = fnhe->fnhe_pmtu;
640         rt->dst.expires = fnhe->fnhe_expires;
641
642         if (fnhe->fnhe_gw) {
643                 rt->rt_flags |= RTCF_REDIRECTED;
644                 rt->rt_gateway = fnhe->fnhe_gw;
645                 rt->rt_uses_gateway = 1;
646         }
647 }
648
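/*
 * A fib_nh_exception records per-destination state learned at run time for a
 * nexthop (redirect gateway, PMTU and its expiry), hashed by daddr.  The
 * function below refreshes an existing entry or allocates a new one, recycling
 * the oldest entry in a bucket once its chain grows past FNHE_RECLAIM_DEPTH,
 * and marks the nexthop's cached routes obsolete so they are re-validated
 * against the new exception.
 */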
649 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
650                                   u32 pmtu, unsigned long expires)
651 {
652         struct fnhe_hash_bucket *hash;
653         struct fib_nh_exception *fnhe;
654         struct rtable *rt;
655         unsigned int i;
656         int depth;
657         u32 hval = fnhe_hashfun(daddr);
658
659         spin_lock_bh(&fnhe_lock);
660
661         hash = rcu_dereference(nh->nh_exceptions);
662         if (!hash) {
663                 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
664                 if (!hash)
665                         goto out_unlock;
666                 rcu_assign_pointer(nh->nh_exceptions, hash);
667         }
668
669         hash += hval;
670
671         depth = 0;
672         for (fnhe = rcu_dereference(hash->chain); fnhe;
673              fnhe = rcu_dereference(fnhe->fnhe_next)) {
674                 if (fnhe->fnhe_daddr == daddr)
675                         break;
676                 depth++;
677         }
678
679         if (fnhe) {
680                 if (gw)
681                         fnhe->fnhe_gw = gw;
682                 if (pmtu) {
683                         fnhe->fnhe_pmtu = pmtu;
684                         fnhe->fnhe_expires = max(1UL, expires);
685                 }
686                 /* Update all cached dsts too */
687                 rt = rcu_dereference(fnhe->fnhe_rth_input);
688                 if (rt)
689                         fill_route_from_fnhe(rt, fnhe);
690                 rt = rcu_dereference(fnhe->fnhe_rth_output);
691                 if (rt)
692                         fill_route_from_fnhe(rt, fnhe);
693         } else {
694                 if (depth > FNHE_RECLAIM_DEPTH)
695                         fnhe = fnhe_oldest(hash);
696                 else {
697                         fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
698                         if (!fnhe)
699                                 goto out_unlock;
700
701                         fnhe->fnhe_next = hash->chain;
702                         rcu_assign_pointer(hash->chain, fnhe);
703                 }
704                 fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
705                 fnhe->fnhe_daddr = daddr;
706                 fnhe->fnhe_gw = gw;
707                 fnhe->fnhe_pmtu = pmtu;
708                 fnhe->fnhe_expires = expires;
709
710                 /* Exception created; mark the cached routes for the nexthop
711                  * stale, so anyone caching it rechecks if this exception
712                  * applies to them.
713                  */
714                 rt = rcu_dereference(nh->nh_rth_input);
715                 if (rt)
716                         rt->dst.obsolete = DST_OBSOLETE_KILL;
717
718                 for_each_possible_cpu(i) {
719                         struct rtable __rcu **prt;
720                         prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
721                         rt = rcu_dereference(*prt);
722                         if (rt)
723                                 rt->dst.obsolete = DST_OBSOLETE_KILL;
724                 }
725         }
726
727         fnhe->fnhe_stamp = jiffies;
728
729 out_unlock:
730         spin_unlock_bh(&fnhe_lock);
731 }
732
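/*
 * Handle an ICMP redirect: sanity-check the advertised gateway (different from
 * the current one, unicast, on-link unless shared media), resolve or create a
 * neighbour entry for it, and record the new gateway as a nexthop exception
 * rather than modifying the FIB itself.
 */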
733 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
734                              bool kill_route)
735 {
736         __be32 new_gw = icmp_hdr(skb)->un.gateway;
737         __be32 old_gw = ip_hdr(skb)->saddr;
738         struct net_device *dev = skb->dev;
739         struct in_device *in_dev;
740         struct fib_result res;
741         struct neighbour *n;
742         struct net *net;
743
744         switch (icmp_hdr(skb)->code & 7) {
745         case ICMP_REDIR_NET:
746         case ICMP_REDIR_NETTOS:
747         case ICMP_REDIR_HOST:
748         case ICMP_REDIR_HOSTTOS:
749                 break;
750
751         default:
752                 return;
753         }
754
755         if (rt->rt_gateway != old_gw)
756                 return;
757
758         in_dev = __in_dev_get_rcu(dev);
759         if (!in_dev)
760                 return;
761
762         net = dev_net(dev);
763         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
764             ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
765             ipv4_is_zeronet(new_gw))
766                 goto reject_redirect;
767
768         if (!IN_DEV_SHARED_MEDIA(in_dev)) {
769                 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
770                         goto reject_redirect;
771                 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
772                         goto reject_redirect;
773         } else {
774                 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
775                         goto reject_redirect;
776         }
777
778         n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
779         if (!n)
780                 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
781         if (!IS_ERR(n)) {
782                 if (!(n->nud_state & NUD_VALID)) {
783                         neigh_event_send(n, NULL);
784                 } else {
785                         if (fib_lookup(net, fl4, &res, 0) == 0) {
786                                 struct fib_nh *nh = &FIB_RES_NH(res);
787
788                                 update_or_create_fnhe(nh, fl4->daddr, new_gw,
789                                                 0, jiffies + ip_rt_gc_timeout);
790                         }
791                         if (kill_route)
792                                 rt->dst.obsolete = DST_OBSOLETE_KILL;
793                         call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
794                 }
795                 neigh_release(n);
796         }
797         return;
798
799 reject_redirect:
800 #ifdef CONFIG_IP_ROUTE_VERBOSE
801         if (IN_DEV_LOG_MARTIANS(in_dev)) {
802                 const struct iphdr *iph = (const struct iphdr *) skb->data;
803                 __be32 daddr = iph->daddr;
804                 __be32 saddr = iph->saddr;
805
806                 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
807                                      "  Advised path = %pI4 -> %pI4\n",
808                                      &old_gw, dev->name, &new_gw,
809                                      &saddr, &daddr);
810         }
811 #endif
812         ;
813 }
814
815 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
816 {
817         struct rtable *rt;
818         struct flowi4 fl4;
819         const struct iphdr *iph = (const struct iphdr *) skb->data;
820         struct net *net = dev_net(skb->dev);
821         int oif = skb->dev->ifindex;
822         u8 tos = RT_TOS(iph->tos);
823         u8 prot = iph->protocol;
824         u32 mark = skb->mark;
825
826         rt = (struct rtable *) dst;
827
828         __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
829         __ip_do_redirect(rt, skb, &fl4, true);
830 }
831
832 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
833 {
834         struct rtable *rt = (struct rtable *)dst;
835         struct dst_entry *ret = dst;
836
837         if (rt) {
838                 if (dst->obsolete > 0) {
839                         ip_rt_put(rt);
840                         ret = NULL;
841                 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
842                            rt->dst.expires) {
843                         ip_rt_put(rt);
844                         ret = NULL;
845                 }
846         }
847         return ret;
848 }
849
850 /*
851  * Algorithm:
852  *      1. The first ip_rt_redirect_number redirects are sent
853  *         with exponential backoff, then we stop sending them at all,
854  *         assuming that the host ignores our redirects.
855  *      2. If we did not see packets requiring redirects
856  *         during ip_rt_redirect_silence, we assume that the host
857  *         forgot redirected route and start to send redirects again.
858  *
859  * This algorithm is much cheaper and more intelligent than dumb load limiting
860  * in icmp.c.
861  *
862  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
863  * and "frag. need" (breaks PMTU discovery) in icmp.c.
864  */
865
866 void ip_rt_send_redirect(struct sk_buff *skb)
867 {
868         struct rtable *rt = skb_rtable(skb);
869         struct in_device *in_dev;
870         struct inet_peer *peer;
871         struct net *net;
872         int log_martians;
873         int vif;
874
875         rcu_read_lock();
876         in_dev = __in_dev_get_rcu(rt->dst.dev);
877         if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
878                 rcu_read_unlock();
879                 return;
880         }
881         log_martians = IN_DEV_LOG_MARTIANS(in_dev);
882         vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
883         rcu_read_unlock();
884
885         net = dev_net(rt->dst.dev);
886         peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
887         if (!peer) {
888                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
889                           rt_nexthop(rt, ip_hdr(skb)->daddr));
890                 return;
891         }
892
893         /* No redirected packets during ip_rt_redirect_silence;
894          * reset the algorithm.
895          */
896         if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
897                 peer->rate_tokens = 0;
898
899         /* Too many ignored redirects; do not send anything and
900          * set peer->rate_last to the last seen redirected packet.
901          */
902         if (peer->rate_tokens >= ip_rt_redirect_number) {
903                 peer->rate_last = jiffies;
904                 goto out_put_peer;
905         }
906
907         /* Check for load limit; set rate_last to the latest sent
908          * redirect.
909          */
910         if (peer->rate_tokens == 0 ||
911             time_after(jiffies,
912                        (peer->rate_last +
913                         (ip_rt_redirect_load << peer->rate_tokens)))) {
914                 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
915
916                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
917                 peer->rate_last = jiffies;
918                 ++peer->rate_tokens;
919 #ifdef CONFIG_IP_ROUTE_VERBOSE
920                 if (log_martians &&
921                     peer->rate_tokens == ip_rt_redirect_number)
922                         net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
923                                              &ip_hdr(skb)->saddr, inet_iif(skb),
924                                              &ip_hdr(skb)->daddr, &gw);
925 #endif
926         }
927 out_put_peer:
928         inet_putpeer(peer);
929 }
930
931 static int ip_error(struct sk_buff *skb)
932 {
933         struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
934         struct rtable *rt = skb_rtable(skb);
935         struct inet_peer *peer;
936         unsigned long now;
937         struct net *net;
938         bool send;
939         int code;
940
941         /* IP on this device is disabled. */
942         if (!in_dev)
943                 goto out;
944
945         net = dev_net(rt->dst.dev);
946         if (!IN_DEV_FORWARD(in_dev)) {
947                 switch (rt->dst.error) {
948                 case EHOSTUNREACH:
949                         __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
950                         break;
951
952                 case ENETUNREACH:
953                         __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
954                         break;
955                 }
956                 goto out;
957         }
958
959         switch (rt->dst.error) {
960         case EINVAL:
961         default:
962                 goto out;
963         case EHOSTUNREACH:
964                 code = ICMP_HOST_UNREACH;
965                 break;
966         case ENETUNREACH:
967                 code = ICMP_NET_UNREACH;
968                 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
969                 break;
970         case EACCES:
971                 code = ICMP_PKT_FILTERED;
972                 break;
973         }
974
975         peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
976                                l3mdev_master_ifindex(skb->dev), 1);
977
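        /* Crude token bucket: tokens accrue one per jiffy since the last
         * error, capped at ip_rt_error_burst, and each ICMP error actually
         * sent costs ip_rt_error_cost tokens.
         */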
978         send = true;
979         if (peer) {
980                 now = jiffies;
981                 peer->rate_tokens += now - peer->rate_last;
982                 if (peer->rate_tokens > ip_rt_error_burst)
983                         peer->rate_tokens = ip_rt_error_burst;
984                 peer->rate_last = now;
985                 if (peer->rate_tokens >= ip_rt_error_cost)
986                         peer->rate_tokens -= ip_rt_error_cost;
987                 else
988                         send = false;
989                 inet_putpeer(peer);
990         }
991         if (send)
992                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
993
994 out:    kfree_skb(skb);
995         return 0;
996 }
997
998 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
999 {
1000         struct dst_entry *dst = &rt->dst;
1001         struct fib_result res;
1002
1003         if (dst_metric_locked(dst, RTAX_MTU))
1004                 return;
1005
1006         if (ipv4_mtu(dst) < mtu)
1007                 return;
1008
1009         if (mtu < ip_rt_min_pmtu)
1010                 mtu = ip_rt_min_pmtu;
1011
1012         if (rt->rt_pmtu == mtu &&
1013             time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
1014                 return;
1015
1016         rcu_read_lock();
1017         if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1018                 struct fib_nh *nh = &FIB_RES_NH(res);
1019
1020                 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
1021                                       jiffies + ip_rt_mtu_expires);
1022         }
1023         rcu_read_unlock();
1024 }
1025
1026 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1027                               struct sk_buff *skb, u32 mtu)
1028 {
1029         struct rtable *rt = (struct rtable *) dst;
1030         struct flowi4 fl4;
1031
1032         ip_rt_build_flow_key(&fl4, sk, skb);
1033         __ip_rt_update_pmtu(rt, &fl4, mtu);
1034 }
1035
1036 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1037                       int oif, u32 mark, u8 protocol, int flow_flags)
1038 {
1039         const struct iphdr *iph = (const struct iphdr *) skb->data;
1040         struct flowi4 fl4;
1041         struct rtable *rt;
1042
1043         if (!mark)
1044                 mark = IP4_REPLY_MARK(net, skb->mark);
1045
1046         __build_flow_key(net, &fl4, NULL, iph, oif,
1047                          RT_TOS(iph->tos), protocol, mark, flow_flags);
1048         rt = __ip_route_output_key(net, &fl4);
1049         if (!IS_ERR(rt)) {
1050                 __ip_rt_update_pmtu(rt, &fl4, mtu);
1051                 ip_rt_put(rt);
1052         }
1053 }
1054 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1055
1056 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1057 {
1058         const struct iphdr *iph = (const struct iphdr *) skb->data;
1059         struct flowi4 fl4;
1060         struct rtable *rt;
1061
1062         __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1063
1064         if (!fl4.flowi4_mark)
1065                 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1066
1067         rt = __ip_route_output_key(sock_net(sk), &fl4);
1068         if (!IS_ERR(rt)) {
1069                 __ip_rt_update_pmtu(rt, &fl4, mtu);
1070                 ip_rt_put(rt);
1071         }
1072 }
1073
1074 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1075 {
1076         const struct iphdr *iph = (const struct iphdr *) skb->data;
1077         struct flowi4 fl4;
1078         struct rtable *rt;
1079         struct dst_entry *odst = NULL;
1080         bool new = false;
1081         struct net *net = sock_net(sk);
1082
1083         bh_lock_sock(sk);
1084
1085         if (!ip_sk_accept_pmtu(sk))
1086                 goto out;
1087
1088         odst = sk_dst_get(sk);
1089
1090         if (sock_owned_by_user(sk) || !odst) {
1091                 __ipv4_sk_update_pmtu(skb, sk, mtu);
1092                 goto out;
1093         }
1094
1095         __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1096
1097         rt = (struct rtable *)odst;
1098         if (odst->obsolete && !odst->ops->check(odst, 0)) {
1099                 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1100                 if (IS_ERR(rt))
1101                         goto out;
1102
1103                 new = true;
1104         }
1105
1106         __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1107
1108         if (!dst_check(&rt->dst, 0)) {
1109                 if (new)
1110                         dst_release(&rt->dst);
1111
1112                 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1113                 if (IS_ERR(rt))
1114                         goto out;
1115
1116                 new = true;
1117         }
1118
1119         if (new)
1120                 sk_dst_set(sk, &rt->dst);
1121
1122 out:
1123         bh_unlock_sock(sk);
1124         dst_release(odst);
1125 }
1126 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1127
1128 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1129                    int oif, u32 mark, u8 protocol, int flow_flags)
1130 {
1131         const struct iphdr *iph = (const struct iphdr *) skb->data;
1132         struct flowi4 fl4;
1133         struct rtable *rt;
1134
1135         __build_flow_key(net, &fl4, NULL, iph, oif,
1136                          RT_TOS(iph->tos), protocol, mark, flow_flags);
1137         rt = __ip_route_output_key(net, &fl4);
1138         if (!IS_ERR(rt)) {
1139                 __ip_do_redirect(rt, skb, &fl4, false);
1140                 ip_rt_put(rt);
1141         }
1142 }
1143 EXPORT_SYMBOL_GPL(ipv4_redirect);
1144
1145 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1146 {
1147         const struct iphdr *iph = (const struct iphdr *) skb->data;
1148         struct flowi4 fl4;
1149         struct rtable *rt;
1150         struct net *net = sock_net(sk);
1151
1152         __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1153         rt = __ip_route_output_key(net, &fl4);
1154         if (!IS_ERR(rt)) {
1155                 __ip_do_redirect(rt, skb, &fl4, false);
1156                 ip_rt_put(rt);
1157         }
1158 }
1159 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1160
1161 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1162 {
1163         struct rtable *rt = (struct rtable *) dst;
1164
1165         /* All IPV4 dsts are created with ->obsolete set to the value
1166          * DST_OBSOLETE_FORCE_CHK, which forces validation calls down
1167          * into this function on every use.
1168          *
1169          * When a PMTU/redirect information update invalidates a route,
1170          * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1171          * DST_OBSOLETE_DEAD by dst_free().
1172          */
1173         if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1174                 return NULL;
1175         return dst;
1176 }
1177
1178 static void ipv4_link_failure(struct sk_buff *skb)
1179 {
1180         struct rtable *rt;
1181
1182         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1183
1184         rt = skb_rtable(skb);
1185         if (rt)
1186                 dst_set_expires(&rt->dst, 0);
1187 }
1188
1189 static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1190 {
1191         pr_debug("%s: %pI4 -> %pI4, %s\n",
1192                  __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1193                  skb->dev ? skb->dev->name : "?");
1194         kfree_skb(skb);
1195         WARN_ON(1);
1196         return 0;
1197 }
1198
1199 /*
1200    We do not cache the source address of the outgoing interface,
1201    because it is used only by the IP RR, TS and SRR options,
1202    so it is out of the fast path.
1203
1204    BTW remember: "addr" is allowed to be unaligned
1205    in IP options!
1206  */
1207
1208 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1209 {
1210         __be32 src;
1211
1212         if (rt_is_output_route(rt))
1213                 src = ip_hdr(skb)->saddr;
1214         else {
1215                 struct fib_result res;
1216                 struct flowi4 fl4;
1217                 struct iphdr *iph;
1218
1219                 iph = ip_hdr(skb);
1220
1221                 memset(&fl4, 0, sizeof(fl4));
1222                 fl4.daddr = iph->daddr;
1223                 fl4.saddr = iph->saddr;
1224                 fl4.flowi4_tos = RT_TOS(iph->tos);
1225                 fl4.flowi4_oif = rt->dst.dev->ifindex;
1226                 fl4.flowi4_iif = skb->dev->ifindex;
1227                 fl4.flowi4_mark = skb->mark;
1228
1229                 rcu_read_lock();
1230                 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1231                         src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1232                 else
1233                         src = inet_select_addr(rt->dst.dev,
1234                                                rt_nexthop(rt, iph->daddr),
1235                                                RT_SCOPE_UNIVERSE);
1236                 rcu_read_unlock();
1237         }
1238         memcpy(addr, &src, 4);
1239 }
1240
1241 #ifdef CONFIG_IP_ROUTE_CLASSID
1242 static void set_class_tag(struct rtable *rt, u32 tag)
1243 {
1244         if (!(rt->dst.tclassid & 0xFFFF))
1245                 rt->dst.tclassid |= tag & 0xFFFF;
1246         if (!(rt->dst.tclassid & 0xFFFF0000))
1247                 rt->dst.tclassid |= tag & 0xFFFF0000;
1248 }
1249 #endif
1250
1251 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1252 {
1253         unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1254         unsigned int advmss = max_t(unsigned int, dst->dev->mtu - header_size,
1255                                     ip_rt_min_advmss);
1256
1257         return min(advmss, IPV4_MAX_PMTU - header_size);
1258 }
1259
1260 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1261 {
1262         const struct rtable *rt = (const struct rtable *) dst;
1263         unsigned int mtu = rt->rt_pmtu;
1264
1265         if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1266                 mtu = dst_metric_raw(dst, RTAX_MTU);
1267
1268         if (mtu)
1269                 return mtu;
1270
1271         mtu = dst->dev->mtu;
1272
1273         if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1274                 if (rt->rt_uses_gateway && mtu > 576)
1275                         mtu = 576;
1276         }
1277
1278         mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
1279
1280         return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1281 }
1282
1283 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1284 {
1285         struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
1286         struct fib_nh_exception *fnhe;
1287         u32 hval;
1288
1289         if (!hash)
1290                 return NULL;
1291
1292         hval = fnhe_hashfun(daddr);
1293
1294         for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1295              fnhe = rcu_dereference(fnhe->fnhe_next)) {
1296                 if (fnhe->fnhe_daddr == daddr)
1297                         return fnhe;
1298         }
1299         return NULL;
1300 }
1301
1302 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1303                               __be32 daddr)
1304 {
1305         bool ret = false;
1306
1307         spin_lock_bh(&fnhe_lock);
1308
1309         if (daddr == fnhe->fnhe_daddr) {
1310                 struct rtable __rcu **porig;
1311                 struct rtable *orig;
1312                 int genid = fnhe_genid(dev_net(rt->dst.dev));
1313
1314                 if (rt_is_input_route(rt))
1315                         porig = &fnhe->fnhe_rth_input;
1316                 else
1317                         porig = &fnhe->fnhe_rth_output;
1318                 orig = rcu_dereference(*porig);
1319
1320                 if (fnhe->fnhe_genid != genid) {
1321                         fnhe->fnhe_genid = genid;
1322                         fnhe->fnhe_gw = 0;
1323                         fnhe->fnhe_pmtu = 0;
1324                         fnhe->fnhe_expires = 0;
1325                         fnhe_flush_routes(fnhe);
1326                         orig = NULL;
1327                 }
1328                 fill_route_from_fnhe(rt, fnhe);
1329                 if (!rt->rt_gateway)
1330                         rt->rt_gateway = daddr;
1331
1332                 if (!(rt->dst.flags & DST_NOCACHE)) {
1333                         rcu_assign_pointer(*porig, rt);
1334                         if (orig)
1335                                 rt_free(orig);
1336                         ret = true;
1337                 }
1338
1339                 fnhe->fnhe_stamp = jiffies;
1340         }
1341         spin_unlock_bh(&fnhe_lock);
1342
1343         return ret;
1344 }
1345
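/*
 * Try to publish @rt as the cached route for the nexthop: the single
 * nh_rth_input slot for input routes, this CPU's nh_pcpu_rth_output slot for
 * output routes.  The update is a lockless cmpxchg; if another CPU raced and
 * won, the caller falls back to treating the route as uncached.
 */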
1346 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1347 {
1348         struct rtable *orig, *prev, **p;
1349         bool ret = true;
1350
1351         if (rt_is_input_route(rt)) {
1352                 p = (struct rtable **)&nh->nh_rth_input;
1353         } else {
1354                 p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
1355         }
1356         orig = *p;
1357
1358         prev = cmpxchg(p, orig, rt);
1359         if (prev == orig) {
1360                 if (orig)
1361                         rt_free(orig);
1362         } else
1363                 ret = false;
1364
1365         return ret;
1366 }
1367
1368 struct uncached_list {
1369         spinlock_t              lock;
1370         struct list_head        head;
1371 };
1372
1373 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1374
1375 static void rt_add_uncached_list(struct rtable *rt)
1376 {
1377         struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1378
1379         rt->rt_uncached_list = ul;
1380
1381         spin_lock_bh(&ul->lock);
1382         list_add_tail(&rt->rt_uncached, &ul->head);
1383         spin_unlock_bh(&ul->lock);
1384 }
1385
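/*
 * A route may borrow a refcounted dst_metrics block from its fib_info (see
 * rt_set_nexthop()); drop that reference here and free the block on the last
 * put.  Uncached routes are also unlinked from their per-cpu list.
 */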
1386 static void ipv4_dst_destroy(struct dst_entry *dst)
1387 {
1388         struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1389         struct rtable *rt = (struct rtable *) dst;
1390
1391         if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
1392                 kfree(p);
1393
1394         if (!list_empty(&rt->rt_uncached)) {
1395                 struct uncached_list *ul = rt->rt_uncached_list;
1396
1397                 spin_lock_bh(&ul->lock);
1398                 list_del(&rt->rt_uncached);
1399                 spin_unlock_bh(&ul->lock);
1400         }
1401 }
1402
1403 void rt_flush_dev(struct net_device *dev)
1404 {
1405         struct net *net = dev_net(dev);
1406         struct rtable *rt;
1407         int cpu;
1408
1409         for_each_possible_cpu(cpu) {
1410                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1411
1412                 spin_lock_bh(&ul->lock);
1413                 list_for_each_entry(rt, &ul->head, rt_uncached) {
1414                         if (rt->dst.dev != dev)
1415                                 continue;
1416                         rt->dst.dev = net->loopback_dev;
1417                         dev_hold(rt->dst.dev);
1418                         dev_put(dev);
1419                 }
1420                 spin_unlock_bh(&ul->lock);
1421         }
1422 }
1423
1424 static bool rt_cache_valid(const struct rtable *rt)
1425 {
1426         return  rt &&
1427                 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1428                 !rt_is_expired(rt);
1429 }
1430
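/*
 * Copy nexthop state from the FIB result into the new route: gateway, metrics
 * (taking a reference on fi->fib_metrics when they are not the defaults, which
 * ipv4_dst_destroy() later drops), classid and lwtunnel state, then try to
 * cache the route in the nexthop exception or the FIB nexthop itself.
 */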
1431 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1432                            const struct fib_result *res,
1433                            struct fib_nh_exception *fnhe,
1434                            struct fib_info *fi, u16 type, u32 itag)
1435 {
1436         bool cached = false;
1437
1438         if (fi) {
1439                 struct fib_nh *nh = &FIB_RES_NH(*res);
1440
1441                 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
1442                         rt->rt_gateway = nh->nh_gw;
1443                         rt->rt_uses_gateway = 1;
1444                 }
1445                 dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
1446                 if (fi->fib_metrics != &dst_default_metrics) {
1447                         rt->dst._metrics |= DST_METRICS_REFCOUNTED;
1448                         atomic_inc(&fi->fib_metrics->refcnt);
1449                 }
1450 #ifdef CONFIG_IP_ROUTE_CLASSID
1451                 rt->dst.tclassid = nh->nh_tclassid;
1452 #endif
1453                 rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
1454                 if (unlikely(fnhe))
1455                         cached = rt_bind_exception(rt, fnhe, daddr);
1456                 else if (!(rt->dst.flags & DST_NOCACHE))
1457                         cached = rt_cache_route(nh, rt);
1458                 if (unlikely(!cached)) {
1459                         /* Routes we intend to cache in nexthop exception or
1460                          * FIB nexthop have the DST_NOCACHE bit clear.
1461                          * However, if we are unsuccessful at storing this
1462                          * route into the cache we really need to set it.
1463                          */
1464                         rt->dst.flags |= DST_NOCACHE;
1465                         if (!rt->rt_gateway)
1466                                 rt->rt_gateway = daddr;
1467                         rt_add_uncached_list(rt);
1468                 }
1469         } else
1470                 rt_add_uncached_list(rt);
1471
1472 #ifdef CONFIG_IP_ROUTE_CLASSID
1473 #ifdef CONFIG_IP_MULTIPLE_TABLES
1474         set_class_tag(rt, res->tclassid);
1475 #endif
1476         set_class_tag(rt, itag);
1477 #endif
1478 }
1479
1480 struct rtable *rt_dst_alloc(struct net_device *dev,
1481                             unsigned int flags, u16 type,
1482                             bool nopolicy, bool noxfrm, bool will_cache)
1483 {
1484         struct rtable *rt;
1485
1486         rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1487                        (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
1488                        (nopolicy ? DST_NOPOLICY : 0) |
1489                        (noxfrm ? DST_NOXFRM : 0));
1490
1491         if (rt) {
1492                 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1493                 rt->rt_flags = flags;
1494                 rt->rt_type = type;
1495                 rt->rt_is_input = 0;
1496                 rt->rt_iif = 0;
1497                 rt->rt_pmtu = 0;
1498                 rt->rt_gateway = 0;
1499                 rt->rt_uses_gateway = 0;
1500                 rt->rt_table_id = 0;
1501                 INIT_LIST_HEAD(&rt->rt_uncached);
1502
1503                 rt->dst.output = ip_output;
1504                 if (flags & RTCF_LOCAL)
1505                         rt->dst.input = ip_local_deliver;
1506         }
1507
1508         return rt;
1509 }
1510 EXPORT_SYMBOL(rt_dst_alloc);
1511
1512 /* called in rcu_read_lock() section */
1513 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1514                                 u8 tos, struct net_device *dev, int our)
1515 {
1516         struct rtable *rth;
1517         struct in_device *in_dev = __in_dev_get_rcu(dev);
1518         unsigned int flags = RTCF_MULTICAST;
1519         u32 itag = 0;
1520         int err;
1521
1522         /* Primary sanity checks. */
1523
1524         if (!in_dev)
1525                 return -EINVAL;
1526
1527         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1528             skb->protocol != htons(ETH_P_IP))
1529                 goto e_inval;
1530
1531         if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1532                 goto e_inval;
1533
1534         if (ipv4_is_zeronet(saddr)) {
1535                 if (!ipv4_is_local_multicast(daddr))
1536                         goto e_inval;
1537         } else {
1538                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1539                                           in_dev, &itag);
1540                 if (err < 0)
1541                         goto e_err;
1542         }
1543         if (our)
1544                 flags |= RTCF_LOCAL;
1545
1546         rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1547                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1548         if (!rth)
1549                 goto e_nobufs;
1550
1551 #ifdef CONFIG_IP_ROUTE_CLASSID
1552         rth->dst.tclassid = itag;
1553 #endif
1554         rth->dst.output = ip_rt_bug;
1555         rth->rt_is_input = 1;
1556
1557 #ifdef CONFIG_IP_MROUTE
1558         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1559                 rth->dst.input = ip_mr_input;
1560 #endif
1561         RT_CACHE_STAT_INC(in_slow_mc);
1562
1563         skb_dst_set(skb, &rth->dst);
1564         return 0;
1565
1566 e_nobufs:
1567         return -ENOBUFS;
1568 e_inval:
1569         return -EINVAL;
1570 e_err:
1571         return err;
1572 }
1573
1574
1575 static void ip_handle_martian_source(struct net_device *dev,
1576                                      struct in_device *in_dev,
1577                                      struct sk_buff *skb,
1578                                      __be32 daddr,
1579                                      __be32 saddr)
1580 {
1581         RT_CACHE_STAT_INC(in_martian_src);
1582 #ifdef CONFIG_IP_ROUTE_VERBOSE
1583         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1584                 /*
1585                  *      RFC 1812 recommendation: if the source is martian,
1586                  *      the only hint is the MAC header.
1587                  */
1588                 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1589                         &daddr, &saddr, dev->name);
1590                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1591                         print_hex_dump(KERN_WARNING, "ll header: ",
1592                                        DUMP_PREFIX_OFFSET, 16, 1,
1593                                        skb_mac_header(skb),
1594                                        dev->hard_header_len, true);
1595                 }
1596         }
1597 #endif
1598 }
1599
1600 static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1601 {
1602         struct fnhe_hash_bucket *hash;
1603         struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1604         u32 hval = fnhe_hashfun(daddr);
1605
1606         spin_lock_bh(&fnhe_lock);
1607
1608         hash = rcu_dereference_protected(nh->nh_exceptions,
1609                                          lockdep_is_held(&fnhe_lock));
1610         hash += hval;
1611
1612         fnhe_p = &hash->chain;
1613         fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1614         while (fnhe) {
1615                 if (fnhe->fnhe_daddr == daddr) {
1616                         rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1617                                 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1618                         fnhe_flush_routes(fnhe);
1619                         kfree_rcu(fnhe, rcu);
1620                         break;
1621                 }
1622                 fnhe_p = &fnhe->fnhe_next;
1623                 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1624                                                  lockdep_is_held(&fnhe_lock));
1625         }
1626
1627         spin_unlock_bh(&fnhe_lock);
1628 }
1629
1630 static void set_lwt_redirect(struct rtable *rth)
1631 {
1632         if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
1633                 rth->dst.lwtstate->orig_output = rth->dst.output;
1634                 rth->dst.output = lwtunnel_output;
1635         }
1636
1637         if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
1638                 rth->dst.lwtstate->orig_input = rth->dst.input;
1639                 rth->dst.input = lwtunnel_input;
1640         }
1641 }
1642
1643 /* called in rcu_read_lock() section */
1644 static int __mkroute_input(struct sk_buff *skb,
1645                            const struct fib_result *res,
1646                            struct in_device *in_dev,
1647                            __be32 daddr, __be32 saddr, u32 tos)
1648 {
1649         struct fib_nh_exception *fnhe;
1650         struct rtable *rth;
1651         int err;
1652         struct in_device *out_dev;
1653         bool do_cache;
1654         u32 itag = 0;
1655
1656         /* get a working reference to the output device */
1657         out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1658         if (!out_dev) {
1659                 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1660                 return -EINVAL;
1661         }
1662
1663         err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1664                                   in_dev->dev, in_dev, &itag);
1665         if (err < 0) {
1666                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1667                                          saddr);
1668
1669                 goto cleanup;
1670         }
1671
1672         do_cache = res->fi && !itag;
1673         if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1674             skb->protocol == htons(ETH_P_IP) &&
1675             (IN_DEV_SHARED_MEDIA(out_dev) ||
1676              inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1677                 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1678
1679         if (skb->protocol != htons(ETH_P_IP)) {
1680                 /* Not IP (i.e. ARP). Do not create a route if it is
1681                  * invalid for proxy arp. DNAT routes are always valid.
1682                  *
1683                  * The proxy arp feature has been extended to allow ARP
1684                  * replies back on the same interface, to support
1685                  * Private VLAN switch technologies. See arp.c.
1686                  */
1687                 if (out_dev == in_dev &&
1688                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1689                         err = -EINVAL;
1690                         goto cleanup;
1691                 }
1692         }
1693
1694         fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1695         if (do_cache) {
1696                 if (fnhe) {
1697                         rth = rcu_dereference(fnhe->fnhe_rth_input);
1698                         if (rth && rth->dst.expires &&
1699                             time_after(jiffies, rth->dst.expires)) {
1700                                 ip_del_fnhe(&FIB_RES_NH(*res), daddr);
1701                                 fnhe = NULL;
1702                         } else {
1703                                 goto rt_cache;
1704                         }
1705                 }
1706
1707                 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1708
1709 rt_cache:
1710                 if (rt_cache_valid(rth)) {
1711                         skb_dst_set_noref(skb, &rth->dst);
1712                         goto out;
1713                 }
1714         }
1715
1716         rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1717                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
1718                            IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1719         if (!rth) {
1720                 err = -ENOBUFS;
1721                 goto cleanup;
1722         }
1723
1724         rth->rt_is_input = 1;
1725         if (res->table)
1726                 rth->rt_table_id = res->table->tb_id;
1727         RT_CACHE_STAT_INC(in_slow_tot);
1728
1729         rth->dst.input = ip_forward;
1730
1731         rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
1732         set_lwt_redirect(rth);
1733         skb_dst_set(skb, &rth->dst);
1734 out:
1735         err = 0;
1736  cleanup:
1737         return err;
1738 }
1739
1740 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1741 /* To make ICMP packets follow the right flow, the multipath hash is
1742  * calculated from the inner IP addresses.
1743  */
1744 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1745                                  struct flow_keys *hash_keys)
1746 {
1747         const struct iphdr *outer_iph = ip_hdr(skb);
1748         const struct iphdr *inner_iph;
1749         const struct icmphdr *icmph;
1750         struct iphdr _inner_iph;
1751         struct icmphdr _icmph;
1752
1753         hash_keys->addrs.v4addrs.src = outer_iph->saddr;
1754         hash_keys->addrs.v4addrs.dst = outer_iph->daddr;
1755         if (likely(outer_iph->protocol != IPPROTO_ICMP))
1756                 return;
1757
1758         if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1759                 return;
1760
1761         icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1762                                    &_icmph);
1763         if (!icmph)
1764                 return;
1765
1766         if (icmph->type != ICMP_DEST_UNREACH &&
1767             icmph->type != ICMP_REDIRECT &&
1768             icmph->type != ICMP_TIME_EXCEEDED &&
1769             icmph->type != ICMP_PARAMETERPROB)
1770                 return;
1771
1772         inner_iph = skb_header_pointer(skb,
1773                                        outer_iph->ihl * 4 + sizeof(_icmph),
1774                                        sizeof(_inner_iph), &_inner_iph);
1775         if (!inner_iph)
1776                 return;
1777         hash_keys->addrs.v4addrs.src = inner_iph->saddr;
1778         hash_keys->addrs.v4addrs.dst = inner_iph->daddr;
1779 }
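
/* Illustrative userspace sketch (not part of route.c): locating the quoted
 * (inner) IP header of an ICMP error with the same offset arithmetic as
 * ip_multipath_l3_keys() above. ICMP errors embed the packet that triggered
 * them, so hashing the embedded addresses steers the error onto the same
 * multipath leg as the original flow. The helper name is ours; only
 * libc/UAPI definitions are used.
 */
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>

static const struct iphdr *icmp_inner_iph(const uint8_t *pkt, size_t len)
{
	const struct iphdr *outer = (const struct iphdr *)pkt;
	const struct icmphdr *icmph;
	size_t off = outer->ihl * 4;			/* outer header length */

	if (outer->protocol != IPPROTO_ICMP)
		return NULL;
	if (ntohs(outer->frag_off) & IP_OFFMASK)	/* non-first fragment */
		return NULL;
	if (len < off + sizeof(*icmph))
		return NULL;
	icmph = (const struct icmphdr *)(pkt + off);

	switch (icmph->type) {
	case ICMP_DEST_UNREACH:
	case ICMP_REDIRECT:
	case ICMP_TIME_EXCEEDED:
	case ICMP_PARAMETERPROB:
		break;			/* error types quote the trigger packet */
	default:
		return NULL;		/* echo etc.: hash the outer header */
	}

	off += sizeof(*icmph);		/* inner IP header follows the ICMP header */
	if (len < off + sizeof(struct iphdr))
		return NULL;
	return (const struct iphdr *)(pkt + off);
}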
1780
1781 /* if skb is set it will be used and fl4 can be NULL */
1782 int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
1783                        const struct sk_buff *skb)
1784 {
1785         struct net *net = fi->fib_net;
1786         struct flow_keys hash_keys;
1787         u32 mhash;
1788
1789         switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
1790         case 0:
1791                 memset(&hash_keys, 0, sizeof(hash_keys));
1792                 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1793                 if (skb) {
1794                         ip_multipath_l3_keys(skb, &hash_keys);
1795                 } else {
1796                         hash_keys.addrs.v4addrs.src = fl4->saddr;
1797                         hash_keys.addrs.v4addrs.dst = fl4->daddr;
1798                 }
1799                 break;
1800         case 1:
1801                 /* skb is currently provided only when forwarding */
1802                 if (skb) {
1803                         unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1804                         struct flow_keys keys;
1805
1806                         /* short-circuit if we already have L4 hash present */
1807                         if (skb->l4_hash)
1808                                 return skb_get_hash_raw(skb) >> 1;
1809                         memset(&hash_keys, 0, sizeof(hash_keys));
1810                         skb_flow_dissect_flow_keys(skb, &keys, flag);
1811                         hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1812                         hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1813                         hash_keys.ports.src = keys.ports.src;
1814                         hash_keys.ports.dst = keys.ports.dst;
1815                         hash_keys.basic.ip_proto = keys.basic.ip_proto;
1816                 } else {
1817                         memset(&hash_keys, 0, sizeof(hash_keys));
1818                         hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1819                         hash_keys.addrs.v4addrs.src = fl4->saddr;
1820                         hash_keys.addrs.v4addrs.dst = fl4->daddr;
1821                         hash_keys.ports.src = fl4->fl4_sport;
1822                         hash_keys.ports.dst = fl4->fl4_dport;
1823                         hash_keys.basic.ip_proto = fl4->flowi4_proto;
1824                 }
1825                 break;
1826         }
1827         mhash = flow_hash_from_keys(&hash_keys);
1828
1829         return mhash >> 1;
1830 }
1831 EXPORT_SYMBOL_GPL(fib_multipath_hash);
1832 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
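
/* Illustrative sketch (not part of route.c): how the hash above selects a
 * nexthop. The sysctl net.ipv4.fib_multipath_hash_policy picks case 0 (L3
 * addresses only) or case 1 (L4 five-tuple). fib_select_multipath() then
 * walks the nexthops, each carrying an upper bound proportional to its
 * weight in [0, INT_MAX], and takes the first one whose bound covers the
 * hash (which is why fib_multipath_hash() shifts right by one, keeping the
 * value non-negative). The struct and function names here are ours.
 */
struct demo_nh { int upper_bound; };	/* cf. fib_nh->nh_upper_bound */

static int demo_select_path(const struct demo_nh *nh, int nhs, int mhash)
{
	int i;

	for (i = 0; i < nhs; i++)
		if (nh[i].upper_bound >= mhash)
			return i;	/* chosen nexthop index */
	return nhs - 1;			/* fallback: last nexthop */
}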
1833
1834 static int ip_mkroute_input(struct sk_buff *skb,
1835                             struct fib_result *res,
1836                             struct in_device *in_dev,
1837                             __be32 daddr, __be32 saddr, u32 tos)
1838 {
1839 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1840         if (res->fi && res->fi->fib_nhs > 1) {
1841                 int h = fib_multipath_hash(res->fi, NULL, skb);
1842
1843                 fib_select_multipath(res, h);
1844         }
1845 #endif
1846
1847         /* create a routing cache entry */
1848         return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1849 }
1850
1851 /*
1852  *      NOTE. We drop all packets that have local source
1853  *      addresses, because every properly looped back packet
1854  *      must already have the correct destination attached by the output routine.
1855  *
1856  *      This approach solves two big problems:
1857  *      1. Non-simplex devices are handled properly.
1858  *      2. IP spoofing attempts are filtered with a 100% guarantee.
1859  *      called with rcu_read_lock()
1860  */
1861
1862 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1863                                u8 tos, struct net_device *dev)
1864 {
1865         struct fib_result res;
1866         struct in_device *in_dev = __in_dev_get_rcu(dev);
1867         struct ip_tunnel_info *tun_info;
1868         struct flowi4   fl4;
1869         unsigned int    flags = 0;
1870         u32             itag = 0;
1871         struct rtable   *rth;
1872         int             err = -EINVAL;
1873         struct net    *net = dev_net(dev);
1874         bool do_cache;
1875
1876         /* IP on this device is disabled. */
1877
1878         if (!in_dev)
1879                 goto out;
1880
1881         /* Check for the weirdest martians, which cannot be detected
1882            by fib_lookup.
1883          */
1884
1885         tun_info = skb_tunnel_info(skb);
1886         if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1887                 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
1888         else
1889                 fl4.flowi4_tun_key.tun_id = 0;
1890         skb_dst_drop(skb);
1891
1892         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1893                 goto martian_source;
1894
1895         res.fi = NULL;
1896         res.table = NULL;
1897         if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1898                 goto brd_input;
1899
1900         /* Accept zero addresses only for limited broadcast;
1901          * I do not even know whether to fix it or not. Waiting for complaints :-)
1902          */
1903         if (ipv4_is_zeronet(saddr))
1904                 goto martian_source;
1905
1906         if (ipv4_is_zeronet(daddr))
1907                 goto martian_destination;
1908
1909         /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
1910          * invoking it at most once, and only when daddr and/or saddr is a loopback address
1911          */
1912         if (ipv4_is_loopback(daddr)) {
1913                 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1914                         goto martian_destination;
1915         } else if (ipv4_is_loopback(saddr)) {
1916                 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1917                         goto martian_source;
1918         }
1919
1920         /*
1921          *      Now we are ready to route the packet.
1922          */
1923         fl4.flowi4_oif = 0;
1924         fl4.flowi4_iif = dev->ifindex;
1925         fl4.flowi4_mark = skb->mark;
1926         fl4.flowi4_tos = tos;
1927         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1928         fl4.flowi4_flags = 0;
1929         fl4.daddr = daddr;
1930         fl4.saddr = saddr;
1931         fl4.flowi4_uid = sock_net_uid(net, NULL);
1932         err = fib_lookup(net, &fl4, &res, 0);
1933         if (err != 0) {
1934                 if (!IN_DEV_FORWARD(in_dev))
1935                         err = -EHOSTUNREACH;
1936                 goto no_route;
1937         }
1938
1939         if (res.type == RTN_BROADCAST)
1940                 goto brd_input;
1941
1942         if (res.type == RTN_LOCAL) {
1943                 err = fib_validate_source(skb, saddr, daddr, tos,
1944                                           0, dev, in_dev, &itag);
1945                 if (err < 0)
1946                         goto martian_source;
1947                 goto local_input;
1948         }
1949
1950         if (!IN_DEV_FORWARD(in_dev)) {
1951                 err = -EHOSTUNREACH;
1952                 goto no_route;
1953         }
1954         if (res.type != RTN_UNICAST)
1955                 goto martian_destination;
1956
1957         err = ip_mkroute_input(skb, &res, in_dev, daddr, saddr, tos);
1958 out:    return err;
1959
1960 brd_input:
1961         if (skb->protocol != htons(ETH_P_IP))
1962                 goto e_inval;
1963
1964         if (!ipv4_is_zeronet(saddr)) {
1965                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1966                                           in_dev, &itag);
1967                 if (err < 0)
1968                         goto martian_source;
1969         }
1970         flags |= RTCF_BROADCAST;
1971         res.type = RTN_BROADCAST;
1972         RT_CACHE_STAT_INC(in_brd);
1973
1974 local_input:
1975         do_cache = false;
1976         if (res.fi) {
1977                 if (!itag) {
1978                         rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
1979                         if (rt_cache_valid(rth)) {
1980                                 skb_dst_set_noref(skb, &rth->dst);
1981                                 err = 0;
1982                                 goto out;
1983                         }
1984                         do_cache = true;
1985                 }
1986         }
1987
1988         rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
1989                            flags | RTCF_LOCAL, res.type,
1990                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
1991         if (!rth)
1992                 goto e_nobufs;
1993
1994         rth->dst.output = ip_rt_bug;
1995 #ifdef CONFIG_IP_ROUTE_CLASSID
1996         rth->dst.tclassid = itag;
1997 #endif
1998         rth->rt_is_input = 1;
1999         if (res.table)
2000                 rth->rt_table_id = res.table->tb_id;
2001
2002         RT_CACHE_STAT_INC(in_slow_tot);
2003         if (res.type == RTN_UNREACHABLE) {
2004                 rth->dst.input = ip_error;
2005                 rth->dst.error = -err;
2006                 rth->rt_flags &= ~RTCF_LOCAL;
2007         }
2008
2009         if (do_cache) {
2010                 struct fib_nh *nh = &FIB_RES_NH(res);
2011
2012                 rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
2013                 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2014                         WARN_ON(rth->dst.input == lwtunnel_input);
2015                         rth->dst.lwtstate->orig_input = rth->dst.input;
2016                         rth->dst.input = lwtunnel_input;
2017                 }
2018
2019                 if (unlikely(!rt_cache_route(nh, rth))) {
2020                         rth->dst.flags |= DST_NOCACHE;
2021                         rt_add_uncached_list(rth);
2022                 }
2023         }
2024         skb_dst_set(skb, &rth->dst);
2025         err = 0;
2026         goto out;
2027
2028 no_route:
2029         RT_CACHE_STAT_INC(in_no_route);
2030         res.type = RTN_UNREACHABLE;
2031         res.fi = NULL;
2032         res.table = NULL;
2033         goto local_input;
2034
2035         /*
2036          *      Do not cache martian addresses: they should be logged (RFC1812)
2037          */
2038 martian_destination:
2039         RT_CACHE_STAT_INC(in_martian_dst);
2040 #ifdef CONFIG_IP_ROUTE_VERBOSE
2041         if (IN_DEV_LOG_MARTIANS(in_dev))
2042                 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2043                                      &daddr, &saddr, dev->name);
2044 #endif
2045
2046 e_inval:
2047         err = -EINVAL;
2048         goto out;
2049
2050 e_nobufs:
2051         err = -ENOBUFS;
2052         goto out;
2053
2054 martian_source:
2055         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2056         goto out;
2057 }
2058
2059 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2060                          u8 tos, struct net_device *dev)
2061 {
2062         int res;
2063
2064         tos &= IPTOS_RT_MASK;
2065         rcu_read_lock();
2066
2067         /* Multicast recognition logic was moved from the route cache to here.
2068            The problem was that too many Ethernet cards have broken/missing
2069            hardware multicast filters :-( As a result, a host on a multicast
2070            network acquires a lot of useless route cache entries, e.g. for
2071            SDR messages from all over the world. Now we try to get rid of them.
2072            Really, provided the software IP multicast filter is organized
2073            reasonably (at least, hashed), it does not result in a slowdown
2074            compared with route cache reject entries.
2075            Note that multicast routers are not affected, because a
2076            route cache entry is created eventually.
2077          */
2078         if (ipv4_is_multicast(daddr)) {
2079                 struct in_device *in_dev = __in_dev_get_rcu(dev);
2080                 int our = 0;
2081
2082                 if (in_dev)
2083                         our = ip_check_mc_rcu(in_dev, daddr, saddr,
2084                                               ip_hdr(skb)->protocol);
2085
2086                 /* check l3 master if no match yet */
2087                 if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
2088                         struct in_device *l3_in_dev;
2089
2090                         l3_in_dev = __in_dev_get_rcu(skb->dev);
2091                         if (l3_in_dev)
2092                                 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2093                                                       ip_hdr(skb)->protocol);
2094                 }
2095
2096                 res = -EINVAL;
2097                 if (our
2098 #ifdef CONFIG_IP_MROUTE
2099                         ||
2100                     (!ipv4_is_local_multicast(daddr) &&
2101                      IN_DEV_MFORWARD(in_dev))
2102 #endif
2103                    ) {
2104                         res = ip_route_input_mc(skb, daddr, saddr,
2105                                                 tos, dev, our);
2106                 }
2107                 rcu_read_unlock();
2108                 return res;
2109         }
2110         res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2111         rcu_read_unlock();
2112         return res;
2113 }
2114 EXPORT_SYMBOL(ip_route_input_noref);
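
/* Illustrative sketch (not a function in this file): the typical in-kernel
 * calling pattern, compare ip_rcv_finish(). The lookup attaches a dst to
 * the skb; dst_input() then either delivers locally or forwards, depending
 * on which handler the lookup installed.
 */
static int example_route_incoming(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	int err;

	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
				   iph->tos, skb->dev);
	if (err)
		return err;		/* martian, no route, -ENOBUFS, ... */

	return dst_input(skb);		/* ip_local_deliver or ip_forward */
}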
2115
2116 /* called with rcu_read_lock() */
2117 static struct rtable *__mkroute_output(const struct fib_result *res,
2118                                        const struct flowi4 *fl4, int orig_oif,
2119                                        struct net_device *dev_out,
2120                                        unsigned int flags)
2121 {
2122         struct fib_info *fi = res->fi;
2123         struct fib_nh_exception *fnhe;
2124         struct in_device *in_dev;
2125         u16 type = res->type;
2126         struct rtable *rth;
2127         bool do_cache;
2128
2129         in_dev = __in_dev_get_rcu(dev_out);
2130         if (!in_dev)
2131                 return ERR_PTR(-EINVAL);
2132
2133         if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2134                 if (ipv4_is_loopback(fl4->saddr) &&
2135                     !(dev_out->flags & IFF_LOOPBACK) &&
2136                     !netif_is_l3_master(dev_out))
2137                         return ERR_PTR(-EINVAL);
2138
2139         if (ipv4_is_lbcast(fl4->daddr))
2140                 type = RTN_BROADCAST;
2141         else if (ipv4_is_multicast(fl4->daddr))
2142                 type = RTN_MULTICAST;
2143         else if (ipv4_is_zeronet(fl4->daddr))
2144                 return ERR_PTR(-EINVAL);
2145
2146         if (dev_out->flags & IFF_LOOPBACK)
2147                 flags |= RTCF_LOCAL;
2148
2149         do_cache = true;
2150         if (type == RTN_BROADCAST) {
2151                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2152                 fi = NULL;
2153         } else if (type == RTN_MULTICAST) {
2154                 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2155                 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2156                                      fl4->flowi4_proto))
2157                         flags &= ~RTCF_LOCAL;
2158                 else
2159                         do_cache = false;
2160                 /* If a multicast route does not exist, use the
2161                  * default one, but do not use a gateway in this case.
2162                  * Yes, it is a hack.
2163                  */
2164                 if (fi && res->prefixlen < 4)
2165                         fi = NULL;
2166         } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2167                    (orig_oif != dev_out->ifindex)) {
2168                 /* For local routes that require a particular output interface
2169                  * we do not want to cache the result.  Caching the result
2170                  * causes incorrect behaviour when there are multiple source
2171                  * addresses on the interface, the end result being that if the
2172                  * intended recipient is waiting on that interface for the
2173                  * packet he won't receive it because it will be delivered on
2174                  * the loopback interface and the IP_PKTINFO ipi_ifindex will
2175                  * be set to the loopback interface as well.
2176                  */
2177                 fi = NULL;
2178         }
2179
2180         fnhe = NULL;
2181         do_cache &= fi != NULL;
2182         if (do_cache) {
2183                 struct rtable __rcu **prth;
2184                 struct fib_nh *nh = &FIB_RES_NH(*res);
2185
2186                 fnhe = find_exception(nh, fl4->daddr);
2187                 if (fnhe) {
2188                         prth = &fnhe->fnhe_rth_output;
2189                         rth = rcu_dereference(*prth);
2190                         if (rth && rth->dst.expires &&
2191                             time_after(jiffies, rth->dst.expires)) {
2192                                 ip_del_fnhe(nh, fl4->daddr);
2193                                 fnhe = NULL;
2194                         } else {
2195                                 goto rt_cache;
2196                         }
2197                 }
2198
2199                 if (unlikely(fl4->flowi4_flags &
2200                              FLOWI_FLAG_KNOWN_NH &&
2201                              !(nh->nh_gw &&
2202                                nh->nh_scope == RT_SCOPE_LINK))) {
2203                         do_cache = false;
2204                         goto add;
2205                 }
2206                 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2207                 rth = rcu_dereference(*prth);
2208
2209 rt_cache:
2210                 if (rt_cache_valid(rth)) {
2211                         dst_hold(&rth->dst);
2212                         return rth;
2213                 }
2214         }
2215
2216 add:
2217         rth = rt_dst_alloc(dev_out, flags, type,
2218                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2219                            IN_DEV_CONF_GET(in_dev, NOXFRM),
2220                            do_cache);
2221         if (!rth)
2222                 return ERR_PTR(-ENOBUFS);
2223
2224         rth->rt_iif     = orig_oif ? : 0;
2225         if (res->table)
2226                 rth->rt_table_id = res->table->tb_id;
2227
2228         RT_CACHE_STAT_INC(out_slow_tot);
2229
2230         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2231                 if (flags & RTCF_LOCAL &&
2232                     !(dev_out->flags & IFF_LOOPBACK)) {
2233                         rth->dst.output = ip_mc_output;
2234                         RT_CACHE_STAT_INC(out_slow_mc);
2235                 }
2236 #ifdef CONFIG_IP_MROUTE
2237                 if (type == RTN_MULTICAST) {
2238                         if (IN_DEV_MFORWARD(in_dev) &&
2239                             !ipv4_is_local_multicast(fl4->daddr)) {
2240                                 rth->dst.input = ip_mr_input;
2241                                 rth->dst.output = ip_mc_output;
2242                         }
2243                 }
2244 #endif
2245         }
2246
2247         rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
2248         set_lwt_redirect(rth);
2249
2250         return rth;
2251 }
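
/* Illustrative userspace sketch (not part of route.c), relating to the
 * IP_PKTINFO comment in __mkroute_output() above: with the option enabled,
 * a receiver can read ipi_ifindex from the control message to learn which
 * interface actually delivered the datagram. The function name is ours;
 * enable the option once per socket (done inline here only for brevity).
 */
#define _GNU_SOURCE
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

static int recv_with_ifindex(int udp_fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cm;
	int one = 1, ifindex = -1;

	setsockopt(udp_fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
	if (recvmsg(udp_fd, &msg, 0) < 0)
		return -1;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_PKTINFO)
			ifindex = ((struct in_pktinfo *)CMSG_DATA(cm))->ipi_ifindex;

	return ifindex;		/* the delivering interface's ifindex */
}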
2252
2253 /*
2254  * Major route resolver routine.
2255  */
2256
2257 struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2258                                           const struct sk_buff *skb)
2259 {
2260         struct net_device *dev_out = NULL;
2261         __u8 tos = RT_FL_TOS(fl4);
2262         unsigned int flags = 0;
2263         struct fib_result res;
2264         struct rtable *rth;
2265         int orig_oif;
2266         int err = -ENETUNREACH;
2267
2268         res.tclassid    = 0;
2269         res.fi          = NULL;
2270         res.table       = NULL;
2271
2272         orig_oif = fl4->flowi4_oif;
2273
2274         fl4->flowi4_iif = LOOPBACK_IFINDEX;
2275         fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2276         fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2277                          RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2278
2279         rcu_read_lock();
2280         if (fl4->saddr) {
2281                 rth = ERR_PTR(-EINVAL);
2282                 if (ipv4_is_multicast(fl4->saddr) ||
2283                     ipv4_is_lbcast(fl4->saddr) ||
2284                     ipv4_is_zeronet(fl4->saddr))
2285                         goto out;
2286
2287                 /* I removed the check for oif == dev_out->oif here.
2288                    It was wrong for two reasons:
2289                    1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2290                       is assigned to multiple interfaces.
2291                    2. Moreover, we are allowed to send packets with the saddr
2292                       of another iface. --ANK
2293                  */
2294
2295                 if (fl4->flowi4_oif == 0 &&
2296                     (ipv4_is_multicast(fl4->daddr) ||
2297                      ipv4_is_lbcast(fl4->daddr))) {
2298                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2299                         dev_out = __ip_dev_find(net, fl4->saddr, false);
2300                         if (!dev_out)
2301                                 goto out;
2302
2303                         /* Special hack: the user can direct multicasts
2304                            and limited broadcast via the necessary interface
2305                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2306                            This hack is not just for fun; it allows
2307                            vic, vat and friends to work.
2308                            They bind a socket to loopback, set the ttl to zero
2309                            and expect that it will work.
2310                            From the viewpoint of the routing cache they are broken,
2311                            because we are not allowed to build a multicast path
2312                            with a loopback source addr (look, the routing cache
2313                            cannot know that the ttl is zero, so the packet
2314                            will not leave this host and the route is valid).
2315                            Luckily, this hack is a good workaround.
2316                          */
2317
2318                         fl4->flowi4_oif = dev_out->ifindex;
2319                         goto make_route;
2320                 }
2321
2322                 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2323                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2324                         if (!__ip_dev_find(net, fl4->saddr, false))
2325                                 goto out;
2326                 }
2327         }
2328
2329
2330         if (fl4->flowi4_oif) {
2331                 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2332                 rth = ERR_PTR(-ENODEV);
2333                 if (!dev_out)
2334                         goto out;
2335
2336                 /* RACE: Check return value of inet_select_addr instead. */
2337                 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2338                         rth = ERR_PTR(-ENETUNREACH);
2339                         goto out;
2340                 }
2341                 if (ipv4_is_local_multicast(fl4->daddr) ||
2342                     ipv4_is_lbcast(fl4->daddr) ||
2343                     fl4->flowi4_proto == IPPROTO_IGMP) {
2344                         if (!fl4->saddr)
2345                                 fl4->saddr = inet_select_addr(dev_out, 0,
2346                                                               RT_SCOPE_LINK);
2347                         goto make_route;
2348                 }
2349                 if (!fl4->saddr) {
2350                         if (ipv4_is_multicast(fl4->daddr))
2351                                 fl4->saddr = inet_select_addr(dev_out, 0,
2352                                                               fl4->flowi4_scope);
2353                         else if (!fl4->daddr)
2354                                 fl4->saddr = inet_select_addr(dev_out, 0,
2355                                                               RT_SCOPE_HOST);
2356                 }
2357         }
2358
2359         if (!fl4->daddr) {
2360                 fl4->daddr = fl4->saddr;
2361                 if (!fl4->daddr)
2362                         fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2363                 dev_out = net->loopback_dev;
2364                 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2365                 res.type = RTN_LOCAL;
2366                 flags |= RTCF_LOCAL;
2367                 goto make_route;
2368         }
2369
2370         err = fib_lookup(net, fl4, &res, 0);
2371         if (err) {
2372                 res.fi = NULL;
2373                 res.table = NULL;
2374                 if (fl4->flowi4_oif &&
2375                     (ipv4_is_multicast(fl4->daddr) ||
2376                     !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2377                         /* Apparently, the routing tables are wrong. Assume
2378                            that the destination is on link.
2379
2380                            WHY? DW.
2381                            Because we are allowed to send to an iface
2382                            even if it has NO routes and NO assigned
2383                            addresses. When oif is specified, the routing
2384                            tables are looked up with only one purpose:
2385                            to catch whether the destination is gatewayed rather
2386                            than direct. Moreover, if MSG_DONTROUTE is set,
2387                            we send the packet, ignoring both the routing tables
2388                            and the ifaddr state. --ANK
2389
2390
2391                            We could do this even if oif is unknown
2392                            (likely as IPv6 does), but we do not.
2393                          */
2394
2395                         if (fl4->saddr == 0)
2396                                 fl4->saddr = inet_select_addr(dev_out, 0,
2397                                                               RT_SCOPE_LINK);
2398                         res.type = RTN_UNICAST;
2399                         goto make_route;
2400                 }
2401                 rth = ERR_PTR(err);
2402                 goto out;
2403         }
2404
2405         if (res.type == RTN_LOCAL) {
2406                 if (!fl4->saddr) {
2407                         if (res.fi->fib_prefsrc)
2408                                 fl4->saddr = res.fi->fib_prefsrc;
2409                         else
2410                                 fl4->saddr = fl4->daddr;
2411                 }
2412
2413                 /* L3 master device is the loopback for that domain */
2414                 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? :
2415                         net->loopback_dev;
2416                 fl4->flowi4_oif = dev_out->ifindex;
2417                 flags |= RTCF_LOCAL;
2418                 goto make_route;
2419         }
2420
2421         fib_select_path(net, &res, fl4, skb);
2422
2423         dev_out = FIB_RES_DEV(res);
2424         fl4->flowi4_oif = dev_out->ifindex;
2425
2426
2427 make_route:
2428         rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
2429
2430 out:
2431         rcu_read_unlock();
2432         return rth;
2433 }
2434 EXPORT_SYMBOL_GPL(__ip_route_output_key_hash);
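
/* Illustrative sketch (not a function in this file): a typical caller going
 * through the ip_route_output_key() wrapper, which reaches the resolver
 * above with a NULL skb. On success, fl4.saddr has been filled in with the
 * selected source address.
 */
static int example_route_outgoing(struct net *net, __be32 daddr)
{
	struct flowi4 fl4 = {
		.daddr	      = daddr,
		.flowi4_proto = IPPROTO_UDP,
	};
	struct rtable *rt;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* ... transmit via rt->dst ... */
	ip_rt_put(rt);			/* drop the reference when done */
	return 0;
}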
2435
2436 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2437 {
2438         return NULL;
2439 }
2440
2441 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2442 {
2443         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2444
2445         return mtu ? : dst->dev->mtu;
2446 }
2447
2448 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2449                                           struct sk_buff *skb, u32 mtu)
2450 {
2451 }
2452
2453 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2454                                        struct sk_buff *skb)
2455 {
2456 }
2457
2458 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2459                                           unsigned long old)
2460 {
2461         return NULL;
2462 }
2463
2464 static struct dst_ops ipv4_dst_blackhole_ops = {
2465         .family                 =       AF_INET,
2466         .check                  =       ipv4_blackhole_dst_check,
2467         .mtu                    =       ipv4_blackhole_mtu,
2468         .default_advmss         =       ipv4_default_advmss,
2469         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2470         .redirect               =       ipv4_rt_blackhole_redirect,
2471         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
2472         .neigh_lookup           =       ipv4_neigh_lookup,
2473 };
2474
2475 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2476 {
2477         struct rtable *ort = (struct rtable *) dst_orig;
2478         struct rtable *rt;
2479
2480         rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2481         if (rt) {
2482                 struct dst_entry *new = &rt->dst;
2483
2484                 new->__use = 1;
2485                 new->input = dst_discard;
2486                 new->output = dst_discard_out;
2487
2488                 new->dev = ort->dst.dev;
2489                 if (new->dev)
2490                         dev_hold(new->dev);
2491
2492                 rt->rt_is_input = ort->rt_is_input;
2493                 rt->rt_iif = ort->rt_iif;
2494                 rt->rt_pmtu = ort->rt_pmtu;
2495
2496                 rt->rt_genid = rt_genid_ipv4(net);
2497                 rt->rt_flags = ort->rt_flags;
2498                 rt->rt_type = ort->rt_type;
2499                 rt->rt_gateway = ort->rt_gateway;
2500                 rt->rt_uses_gateway = ort->rt_uses_gateway;
2501
2502                 INIT_LIST_HEAD(&rt->rt_uncached);
2503                 dst_free(new);
2504         }
2505
2506         dst_release(dst_orig);
2507
2508         return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2509 }
2510
2511 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2512                                     const struct sock *sk)
2513 {
2514         struct rtable *rt = __ip_route_output_key(net, flp4);
2515
2516         if (IS_ERR(rt))
2517                 return rt;
2518
2519         if (flp4->flowi4_proto)
2520                 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2521                                                         flowi4_to_flowi(flp4),
2522                                                         sk, 0);
2523
2524         return rt;
2525 }
2526 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2527
2528 static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
2529                         struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2530                         u32 seq, int event)
2531 {
2532         struct rtable *rt = skb_rtable(skb);
2533         struct rtmsg *r;
2534         struct nlmsghdr *nlh;
2535         unsigned long expires = 0;
2536         u32 error;
2537         u32 metrics[RTAX_MAX];
2538
2539         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), 0);
2540         if (!nlh)
2541                 return -EMSGSIZE;
2542
2543         r = nlmsg_data(nlh);
2544         r->rtm_family    = AF_INET;
2545         r->rtm_dst_len  = 32;
2546         r->rtm_src_len  = 0;
2547         r->rtm_tos      = fl4->flowi4_tos;
2548         r->rtm_table    = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2549         if (nla_put_u32(skb, RTA_TABLE, table_id))
2550                 goto nla_put_failure;
2551         r->rtm_type     = rt->rt_type;
2552         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2553         r->rtm_protocol = RTPROT_UNSPEC;
2554         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2555         if (rt->rt_flags & RTCF_NOTIFY)
2556                 r->rtm_flags |= RTM_F_NOTIFY;
2557         if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2558                 r->rtm_flags |= RTCF_DOREDIRECT;
2559
2560         if (nla_put_in_addr(skb, RTA_DST, dst))
2561                 goto nla_put_failure;
2562         if (src) {
2563                 r->rtm_src_len = 32;
2564                 if (nla_put_in_addr(skb, RTA_SRC, src))
2565                         goto nla_put_failure;
2566         }
2567         if (rt->dst.dev &&
2568             nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2569                 goto nla_put_failure;
2570 #ifdef CONFIG_IP_ROUTE_CLASSID
2571         if (rt->dst.tclassid &&
2572             nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2573                 goto nla_put_failure;
2574 #endif
2575         if (!rt_is_input_route(rt) &&
2576             fl4->saddr != src) {
2577                 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2578                         goto nla_put_failure;
2579         }
2580         if (rt->rt_uses_gateway &&
2581             nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
2582                 goto nla_put_failure;
2583
2584         expires = rt->dst.expires;
2585         if (expires) {
2586                 unsigned long now = jiffies;
2587
2588                 if (time_before(now, expires))
2589                         expires -= now;
2590                 else
2591                         expires = 0;
2592         }
2593
2594         memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2595         if (rt->rt_pmtu && expires)
2596                 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2597         if (rtnetlink_put_metrics(skb, metrics) < 0)
2598                 goto nla_put_failure;
2599
2600         if (fl4->flowi4_mark &&
2601             nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2602                 goto nla_put_failure;
2603
2604         if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2605             nla_put_u32(skb, RTA_UID,
2606                         from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
2607                 goto nla_put_failure;
2608
2609         error = rt->dst.error;
2610
2611         if (rt_is_input_route(rt)) {
2612 #ifdef CONFIG_IP_MROUTE
2613                 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2614                     IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2615                         int err = ipmr_get_route(net, skb,
2616                                                  fl4->saddr, fl4->daddr,
2617                                                  r, portid);
2618
2619                         if (err <= 0) {
2620                                 if (err == 0)
2621                                         return 0;
2622                                 goto nla_put_failure;
2623                         }
2624                 } else
2625 #endif
2626                         if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2627                                 goto nla_put_failure;
2628         }
2629
2630         if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2631                 goto nla_put_failure;
2632
2633         nlmsg_end(skb, nlh);
2634         return 0;
2635
2636 nla_put_failure:
2637         nlmsg_cancel(skb, nlh);
2638         return -EMSGSIZE;
2639 }
2640
2641 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2642                              struct netlink_ext_ack *extack)
2643 {
2644         struct net *net = sock_net(in_skb->sk);
2645         struct rtmsg *rtm;
2646         struct nlattr *tb[RTA_MAX+1];
2647         struct rtable *rt = NULL;
2648         struct flowi4 fl4;
2649         __be32 dst = 0;
2650         __be32 src = 0;
2651         u32 iif;
2652         int err;
2653         int mark;
2654         struct sk_buff *skb;
2655         u32 table_id = RT_TABLE_MAIN;
2656         kuid_t uid;
2657
2658         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
2659                           extack);
2660         if (err < 0)
2661                 goto errout;
2662
2663         rtm = nlmsg_data(nlh);
2664
2665         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2666         if (!skb) {
2667                 err = -ENOBUFS;
2668                 goto errout;
2669         }
2670
2671         /* Reserve room for dummy headers; this skb can pass
2672            through a good chunk of the routing engine.
2673          */
2674         skb_reset_mac_header(skb);
2675         skb_reset_network_header(skb);
2676
2677         src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
2678         dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
2679         iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2680         mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2681         if (tb[RTA_UID])
2682                 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
2683         else
2684                 uid = (iif ? INVALID_UID : current_uid());
2685
2686         /* Bugfix: need to give ip_route_input enough of an IP header to
2687          * not gag.
2688          */
2689         ip_hdr(skb)->protocol = IPPROTO_UDP;
2690         ip_hdr(skb)->saddr = src;
2691         ip_hdr(skb)->daddr = dst;
2692
2693         skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2694
2695         memset(&fl4, 0, sizeof(fl4));
2696         fl4.daddr = dst;
2697         fl4.saddr = src;
2698         fl4.flowi4_tos = rtm->rtm_tos;
2699         fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2700         fl4.flowi4_mark = mark;
2701         fl4.flowi4_uid = uid;
2702
2703         if (iif) {
2704                 struct net_device *dev;
2705
2706                 dev = __dev_get_by_index(net, iif);
2707                 if (!dev) {
2708                         err = -ENODEV;
2709                         goto errout_free;
2710                 }
2711
2712                 skb->protocol   = htons(ETH_P_IP);
2713                 skb->dev        = dev;
2714                 skb->mark       = mark;
2715                 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2716
2717                 rt = skb_rtable(skb);
2718                 if (err == 0 && rt->dst.error)
2719                         err = -rt->dst.error;
2720         } else {
2721                 rt = ip_route_output_key(net, &fl4);
2722
2723                 err = 0;
2724                 if (IS_ERR(rt))
2725                         err = PTR_ERR(rt);
2726         }
2727
2728         if (err)
2729                 goto errout_free;
2730
2731         skb_dst_set(skb, &rt->dst);
2732         if (rtm->rtm_flags & RTM_F_NOTIFY)
2733                 rt->rt_flags |= RTCF_NOTIFY;
2734
2735         if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
2736                 table_id = rt->rt_table_id;
2737
2738         err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
2739                            NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2740                            RTM_NEWROUTE);
2741         if (err < 0)
2742                 goto errout_free;
2743
2744         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2745 errout:
2746         return err;
2747
2748 errout_free:
2749         kfree_skb(skb);
2750         goto errout;
2751 }
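
/* Illustrative userspace sketch (not part of route.c): issuing the
 * RTM_GETROUTE request that inet_rtm_getroute() above services, over a
 * plain rtnetlink socket. The reply is the RTM_NEWROUTE message built by
 * rt_fill_info(). Error handling is trimmed and the function name is ours.
 */
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int get_route_for(const char *dst_str)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg	rtm;
		char		buf[RTA_SPACE(sizeof(struct in_addr))];
	} req;
	struct rtattr *rta;
	struct in_addr dst;
	char reply[4096];
	int fd;

	if (inet_pton(AF_INET, dst_str, &dst) != 1)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type  = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family  = AF_INET;
	req.rtm.rtm_dst_len = 32;

	/* append the RTA_DST attribute that inet_rtm_getroute() parses */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len  = RTA_LENGTH(sizeof(dst));
	memcpy(RTA_DATA(rta), &dst, sizeof(dst));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ||
	    recv(fd, reply, sizeof(reply), 0) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	/* reply now holds RTM_NEWROUTE with RTA_OIF, RTA_GATEWAY, ... */
	return 0;
}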
2752
2753 void ip_rt_multicast_event(struct in_device *in_dev)
2754 {
2755         rt_cache_flush(dev_net(in_dev->dev));
2756 }
2757
2758 #ifdef CONFIG_SYSCTL
2759 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
2760 static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
2761 static int ip_rt_gc_elasticity __read_mostly    = 8;
2762
2763 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2764                                         void __user *buffer,
2765                                         size_t *lenp, loff_t *ppos)
2766 {
2767         struct net *net = (struct net *)__ctl->extra1;
2768
2769         if (write) {
2770                 rt_cache_flush(net);
2771                 fnhe_genid_bump(net);
2772                 return 0;
2773         }
2774
2775         return -EINVAL;
2776 }
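
/* Illustrative userspace sketch (not part of route.c): any write to
 * /proc/sys/net/ipv4/route/flush lands in the handler above; the written
 * value is ignored, only the write itself matters. Requires privilege
 * (the table entry below is mode 0200). `sysctl -w net.ipv4.route.flush=1`
 * does the same thing. The function name is ours.
 */
#include <fcntl.h>
#include <unistd.h>

static int flush_ipv4_route_cache(void)
{
	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}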
2777
2778 static struct ctl_table ipv4_route_table[] = {
2779         {
2780                 .procname       = "gc_thresh",
2781                 .data           = &ipv4_dst_ops.gc_thresh,
2782                 .maxlen         = sizeof(int),
2783                 .mode           = 0644,
2784                 .proc_handler   = proc_dointvec,
2785         },
2786         {
2787                 .procname       = "max_size",
2788                 .data           = &ip_rt_max_size,
2789                 .maxlen         = sizeof(int),
2790                 .mode           = 0644,
2791                 .proc_handler   = proc_dointvec,
2792         },
2793         {
2794                 /*  Deprecated. Use gc_min_interval_ms */
2795
2796                 .procname       = "gc_min_interval",
2797                 .data           = &ip_rt_gc_min_interval,
2798                 .maxlen         = sizeof(int),
2799                 .mode           = 0644,
2800                 .proc_handler   = proc_dointvec_jiffies,
2801         },
2802         {
2803                 .procname       = "gc_min_interval_ms",
2804                 .data           = &ip_rt_gc_min_interval,
2805                 .maxlen         = sizeof(int),
2806                 .mode           = 0644,
2807                 .proc_handler   = proc_dointvec_ms_jiffies,
2808         },
2809         {
2810                 .procname       = "gc_timeout",
2811                 .data           = &ip_rt_gc_timeout,
2812                 .maxlen         = sizeof(int),
2813                 .mode           = 0644,
2814                 .proc_handler   = proc_dointvec_jiffies,
2815         },
2816         {
2817                 .procname       = "gc_interval",
2818                 .data           = &ip_rt_gc_interval,
2819                 .maxlen         = sizeof(int),
2820                 .mode           = 0644,
2821                 .proc_handler   = proc_dointvec_jiffies,
2822         },
2823         {
2824                 .procname       = "redirect_load",
2825                 .data           = &ip_rt_redirect_load,
2826                 .maxlen         = sizeof(int),
2827                 .mode           = 0644,
2828                 .proc_handler   = proc_dointvec,
2829         },
2830         {
2831                 .procname       = "redirect_number",
2832                 .data           = &ip_rt_redirect_number,
2833                 .maxlen         = sizeof(int),
2834                 .mode           = 0644,
2835                 .proc_handler   = proc_dointvec,
2836         },
2837         {
2838                 .procname       = "redirect_silence",
2839                 .data           = &ip_rt_redirect_silence,
2840                 .maxlen         = sizeof(int),
2841                 .mode           = 0644,
2842                 .proc_handler   = proc_dointvec,
2843         },
2844         {
2845                 .procname       = "error_cost",
2846                 .data           = &ip_rt_error_cost,
2847                 .maxlen         = sizeof(int),
2848                 .mode           = 0644,
2849                 .proc_handler   = proc_dointvec,
2850         },
2851         {
2852                 .procname       = "error_burst",
2853                 .data           = &ip_rt_error_burst,
2854                 .maxlen         = sizeof(int),
2855                 .mode           = 0644,
2856                 .proc_handler   = proc_dointvec,
2857         },
2858         {
2859                 .procname       = "gc_elasticity",
2860                 .data           = &ip_rt_gc_elasticity,
2861                 .maxlen         = sizeof(int),
2862                 .mode           = 0644,
2863                 .proc_handler   = proc_dointvec,
2864         },
2865         {
2866                 .procname       = "mtu_expires",
2867                 .data           = &ip_rt_mtu_expires,
2868                 .maxlen         = sizeof(int),
2869                 .mode           = 0644,
2870                 .proc_handler   = proc_dointvec_jiffies,
2871         },
2872         {
2873                 .procname       = "min_pmtu",
2874                 .data           = &ip_rt_min_pmtu,
2875                 .maxlen         = sizeof(int),
2876                 .mode           = 0644,
2877                 .proc_handler   = proc_dointvec,
2878         },
2879         {
2880                 .procname       = "min_adv_mss",
2881                 .data           = &ip_rt_min_advmss,
2882                 .maxlen         = sizeof(int),
2883                 .mode           = 0644,
2884                 .proc_handler   = proc_dointvec,
2885         },
2886         { }
2887 };
2888
2889 static struct ctl_table ipv4_route_flush_table[] = {
2890         {
2891                 .procname       = "flush",
2892                 .maxlen         = sizeof(int),
2893                 .mode           = 0200,
2894                 .proc_handler   = ipv4_sysctl_rtcache_flush,
2895         },
2896         { },
2897 };
2898
2899 static __net_init int sysctl_route_net_init(struct net *net)
2900 {
2901         struct ctl_table *tbl;
2902
2903         tbl = ipv4_route_flush_table;
2904         if (!net_eq(net, &init_net)) {
2905                 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2906                 if (!tbl)
2907                         goto err_dup;
2908
2909                 /* Don't export sysctls to unprivileged users */
2910                 if (net->user_ns != &init_user_ns)
2911                         tbl[0].procname = NULL;
2912         }
2913         tbl[0].extra1 = net;
2914
2915         net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2916         if (!net->ipv4.route_hdr)
2917                 goto err_reg;
2918         return 0;
2919
2920 err_reg:
2921         if (tbl != ipv4_route_flush_table)
2922                 kfree(tbl);
2923 err_dup:
2924         return -ENOMEM;
2925 }
2926
2927 static __net_exit void sysctl_route_net_exit(struct net *net)
2928 {
2929         struct ctl_table *tbl;
2930
2931         tbl = net->ipv4.route_hdr->ctl_table_arg;
2932         unregister_net_sysctl_table(net->ipv4.route_hdr);
2933         BUG_ON(tbl == ipv4_route_flush_table);
2934         kfree(tbl);
2935 }
2936
2937 static __net_initdata struct pernet_operations sysctl_route_ops = {
2938         .init = sysctl_route_net_init,
2939         .exit = sysctl_route_net_exit,
2940 };
2941 #endif
2942
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

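/*
 * Each namespace owns its own inet_peer tree, which keeps long-lived
 * per-remote-host state (e.g. ICMP rate limiting) that outlives any
 * individual route.
 */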
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	=	ipv4_inetpeer_init,
	.exit	=	ipv4_inetpeer_exit,
};

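/*
 * Per-cpu accounting buffers for routing realms (route classid),
 * exported to user space via /proc/net/rt_acct.
 */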
#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

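/*
 * Boot-time initialization of the IPv4 routing subsystem: IP ident
 * generation state, per-cpu uncached lists, the dst slab cache,
 * /proc files, the RTM_GETROUTE handler, and the per-namespace
 * subsystems defined above.
 */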
int __init ip_rt_init(void)
{
	int rc = 0;
	int cpu;

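	/*
	 * ip_idents is a fixed-size array of counters used to pick IP
	 * identification values; ip_tstamps records when each bucket
	 * was last used.  Starting from random contents makes the IDs
	 * harder to predict for an off-path attacker.
	 */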
	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

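	/*
	 * Routes that are not cached on a device are chained on these
	 * per-cpu lists so they can still be found and flushed when
	 * their device goes away.
	 */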
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

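	/*
	 * All rtable objects come from a single slab cache; the
	 * blackhole ops share it, since blackhole dsts are ordinary
	 * rtables with different operations.
	 */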
	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

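	/*
	 * With the old routing cache gone there is nothing left to
	 * garbage collect: make the GC threshold unreachable and the
	 * entry limit effectively unbounded.
	 */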
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif

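	/* Netlink handler behind RTM_GETROUTE requests (e.g. "ip route get") */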
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * Register the static route sysctl table for the initial namespace.
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif