[karo-tx-linux.git] net/ipv4/route.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              ROUTE - implementation of the IP router.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12  *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  * Fixes:
15  *              Alan Cox        :       Verify area fixes.
16  *              Alan Cox        :       cli() protects routing changes
17  *              Rui Oliveira    :       ICMP routing table updates
18  *              (rco@di.uminho.pt)      Routing table insertion and update
19  *              Linus Torvalds  :       Rewrote bits to be sensible
20  *              Alan Cox        :       Added BSD route gw semantics
21  *              Alan Cox        :       Super /proc >4K
22  *              Alan Cox        :       MTU in route table
23  *              Alan Cox        :       MSS actually. Also added the window
24  *                                      clamper.
25  *              Sam Lantinga    :       Fixed route matching in rt_del()
26  *              Alan Cox        :       Routing cache support.
27  *              Alan Cox        :       Removed compatibility cruft.
28  *              Alan Cox        :       RTF_REJECT support.
29  *              Alan Cox        :       TCP irtt support.
30  *              Jonathan Naylor :       Added Metric support.
31  *      Miquel van Smoorenburg  :       BSD API fixes.
32  *      Miquel van Smoorenburg  :       Metrics.
33  *              Alan Cox        :       Use __u32 properly
34  *              Alan Cox        :       Aligned routing errors more closely with BSD
35  *                                      our system is still very different.
36  *              Alan Cox        :       Faster /proc handling
37  *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
38  *                                      routing caches and better behaviour.
39  *
40  *              Olaf Erb        :       irtt wasn't being copied right.
41  *              Bjorn Ekwall    :       Kerneld route support.
42  *              Alan Cox        :       Multicast fixed (I hope)
43  *              Pavel Krauz     :       Limited broadcast fixed
44  *              Mike McLagan    :       Routing by source
45  *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
46  *                                      route.c and rewritten from scratch.
47  *              Andi Kleen      :       Load-limit warning messages.
48  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
49  *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
50  *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
51  *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
52  *              Marc Boucher    :       routing by fwmark
53  *      Robert Olsson           :       Added rt_cache statistics
54  *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
55  *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
56  *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
57  *      Ilia Sotnikov           :       Removed TOS from hash calculations
58  *
59  *              This program is free software; you can redistribute it and/or
60  *              modify it under the terms of the GNU General Public License
61  *              as published by the Free Software Foundation; either version
62  *              2 of the License, or (at your option) any later version.
63  */
64
65 #include <linux/module.h>
66 #include <asm/uaccess.h>
67 #include <asm/system.h>
68 #include <linux/bitops.h>
69 #include <linux/types.h>
70 #include <linux/kernel.h>
71 #include <linux/mm.h>
72 #include <linux/bootmem.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
77 #include <linux/in.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/workqueue.h>
83 #include <linux/skbuff.h>
84 #include <linux/inetdevice.h>
85 #include <linux/igmp.h>
86 #include <linux/pkt_sched.h>
87 #include <linux/mroute.h>
88 #include <linux/netfilter_ipv4.h>
89 #include <linux/random.h>
90 #include <linux/jhash.h>
91 #include <linux/rcupdate.h>
92 #include <linux/times.h>
93 #include <net/dst.h>
94 #include <net/net_namespace.h>
95 #include <net/protocol.h>
96 #include <net/ip.h>
97 #include <net/route.h>
98 #include <net/inetpeer.h>
99 #include <net/sock.h>
100 #include <net/ip_fib.h>
101 #include <net/arp.h>
102 #include <net/tcp.h>
103 #include <net/icmp.h>
104 #include <net/xfrm.h>
105 #include <net/netevent.h>
106 #include <net/rtnetlink.h>
107 #ifdef CONFIG_SYSCTL
108 #include <linux/sysctl.h>
109 #endif
110
111 #define RT_FL_TOS(oldflp) \
112     ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
113
114 #define IP_MAX_MTU      0xFFF0
115
116 #define RT_GC_TIMEOUT (300*HZ)
117
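/*
 * Route-cache tuning knobs.  These defaults can be changed at run time;
 * most of them are exported read/write through the net.ipv4.route sysctl
 * table (under CONFIG_SYSCTL) later in this file.
 */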
118 static int ip_rt_max_size;
119 static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
120 static int ip_rt_gc_interval __read_mostly      = 60 * HZ;
121 static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
122 static int ip_rt_redirect_number __read_mostly  = 9;
123 static int ip_rt_redirect_load __read_mostly    = HZ / 50;
124 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
125 static int ip_rt_error_cost __read_mostly       = HZ;
126 static int ip_rt_error_burst __read_mostly      = 5 * HZ;
127 static int ip_rt_gc_elasticity __read_mostly    = 8;
128 static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
129 static int ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
130 static int ip_rt_min_advmss __read_mostly       = 256;
131 static int ip_rt_secret_interval __read_mostly  = 10 * 60 * HZ;
132 static int rt_chain_length_max __read_mostly    = 20;
133
134 static struct delayed_work expires_work;
135 static unsigned long expires_ljiffies;
136
137 /*
138  *      Interface to generic destination cache.
139  */
140
141 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
142 static void              ipv4_dst_destroy(struct dst_entry *dst);
143 static void              ipv4_dst_ifdown(struct dst_entry *dst,
144                                          struct net_device *dev, int how);
145 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
146 static void              ipv4_link_failure(struct sk_buff *skb);
147 static void              ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
148 static int rt_garbage_collect(struct dst_ops *ops);
149
150
151 static struct dst_ops ipv4_dst_ops = {
152         .family =               AF_INET,
153         .protocol =             cpu_to_be16(ETH_P_IP),
154         .gc =                   rt_garbage_collect,
155         .check =                ipv4_dst_check,
156         .destroy =              ipv4_dst_destroy,
157         .ifdown =               ipv4_dst_ifdown,
158         .negative_advice =      ipv4_negative_advice,
159         .link_failure =         ipv4_link_failure,
160         .update_pmtu =          ip_rt_update_pmtu,
161         .local_out =            __ip_local_out,
162         .entries =              ATOMIC_INIT(0),
163 };
164
165 #define ECN_OR_COST(class)      TC_PRIO_##class
166
167 const __u8 ip_tos2prio[16] = {
168         TC_PRIO_BESTEFFORT,
169         ECN_OR_COST(FILLER),
170         TC_PRIO_BESTEFFORT,
171         ECN_OR_COST(BESTEFFORT),
172         TC_PRIO_BULK,
173         ECN_OR_COST(BULK),
174         TC_PRIO_BULK,
175         ECN_OR_COST(BULK),
176         TC_PRIO_INTERACTIVE,
177         ECN_OR_COST(INTERACTIVE),
178         TC_PRIO_INTERACTIVE,
179         ECN_OR_COST(INTERACTIVE),
180         TC_PRIO_INTERACTIVE_BULK,
181         ECN_OR_COST(INTERACTIVE_BULK),
182         TC_PRIO_INTERACTIVE_BULK,
183         ECN_OR_COST(INTERACTIVE_BULK)
184 };
185
186
187 /*
188  * Route cache.
189  */
190
191 /* The locking scheme is rather straightforward:
192  *
193  * 1) Read-Copy Update protects the buckets of the central route hash.
194  * 2) Only writers remove entries, and they hold the lock
195  *    as they look at rtable reference counts.
196  * 3) Only readers acquire references to rtable entries,
197  *    they do so with atomic increments and with the
198  *    RCU read lock held.
199  */
200
201 struct rt_hash_bucket {
202         struct rtable   *chain;
203 };
204
205 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
206         defined(CONFIG_PROVE_LOCKING)
207 /*
208  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
209  * The size of this table is a power of two and depends on the number of CPUs.
210  * (On lockdep, spinlock_t is quite big, so keep the table small there.)
211  */
212 #ifdef CONFIG_LOCKDEP
213 # define RT_HASH_LOCK_SZ        256
214 #else
215 # if NR_CPUS >= 32
216 #  define RT_HASH_LOCK_SZ       4096
217 # elif NR_CPUS >= 16
218 #  define RT_HASH_LOCK_SZ       2048
219 # elif NR_CPUS >= 8
220 #  define RT_HASH_LOCK_SZ       1024
221 # elif NR_CPUS >= 4
222 #  define RT_HASH_LOCK_SZ       512
223 # else
224 #  define RT_HASH_LOCK_SZ       256
225 # endif
226 #endif
227
228 static spinlock_t       *rt_hash_locks;
229 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
230
231 static __init void rt_hash_lock_init(void)
232 {
233         int i;
234
235         rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
236                         GFP_KERNEL);
237         if (!rt_hash_locks)
238                 panic("IP: failed to allocate rt_hash_locks\n");
239
240         for (i = 0; i < RT_HASH_LOCK_SZ; i++)
241                 spin_lock_init(&rt_hash_locks[i]);
242 }
243 #else
244 # define rt_hash_lock_addr(slot) NULL
245
246 static inline void rt_hash_lock_init(void)
247 {
248 }
249 #endif
250
251 static struct rt_hash_bucket    *rt_hash_table __read_mostly;
252 static unsigned                 rt_hash_mask __read_mostly;
253 static unsigned int             rt_hash_log  __read_mostly;
254
255 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
256 #define RT_CACHE_STAT_INC(field) \
257         (__raw_get_cpu_var(rt_cache_stat).field++)
258
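/*
 * The cache hash folds daddr, saddr and the interface index together
 * with the per-namespace generation id (rt_genid), so bumping the
 * generation both redistributes new entries and lets stale ones be
 * detected via rt_is_expired() below.
 */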
259 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
260                 int genid)
261 {
262         return jhash_3words((__force u32)(__be32)(daddr),
263                             (__force u32)(__be32)(saddr),
264                             idx, genid)
265                 & rt_hash_mask;
266 }
267
268 static inline int rt_genid(struct net *net)
269 {
270         return atomic_read(&net->ipv4.rt_genid);
271 }
272
273 #ifdef CONFIG_PROC_FS
274 struct rt_cache_iter_state {
275         struct seq_net_private p;
276         int bucket;
277         int genid;
278 };
279
280 static struct rtable *rt_cache_get_first(struct seq_file *seq)
281 {
282         struct rt_cache_iter_state *st = seq->private;
283         struct rtable *r = NULL;
284
285         for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
286                 if (!rt_hash_table[st->bucket].chain)
287                         continue;
288                 rcu_read_lock_bh();
289                 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
290                 while (r) {
291                         if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
292                             r->rt_genid == st->genid)
293                                 return r;
294                         r = rcu_dereference_bh(r->u.dst.rt_next);
295                 }
296                 rcu_read_unlock_bh();
297         }
298         return r;
299 }
300
301 static struct rtable *__rt_cache_get_next(struct seq_file *seq,
302                                           struct rtable *r)
303 {
304         struct rt_cache_iter_state *st = seq->private;
305
306         r = r->u.dst.rt_next;
307         while (!r) {
308                 rcu_read_unlock_bh();
309                 do {
310                         if (--st->bucket < 0)
311                                 return NULL;
312                 } while (!rt_hash_table[st->bucket].chain);
313                 rcu_read_lock_bh();
314                 r = rt_hash_table[st->bucket].chain;
315         }
316         return rcu_dereference_bh(r);
317 }
318
319 static struct rtable *rt_cache_get_next(struct seq_file *seq,
320                                         struct rtable *r)
321 {
322         struct rt_cache_iter_state *st = seq->private;
323         while ((r = __rt_cache_get_next(seq, r)) != NULL) {
324                 if (dev_net(r->u.dst.dev) != seq_file_net(seq))
325                         continue;
326                 if (r->rt_genid == st->genid)
327                         break;
328         }
329         return r;
330 }
331
332 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
333 {
334         struct rtable *r = rt_cache_get_first(seq);
335
336         if (r)
337                 while (pos && (r = rt_cache_get_next(seq, r)))
338                         --pos;
339         return pos ? NULL : r;
340 }
341
342 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
343 {
344         struct rt_cache_iter_state *st = seq->private;
345         if (*pos)
346                 return rt_cache_get_idx(seq, *pos - 1);
347         st->genid = rt_genid(seq_file_net(seq));
348         return SEQ_START_TOKEN;
349 }
350
351 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
352 {
353         struct rtable *r;
354
355         if (v == SEQ_START_TOKEN)
356                 r = rt_cache_get_first(seq);
357         else
358                 r = rt_cache_get_next(seq, v);
359         ++*pos;
360         return r;
361 }
362
363 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
364 {
365         if (v && v != SEQ_START_TOKEN)
366                 rcu_read_unlock_bh();
367 }
368
369 static int rt_cache_seq_show(struct seq_file *seq, void *v)
370 {
371         if (v == SEQ_START_TOKEN)
372                 seq_printf(seq, "%-127s\n",
373                            "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
374                            "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
375                            "HHUptod\tSpecDst");
376         else {
377                 struct rtable *r = v;
378                 int len;
379
380                 seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
381                               "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
382                         r->u.dst.dev ? r->u.dst.dev->name : "*",
383                         (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
384                         r->rt_flags, atomic_read(&r->u.dst.__refcnt),
385                         r->u.dst.__use, 0, (unsigned long)r->rt_src,
386                         (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
387                              (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
388                         dst_metric(&r->u.dst, RTAX_WINDOW),
389                         (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
390                               dst_metric(&r->u.dst, RTAX_RTTVAR)),
391                         r->fl.fl4_tos,
392                         r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
393                         r->u.dst.hh ? (r->u.dst.hh->hh_output ==
394                                        dev_queue_xmit) : 0,
395                         r->rt_spec_dst, &len);
396
397                 seq_printf(seq, "%*s\n", 127 - len, "");
398         }
399         return 0;
400 }
401
402 static const struct seq_operations rt_cache_seq_ops = {
403         .start  = rt_cache_seq_start,
404         .next   = rt_cache_seq_next,
405         .stop   = rt_cache_seq_stop,
406         .show   = rt_cache_seq_show,
407 };
408
409 static int rt_cache_seq_open(struct inode *inode, struct file *file)
410 {
411         return seq_open_net(inode, file, &rt_cache_seq_ops,
412                         sizeof(struct rt_cache_iter_state));
413 }
414
415 static const struct file_operations rt_cache_seq_fops = {
416         .owner   = THIS_MODULE,
417         .open    = rt_cache_seq_open,
418         .read    = seq_read,
419         .llseek  = seq_lseek,
420         .release = seq_release_net,
421 };
422
423
424 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
425 {
426         int cpu;
427
428         if (*pos == 0)
429                 return SEQ_START_TOKEN;
430
431         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
432                 if (!cpu_possible(cpu))
433                         continue;
434                 *pos = cpu+1;
435                 return &per_cpu(rt_cache_stat, cpu);
436         }
437         return NULL;
438 }
439
440 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
441 {
442         int cpu;
443
444         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
445                 if (!cpu_possible(cpu))
446                         continue;
447                 *pos = cpu+1;
448                 return &per_cpu(rt_cache_stat, cpu);
449         }
450         return NULL;
451
452 }
453
454 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
455 {
456
457 }
458
459 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
460 {
461         struct rt_cache_stat *st = v;
462
463         if (v == SEQ_START_TOKEN) {
464                 seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
465                 return 0;
466         }
467
468         seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
469                    " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
470                    atomic_read(&ipv4_dst_ops.entries),
471                    st->in_hit,
472                    st->in_slow_tot,
473                    st->in_slow_mc,
474                    st->in_no_route,
475                    st->in_brd,
476                    st->in_martian_dst,
477                    st->in_martian_src,
478
479                    st->out_hit,
480                    st->out_slow_tot,
481                    st->out_slow_mc,
482
483                    st->gc_total,
484                    st->gc_ignored,
485                    st->gc_goal_miss,
486                    st->gc_dst_overflow,
487                    st->in_hlist_search,
488                    st->out_hlist_search
489                 );
490         return 0;
491 }
492
493 static const struct seq_operations rt_cpu_seq_ops = {
494         .start  = rt_cpu_seq_start,
495         .next   = rt_cpu_seq_next,
496         .stop   = rt_cpu_seq_stop,
497         .show   = rt_cpu_seq_show,
498 };
499
500
501 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
502 {
503         return seq_open(file, &rt_cpu_seq_ops);
504 }
505
506 static const struct file_operations rt_cpu_seq_fops = {
507         .owner   = THIS_MODULE,
508         .open    = rt_cpu_seq_open,
509         .read    = seq_read,
510         .llseek  = seq_lseek,
511         .release = seq_release,
512 };
513
514 #ifdef CONFIG_NET_CLS_ROUTE
515 static int rt_acct_proc_show(struct seq_file *m, void *v)
516 {
517         struct ip_rt_acct *dst, *src;
518         unsigned int i, j;
519
520         dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
521         if (!dst)
522                 return -ENOMEM;
523
524         for_each_possible_cpu(i) {
525                 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
526                 for (j = 0; j < 256; j++) {
527                         dst[j].o_bytes   += src[j].o_bytes;
528                         dst[j].o_packets += src[j].o_packets;
529                         dst[j].i_bytes   += src[j].i_bytes;
530                         dst[j].i_packets += src[j].i_packets;
531                 }
532         }
533
534         seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
535         kfree(dst);
536         return 0;
537 }
538
539 static int rt_acct_proc_open(struct inode *inode, struct file *file)
540 {
541         return single_open(file, rt_acct_proc_show, NULL);
542 }
543
544 static const struct file_operations rt_acct_proc_fops = {
545         .owner          = THIS_MODULE,
546         .open           = rt_acct_proc_open,
547         .read           = seq_read,
548         .llseek         = seq_lseek,
549         .release        = single_release,
550 };
551 #endif
552
553 static int __net_init ip_rt_do_proc_init(struct net *net)
554 {
555         struct proc_dir_entry *pde;
556
557         pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
558                         &rt_cache_seq_fops);
559         if (!pde)
560                 goto err1;
561
562         pde = proc_create("rt_cache", S_IRUGO,
563                           net->proc_net_stat, &rt_cpu_seq_fops);
564         if (!pde)
565                 goto err2;
566
567 #ifdef CONFIG_NET_CLS_ROUTE
568         pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
569         if (!pde)
570                 goto err3;
571 #endif
572         return 0;
573
574 #ifdef CONFIG_NET_CLS_ROUTE
575 err3:
576         remove_proc_entry("rt_cache", net->proc_net_stat);
577 #endif
578 err2:
579         remove_proc_entry("rt_cache", net->proc_net);
580 err1:
581         return -ENOMEM;
582 }
583
584 static void __net_exit ip_rt_do_proc_exit(struct net *net)
585 {
586         remove_proc_entry("rt_cache", net->proc_net_stat);
587         remove_proc_entry("rt_cache", net->proc_net);
588 #ifdef CONFIG_NET_CLS_ROUTE
589         remove_proc_entry("rt_acct", net->proc_net);
590 #endif
591 }
592
593 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
594         .init = ip_rt_do_proc_init,
595         .exit = ip_rt_do_proc_exit,
596 };
597
598 static int __init ip_rt_proc_init(void)
599 {
600         return register_pernet_subsys(&ip_rt_proc_ops);
601 }
602
603 #else
604 static inline int ip_rt_proc_init(void)
605 {
606         return 0;
607 }
608 #endif /* CONFIG_PROC_FS */
609
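/*
 * Cache entries are freed through call_rcu_bh(), so lockless readers
 * walking a chain under rcu_read_lock_bh() never see freed memory.
 * rt_drop() additionally drops the reference its caller was holding.
 */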
610 static inline void rt_free(struct rtable *rt)
611 {
612         call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
613 }
614
615 static inline void rt_drop(struct rtable *rt)
616 {
617         ip_rt_put(rt);
618         call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
619 }
620
621 static inline int rt_fast_clean(struct rtable *rth)
622 {
623         /* Kill broadcast/multicast entries very aggressively, if they
624            collide in the hash table with more useful entries */
625         return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
626                 rth->fl.iif && rth->u.dst.rt_next;
627 }
628
629 static inline int rt_valuable(struct rtable *rth)
630 {
631         return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
632                 rth->u.dst.expires;
633 }
634
635 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
636 {
637         unsigned long age;
638         int ret = 0;
639
640         if (atomic_read(&rth->u.dst.__refcnt))
641                 goto out;
642
643         ret = 1;
644         if (rth->u.dst.expires &&
645             time_after_eq(jiffies, rth->u.dst.expires))
646                 goto out;
647
648         age = jiffies - rth->u.dst.lastuse;
649         ret = 0;
650         if ((age <= tmo1 && !rt_fast_clean(rth)) ||
651             (age <= tmo2 && rt_valuable(rth)))
652                 goto out;
653         ret = 1;
654 out:    return ret;
655 }
656
657 /* Bits of score are:
658  * 31: very valuable
659  * 30: not quite useless
660  * 29..0: usage counter
661  */
662 static inline u32 rt_score(struct rtable *rt)
663 {
664         u32 score = jiffies - rt->u.dst.lastuse;
665
666         score = ~score & ~(3<<30);
667
668         if (rt_valuable(rt))
669                 score |= (1<<31);
670
671         if (!rt->fl.iif ||
672             !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
673                 score |= (1<<30);
674
675         return score;
676 }
677
678 static inline bool rt_caching(const struct net *net)
679 {
680         return net->ipv4.current_rt_cache_rebuild_count <=
681                 net->ipv4.sysctl_rt_cache_rebuild_count;
682 }
683
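/*
 * compare_hash_inputs() only looks at the fields that feed rt_hash()
 * (daddr, saddr, iif) and is used for chain-length accounting in
 * has_noalias(), while compare_keys() below also matches mark, TOS/scope
 * and oif to decide whether a cached entry is an exact hit for a key.
 */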
684 static inline bool compare_hash_inputs(const struct flowi *fl1,
685                                         const struct flowi *fl2)
686 {
687         return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
688                 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
689                 (fl1->iif ^ fl2->iif)) == 0);
690 }
691
692 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
693 {
694         return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
695                 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
696                 (fl1->mark ^ fl2->mark) |
697                 (*(u16 *)&fl1->nl_u.ip4_u.tos ^
698                  *(u16 *)&fl2->nl_u.ip4_u.tos) |
699                 (fl1->oif ^ fl2->oif) |
700                 (fl1->iif ^ fl2->iif)) == 0;
701 }
702
703 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
704 {
705         return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev));
706 }
707
708 static inline int rt_is_expired(struct rtable *rth)
709 {
710         return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
711 }
712
713 /*
714  * Perform a full scan of the hash table and free all entries.
715  * Can be called by a softirq or a process.
716  * In the latter case, we want to reschedule if necessary.
717  */
718 static void rt_do_flush(int process_context)
719 {
720         unsigned int i;
721         struct rtable *rth, *next;
722         struct rtable * tail;
723
724         for (i = 0; i <= rt_hash_mask; i++) {
725                 if (process_context && need_resched())
726                         cond_resched();
727                 rth = rt_hash_table[i].chain;
728                 if (!rth)
729                         continue;
730
731                 spin_lock_bh(rt_hash_lock_addr(i));
732 #ifdef CONFIG_NET_NS
733                 {
734                 struct rtable ** prev, * p;
735
736                 rth = rt_hash_table[i].chain;
737
738                 /* defer releasing the head of the list after spin_unlock */
739                 for (tail = rth; tail; tail = tail->u.dst.rt_next)
740                         if (!rt_is_expired(tail))
741                                 break;
742                 if (rth != tail)
743                         rt_hash_table[i].chain = tail;
744
745                 /* call rt_free on entries after the tail requiring flush */
746                 prev = &rt_hash_table[i].chain;
747                 for (p = *prev; p; p = next) {
748                         next = p->u.dst.rt_next;
749                         if (!rt_is_expired(p)) {
750                                 prev = &p->u.dst.rt_next;
751                         } else {
752                                 *prev = next;
753                                 rt_free(p);
754                         }
755                 }
756                 }
757 #else
758                 rth = rt_hash_table[i].chain;
759                 rt_hash_table[i].chain = NULL;
760                 tail = NULL;
761 #endif
762                 spin_unlock_bh(rt_hash_lock_addr(i));
763
764                 for (; rth != tail; rth = next) {
765                         next = rth->u.dst.rt_next;
766                         rt_free(rth);
767                 }
768         }
769 }
770
771 /*
772  * While freeing expired entries, we compute average chain length
773  * and standard deviation, using fixed-point arithmetic.
774  * This gives us an estimate for rt_chain_length_max:
775  *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
776  * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
777  */
778
779 #define FRACT_BITS 3
780 #define ONE (1UL << FRACT_BITS)
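/*
 * Illustrative numbers only (not from a real run): with FRACT_BITS = 3
 * each counted entry contributes ONE = 8, so a sampled average of 16
 * means 2.0 entries per chain.  With sd = 8 (1.0 entry) the result is
 * max(ip_rt_gc_elasticity, (16 + 4*8) >> 3) = max(8, 6) = 8.
 */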
781
782 /*
783  * Given a hash chain and an item in this hash chain,
784  * find if a previous entry has the same hash_inputs
785  * (but differs on tos, mark or oif)
786  * Returns 0 if an alias is found.
787  * Returns ONE if rth has no alias before itself.
788  */
789 static int has_noalias(const struct rtable *head, const struct rtable *rth)
790 {
791         const struct rtable *aux = head;
792
793         while (aux != rth) {
794                 if (compare_hash_inputs(&aux->fl, &rth->fl))
795                         return 0;
796                 aux = aux->u.dst.rt_next;
797         }
798         return ONE;
799 }
800
801 static void rt_check_expire(void)
802 {
803         static unsigned int rover;
804         unsigned int i = rover, goal;
805         struct rtable *rth, **rthp;
806         unsigned long samples = 0;
807         unsigned long sum = 0, sum2 = 0;
808         unsigned long delta;
809         u64 mult;
810
811         delta = jiffies - expires_ljiffies;
812         expires_ljiffies = jiffies;
813         mult = ((u64)delta) << rt_hash_log;
814         if (ip_rt_gc_timeout > 1)
815                 do_div(mult, ip_rt_gc_timeout);
816         goal = (unsigned int)mult;
817         if (goal > rt_hash_mask)
818                 goal = rt_hash_mask + 1;
819         for (; goal > 0; goal--) {
820                 unsigned long tmo = ip_rt_gc_timeout;
821                 unsigned long length;
822
823                 i = (i + 1) & rt_hash_mask;
824                 rthp = &rt_hash_table[i].chain;
825
826                 if (need_resched())
827                         cond_resched();
828
829                 samples++;
830
831                 if (*rthp == NULL)
832                         continue;
833                 length = 0;
834                 spin_lock_bh(rt_hash_lock_addr(i));
835                 while ((rth = *rthp) != NULL) {
836                         prefetch(rth->u.dst.rt_next);
837                         if (rt_is_expired(rth)) {
838                                 *rthp = rth->u.dst.rt_next;
839                                 rt_free(rth);
840                                 continue;
841                         }
842                         if (rth->u.dst.expires) {
843                                 /* Entry is expired even if it is in use */
844                                 if (time_before_eq(jiffies, rth->u.dst.expires)) {
845 nofree:
846                                         tmo >>= 1;
847                                         rthp = &rth->u.dst.rt_next;
848                                         /*
849                                          * We only count entries on
850                                          * a chain with equal hash inputs once
851                                          * so that entries for different QOS
852                                          * levels, and other non-hash input
853                                          * attributes don't unfairly skew
854                                          * the length computation
855                                          */
856                                         length += has_noalias(rt_hash_table[i].chain, rth);
857                                         continue;
858                                 }
859                         } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
860                                 goto nofree;
861
862                         /* Cleanup aged off entries. */
863                         *rthp = rth->u.dst.rt_next;
864                         rt_free(rth);
865                 }
866                 spin_unlock_bh(rt_hash_lock_addr(i));
867                 sum += length;
868                 sum2 += length*length;
869         }
870         if (samples) {
871                 unsigned long avg = sum / samples;
872                 unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
873                 rt_chain_length_max = max_t(unsigned long,
874                                         ip_rt_gc_elasticity,
875                                         (avg + 4*sd) >> FRACT_BITS);
876         }
877         rover = i;
878 }
879
880 /*
881  * rt_worker_func() is run in process context.
882  * We call rt_check_expire() to scan part of the hash table.
883  */
884 static void rt_worker_func(struct work_struct *work)
885 {
886         rt_check_expire();
887         schedule_delayed_work(&expires_work, ip_rt_gc_interval);
888 }
889
890 /*
891  * Perturb rt_genid by a small quantity [1..256].
892  * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
893  * many times (2^24) without reusing a recent rt_genid value.
894  * The Jenkins hash is strong enough that little changes of rt_genid are OK.
895  */
896 static void rt_cache_invalidate(struct net *net)
897 {
898         unsigned char shuffle;
899
900         get_random_bytes(&shuffle, sizeof(shuffle));
901         atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
902 }
903
904 /*
905  * delay < 0  : invalidate cache (fast : entries will be deleted later)
906  * delay >= 0 : invalidate & flush cache (can be long)
907  */
908 void rt_cache_flush(struct net *net, int delay)
909 {
910         rt_cache_invalidate(net);
911         if (delay >= 0)
912                 rt_do_flush(!in_softirq());
913 }
914
915 /* Flush previously invalidated entries from the cache */
916 void rt_cache_flush_batch(void)
917 {
918         rt_do_flush(!in_softirq());
919 }
920
921 /*
922  * We change rt_genid and let gc do the cleanup
923  */
924 static void rt_secret_rebuild(unsigned long __net)
925 {
926         struct net *net = (struct net *)__net;
927         rt_cache_invalidate(net);
928         mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
929 }
930
931 static void rt_secret_rebuild_oneshot(struct net *net)
932 {
933         del_timer_sync(&net->ipv4.rt_secret_timer);
934         rt_cache_invalidate(net);
935         if (ip_rt_secret_interval) {
936                 net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
937                 add_timer(&net->ipv4.rt_secret_timer);
938         }
939 }
940
941 static void rt_emergency_hash_rebuild(struct net *net)
942 {
943         if (net_ratelimit()) {
944                 printk(KERN_WARNING "Route hash chain too long!\n");
945                 printk(KERN_WARNING "Adjust your secret_interval!\n");
946         }
947
948         rt_secret_rebuild_oneshot(net);
949 }
950
951 /*
952    Short description of GC goals.
953
954    We want an algorithm that keeps the routing cache at some
955    equilibrium point, where the number of aged-off entries is
956    approximately equal to the number of newly generated ones.
957
958    The current expiration strength is the variable "expire".
959    We try to adjust it dynamically, so that when networking is
960    idle "expire" is large enough to keep plenty of warm entries,
961    and when load increases it shrinks to limit the cache size.
962  */
963
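/*
 * Illustrative numbers only: with 2^16 hash buckets (rt_hash_log = 16)
 * and ip_rt_gc_elasticity = 8, the comfortable limit is 8 << 16 =
 * 524288 entries.  Above that we take the aggressive branch below;
 * at or below it we merely trim towards "equilibrium".
 */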
964 static int rt_garbage_collect(struct dst_ops *ops)
965 {
966         static unsigned long expire = RT_GC_TIMEOUT;
967         static unsigned long last_gc;
968         static int rover;
969         static int equilibrium;
970         struct rtable *rth, **rthp;
971         unsigned long now = jiffies;
972         int goal;
973
974         /*
975          * Garbage collection is pretty expensive,
976          * do not make it too frequently.
977          */
978
979         RT_CACHE_STAT_INC(gc_total);
980
981         if (now - last_gc < ip_rt_gc_min_interval &&
982             atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
983                 RT_CACHE_STAT_INC(gc_ignored);
984                 goto out;
985         }
986
987         /* Calculate the number of entries we want to expire now. */
988         goal = atomic_read(&ipv4_dst_ops.entries) -
989                 (ip_rt_gc_elasticity << rt_hash_log);
990         if (goal <= 0) {
991                 if (equilibrium < ipv4_dst_ops.gc_thresh)
992                         equilibrium = ipv4_dst_ops.gc_thresh;
993                 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
994                 if (goal > 0) {
995                         equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
996                         goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
997                 }
998         } else {
999                 /* We are in a dangerous area. Try to reduce the cache really
1000                  * aggressively.
1001                  */
1002                 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
1003                 equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
1004         }
1005
1006         if (now - last_gc >= ip_rt_gc_min_interval)
1007                 last_gc = now;
1008
1009         if (goal <= 0) {
1010                 equilibrium += goal;
1011                 goto work_done;
1012         }
1013
1014         do {
1015                 int i, k;
1016
1017                 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
1018                         unsigned long tmo = expire;
1019
1020                         k = (k + 1) & rt_hash_mask;
1021                         rthp = &rt_hash_table[k].chain;
1022                         spin_lock_bh(rt_hash_lock_addr(k));
1023                         while ((rth = *rthp) != NULL) {
1024                                 if (!rt_is_expired(rth) &&
1025                                         !rt_may_expire(rth, tmo, expire)) {
1026                                         tmo >>= 1;
1027                                         rthp = &rth->u.dst.rt_next;
1028                                         continue;
1029                                 }
1030                                 *rthp = rth->u.dst.rt_next;
1031                                 rt_free(rth);
1032                                 goal--;
1033                         }
1034                         spin_unlock_bh(rt_hash_lock_addr(k));
1035                         if (goal <= 0)
1036                                 break;
1037                 }
1038                 rover = k;
1039
1040                 if (goal <= 0)
1041                         goto work_done;
1042
1043                 /* Goal is not achieved. We stop the process if:
1044
1045                    - expire has been reduced to zero (otherwise expire is halved),
1046                    - the table is not full,
1047                    - we are called from softirq context,
1048                    - the jiffies check is just a fallback/debug loop breaker;
1049                      we will not spin here for a long time in any case.
1050                  */
1051
1052                 RT_CACHE_STAT_INC(gc_goal_miss);
1053
1054                 if (expire == 0)
1055                         break;
1056
1057                 expire >>= 1;
1058 #if RT_CACHE_DEBUG >= 2
1059                 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
1060                                 atomic_read(&ipv4_dst_ops.entries), goal, i);
1061 #endif
1062
1063                 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
1064                         goto out;
1065         } while (!in_softirq() && time_before_eq(jiffies, now));
1066
1067         if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
1068                 goto out;
1069         if (net_ratelimit())
1070                 printk(KERN_WARNING "dst cache overflow\n");
1071         RT_CACHE_STAT_INC(gc_dst_overflow);
1072         return 1;
1073
1074 work_done:
1075         expire += ip_rt_gc_min_interval;
1076         if (expire > ip_rt_gc_timeout ||
1077             atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
1078                 expire = ip_rt_gc_timeout;
1079 #if RT_CACHE_DEBUG >= 2
1080         printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
1081                         atomic_read(&ipv4_dst_ops.entries), goal, rover);
1082 #endif
1083 out:    return 0;
1084 }
1085
1086 /*
1087  * Returns number of entries in a hash chain that have different hash_inputs
1088  */
1089 static int slow_chain_length(const struct rtable *head)
1090 {
1091         int length = 0;
1092         const struct rtable *rth = head;
1093
1094         while (rth) {
1095                 length += has_noalias(head, rth);
1096                 rth = rth->u.dst.rt_next;
1097         }
1098         return length >> FRACT_BITS;
1099 }
1100
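/*
 * Insert rt into the cache chain selected by 'hash'.  If an entry with
 * the same key is already cached, it is promoted to the head of the
 * chain and reused instead of rt; whichever route survives is handed
 * back through *rp or attached to skb.
 */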
1101 static int rt_intern_hash(unsigned hash, struct rtable *rt,
1102                           struct rtable **rp, struct sk_buff *skb)
1103 {
1104         struct rtable   *rth, **rthp;
1105         unsigned long   now;
1106         struct rtable *cand, **candp;
1107         u32             min_score;
1108         int             chain_length;
1109         int attempts = !in_softirq();
1110
1111 restart:
1112         chain_length = 0;
1113         min_score = ~(u32)0;
1114         cand = NULL;
1115         candp = NULL;
1116         now = jiffies;
1117
1118         if (!rt_caching(dev_net(rt->u.dst.dev))) {
1119                 /*
1120                  * If we're not caching, just tell the caller we
1121                  * were successful and don't touch the route.  The
1122                  * caller holds the sole reference to the cache entry, and
1123                  * it will be released when the caller is done with it.
1124                  * If we drop it here, the callers have no way to resolve routes
1125                  * when we're not caching.  Instead, just point *rp at rt, so
1126                  * the caller gets a single use out of the route.
1127                  * Note that we do rt_free on this new route entry, so that
1128                  * once its refcount hits zero, we are still able to reap it
1129                  * (Thanks Alexey)
1130                  * Note also the rt_free uses call_rcu.  We don't actually
1131                  * need rcu protection here, this is just our path to get
1132                  * on the route gc list.
1133                  */
1134
1135                 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
1136                         int err = arp_bind_neighbour(&rt->u.dst);
1137                         if (err) {
1138                                 if (net_ratelimit())
1139                                         printk(KERN_WARNING
1140                                             "Neighbour table failure & not caching routes.\n");
1141                                 rt_drop(rt);
1142                                 return err;
1143                         }
1144                 }
1145
1146                 rt_free(rt);
1147                 goto skip_hashing;
1148         }
1149
1150         rthp = &rt_hash_table[hash].chain;
1151
1152         spin_lock_bh(rt_hash_lock_addr(hash));
1153         while ((rth = *rthp) != NULL) {
1154                 if (rt_is_expired(rth)) {
1155                         *rthp = rth->u.dst.rt_next;
1156                         rt_free(rth);
1157                         continue;
1158                 }
1159                 if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
1160                         /* Put it first */
1161                         *rthp = rth->u.dst.rt_next;
1162                         /*
1163                          * Since lookup is lockfree, the deletion
1164                          * must be visible to another weakly ordered CPU before
1165                          * the insertion at the start of the hash chain.
1166                          */
1167                         rcu_assign_pointer(rth->u.dst.rt_next,
1168                                            rt_hash_table[hash].chain);
1169                         /*
1170                          * Since lookup is lockfree, the update writes
1171                          * must be ordered for consistency on SMP.
1172                          */
1173                         rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1174
1175                         dst_use(&rth->u.dst, now);
1176                         spin_unlock_bh(rt_hash_lock_addr(hash));
1177
1178                         rt_drop(rt);
1179                         if (rp)
1180                                 *rp = rth;
1181                         else
1182                                 skb_dst_set(skb, &rth->u.dst);
1183                         return 0;
1184                 }
1185
1186                 if (!atomic_read(&rth->u.dst.__refcnt)) {
1187                         u32 score = rt_score(rth);
1188
1189                         if (score <= min_score) {
1190                                 cand = rth;
1191                                 candp = rthp;
1192                                 min_score = score;
1193                         }
1194                 }
1195
1196                 chain_length++;
1197
1198                 rthp = &rth->u.dst.rt_next;
1199         }
1200
1201         if (cand) {
1202                 /* ip_rt_gc_elasticity used to be the average chain length;
1203                  * when it is exceeded, gc becomes really aggressive.
1204                  *
1205                  * The second limit is less certain. At the moment it allows
1206                  * only 2 entries per bucket. We will see.
1207                  */
1208                 if (chain_length > ip_rt_gc_elasticity) {
1209                         *candp = cand->u.dst.rt_next;
1210                         rt_free(cand);
1211                 }
1212         } else {
1213                 if (chain_length > rt_chain_length_max &&
1214                     slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1215                         struct net *net = dev_net(rt->u.dst.dev);
1216                         int num = ++net->ipv4.current_rt_cache_rebuild_count;
1217                         if (!rt_caching(dev_net(rt->u.dst.dev))) {
1218                                 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
1219                                         rt->u.dst.dev->name, num);
1220                         }
1221                         rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
1222                 }
1223         }
1224
1225         /* Try to bind the route to an ARP neighbour only if it is an output
1226            route or on the unicast forwarding path.
1227          */
1228         if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
1229                 int err = arp_bind_neighbour(&rt->u.dst);
1230                 if (err) {
1231                         spin_unlock_bh(rt_hash_lock_addr(hash));
1232
1233                         if (err != -ENOBUFS) {
1234                                 rt_drop(rt);
1235                                 return err;
1236                         }
1237
1238                         /* Neighbour tables are full and nothing
1239                            can be released. Try to shrink the route cache;
1240                            most likely it holds some neighbour records.
1241                          */
1242                         if (attempts-- > 0) {
1243                                 int saved_elasticity = ip_rt_gc_elasticity;
1244                                 int saved_int = ip_rt_gc_min_interval;
1245                                 ip_rt_gc_elasticity     = 1;
1246                                 ip_rt_gc_min_interval   = 0;
1247                                 rt_garbage_collect(&ipv4_dst_ops);
1248                                 ip_rt_gc_min_interval   = saved_int;
1249                                 ip_rt_gc_elasticity     = saved_elasticity;
1250                                 goto restart;
1251                         }
1252
1253                         if (net_ratelimit())
1254                                 printk(KERN_WARNING "Neighbour table overflow.\n");
1255                         rt_drop(rt);
1256                         return -ENOBUFS;
1257                 }
1258         }
1259
1260         rt->u.dst.rt_next = rt_hash_table[hash].chain;
1261
1262 #if RT_CACHE_DEBUG >= 2
1263         if (rt->u.dst.rt_next) {
1264                 struct rtable *trt;
1265                 printk(KERN_DEBUG "rt_cache @%02x: %pI4",
1266                        hash, &rt->rt_dst);
1267                 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
1268                         printk(" . %pI4", &trt->rt_dst);
1269                 printk("\n");
1270         }
1271 #endif
1272         /*
1273          * Since lookup is lockfree, we must make sure
1274          * previous writes to rt are committed to memory
1275          * before making rt visible to other CPUs.
1276          */
1277         rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1278
1279         spin_unlock_bh(rt_hash_lock_addr(hash));
1280
1281 skip_hashing:
1282         if (rp)
1283                 *rp = rt;
1284         else
1285                 skb_dst_set(skb, &rt->u.dst);
1286         return 0;
1287 }
1288
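/*
 * Lazily attach the long-lived inet_peer entry for rt_dst to this route.
 * The spinlock only arbitrates between two CPUs racing to install a
 * peer; the loser drops its extra reference again.
 */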
1289 void rt_bind_peer(struct rtable *rt, int create)
1290 {
1291         static DEFINE_SPINLOCK(rt_peer_lock);
1292         struct inet_peer *peer;
1293
1294         peer = inet_getpeer(rt->rt_dst, create);
1295
1296         spin_lock_bh(&rt_peer_lock);
1297         if (rt->peer == NULL) {
1298                 rt->peer = peer;
1299                 peer = NULL;
1300         }
1301         spin_unlock_bh(&rt_peer_lock);
1302         if (peer)
1303                 inet_putpeer(peer);
1304 }
1305
1306 /*
1307  * Peer allocation may fail only in serious out-of-memory conditions.  However
1308  * we can still generate some output.
1309  * Random ID selection looks a bit dangerous because we have no chance to
1310  * select an ID that is unique within a reasonable period of time.
1311  * But a broken packet identifier may be better than no packet at all.
1312  */
1313 static void ip_select_fb_ident(struct iphdr *iph)
1314 {
1315         static DEFINE_SPINLOCK(ip_fb_id_lock);
1316         static u32 ip_fallback_id;
1317         u32 salt;
1318
1319         spin_lock_bh(&ip_fb_id_lock);
1320         salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1321         iph->id = htons(salt & 0xFFFF);
1322         ip_fallback_id = salt;
1323         spin_unlock_bh(&ip_fb_id_lock);
1324 }
1325
1326 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1327 {
1328         struct rtable *rt = (struct rtable *) dst;
1329
1330         if (rt) {
1331                 if (rt->peer == NULL)
1332                         rt_bind_peer(rt, 1);
1333
1334                 /* If peer is attached to destination, it is never detached,
1335                    so we need not grab a lock to dereference it.
1336                  */
1337                 if (rt->peer) {
1338                         iph->id = htons(inet_getid(rt->peer, more));
1339                         return;
1340                 }
1341         } else
1342                 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1343                        __builtin_return_address(0));
1344
1345         ip_select_fb_ident(iph);
1346 }
1347
1348 static void rt_del(unsigned hash, struct rtable *rt)
1349 {
1350         struct rtable **rthp, *aux;
1351
1352         rthp = &rt_hash_table[hash].chain;
1353         spin_lock_bh(rt_hash_lock_addr(hash));
1354         ip_rt_put(rt);
1355         while ((aux = *rthp) != NULL) {
1356                 if (aux == rt || rt_is_expired(aux)) {
1357                         *rthp = aux->u.dst.rt_next;
1358                         rt_free(aux);
1359                         continue;
1360                 }
1361                 rthp = &aux->u.dst.rt_next;
1362         }
1363         spin_unlock_bh(rt_hash_lock_addr(hash));
1364 }
1365
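/*
 * Handle an ICMP redirect from old_gw to new_gw for daddr received on
 * dev.  After sanity checks on the new gateway, matching cache entries
 * are cloned with rt_gateway replaced and re-inserted, so existing
 * entries are retired rather than modified in place.
 */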
1366 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1367                     __be32 saddr, struct net_device *dev)
1368 {
1369         int i, k;
1370         struct in_device *in_dev = in_dev_get(dev);
1371         struct rtable *rth, **rthp;
1372         __be32  skeys[2] = { saddr, 0 };
1373         int  ikeys[2] = { dev->ifindex, 0 };
1374         struct netevent_redirect netevent;
1375         struct net *net;
1376
1377         if (!in_dev)
1378                 return;
1379
1380         net = dev_net(dev);
1381         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1382             ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1383             ipv4_is_zeronet(new_gw))
1384                 goto reject_redirect;
1385
1386         if (!rt_caching(net))
1387                 goto reject_redirect;
1388
1389         if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1390                 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1391                         goto reject_redirect;
1392                 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1393                         goto reject_redirect;
1394         } else {
1395                 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1396                         goto reject_redirect;
1397         }
1398
1399         for (i = 0; i < 2; i++) {
1400                 for (k = 0; k < 2; k++) {
1401                         unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1402                                                 rt_genid(net));
1403
1404                         rthp=&rt_hash_table[hash].chain;
1405
1406                         rcu_read_lock();
1407                         while ((rth = rcu_dereference(*rthp)) != NULL) {
1408                                 struct rtable *rt;
1409
1410                                 if (rth->fl.fl4_dst != daddr ||
1411                                     rth->fl.fl4_src != skeys[i] ||
1412                                     rth->fl.oif != ikeys[k] ||
1413                                     rth->fl.iif != 0 ||
1414                                     rt_is_expired(rth) ||
1415                                     !net_eq(dev_net(rth->u.dst.dev), net)) {
1416                                         rthp = &rth->u.dst.rt_next;
1417                                         continue;
1418                                 }
1419
1420                                 if (rth->rt_dst != daddr ||
1421                                     rth->rt_src != saddr ||
1422                                     rth->u.dst.error ||
1423                                     rth->rt_gateway != old_gw ||
1424                                     rth->u.dst.dev != dev)
1425                                         break;
1426
1427                                 dst_hold(&rth->u.dst);
1428                                 rcu_read_unlock();
1429
1430                                 rt = dst_alloc(&ipv4_dst_ops);
1431                                 if (rt == NULL) {
1432                                         ip_rt_put(rth);
1433                                         in_dev_put(in_dev);
1434                                         return;
1435                                 }
1436
1437                                 /* Copy all the information. */
1438                                 *rt = *rth;
1439                                 rt->u.dst.__use         = 1;
1440                                 atomic_set(&rt->u.dst.__refcnt, 1);
1441                                 rt->u.dst.child         = NULL;
1442                                 if (rt->u.dst.dev)
1443                                         dev_hold(rt->u.dst.dev);
1444                                 if (rt->idev)
1445                                         in_dev_hold(rt->idev);
1446                                 rt->u.dst.obsolete      = 0;
1447                                 rt->u.dst.lastuse       = jiffies;
1448                                 rt->u.dst.path          = &rt->u.dst;
1449                                 rt->u.dst.neighbour     = NULL;
1450                                 rt->u.dst.hh            = NULL;
1451 #ifdef CONFIG_XFRM
1452                                 rt->u.dst.xfrm          = NULL;
1453 #endif
1454                                 rt->rt_genid            = rt_genid(net);
1455                                 rt->rt_flags            |= RTCF_REDIRECTED;
1456
1457                                 /* Gateway is different ... */
1458                                 rt->rt_gateway          = new_gw;
1459
1460                                 /* Redirect received -> path was valid */
1461                                 dst_confirm(&rth->u.dst);
1462
1463                                 if (rt->peer)
1464                                         atomic_inc(&rt->peer->refcnt);
1465
1466                                 if (arp_bind_neighbour(&rt->u.dst) ||
1467                                     !(rt->u.dst.neighbour->nud_state &
1468                                             NUD_VALID)) {
1469                                         if (rt->u.dst.neighbour)
1470                                                 neigh_event_send(rt->u.dst.neighbour, NULL);
1471                                         ip_rt_put(rth);
1472                                         rt_drop(rt);
1473                                         goto do_next;
1474                                 }
1475
1476                                 netevent.old = &rth->u.dst;
1477                                 netevent.new = &rt->u.dst;
1478                                 call_netevent_notifiers(NETEVENT_REDIRECT,
1479                                                         &netevent);
1480
1481                                 rt_del(hash, rth);
1482                                 if (!rt_intern_hash(hash, rt, &rt, NULL))
1483                                         ip_rt_put(rt);
1484                                 goto do_next;
1485                         }
1486                         rcu_read_unlock();
1487                 do_next:
1488                         ;
1489                 }
1490         }
1491         in_dev_put(in_dev);
1492         return;
1493
1494 reject_redirect:
1495 #ifdef CONFIG_IP_ROUTE_VERBOSE
1496         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1497                 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
1498                         "  Advised path = %pI4 -> %pI4\n",
1499                        &old_gw, dev->name, &new_gw,
1500                        &saddr, &daddr);
1501 #endif
1502         in_dev_put(in_dev);
1503 }
1504
1505 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1506 {
1507         struct rtable *rt = (struct rtable *)dst;
1508         struct dst_entry *ret = dst;
1509
1510         if (rt) {
1511                 if (dst->obsolete) {
1512                         ip_rt_put(rt);
1513                         ret = NULL;
1514                 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1515                            rt->u.dst.expires) {
1516                         unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1517                                                 rt->fl.oif,
1518                                                 rt_genid(dev_net(dst->dev)));
1519 #if RT_CACHE_DEBUG >= 1
1520                         printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
1521                                 &rt->rt_dst, rt->fl.fl4_tos);
1522 #endif
1523                         rt_del(hash, rt);
1524                         ret = NULL;
1525                 }
1526         }
1527         return ret;
1528 }
1529
1530 /*
1531  * Algorithm:
1532  *      1. The first ip_rt_redirect_number redirects are sent
1533  *         with exponential backoff, after which we stop sending them
1534  *         altogether, assuming that the host ignores our redirects.
1535  *      2. If we did not see packets requiring redirects
1536  *         during ip_rt_redirect_silence, we assume that the host
1537  *         has forgotten the redirected route and start sending redirects again.
1538  *
1539  * This algorithm is much cheaper and more intelligent than dumb load limiting
1540  * in icmp.c.
1541  *
1542  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1543  * and "frag. need" (breaks PMTU discovery) in icmp.c.
1544  */
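/*
 * Concretely, in ip_rt_send_redirect() below: once k redirects have been
 * sent (rate_tokens == k), the next one goes out only after another
 * ip_rt_redirect_load << k jiffies, so the gaps grow as 2*load, 4*load,
 * 8*load, ... until ip_rt_redirect_number redirects have been sent and
 * we give up.
 */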
1545
1546 void ip_rt_send_redirect(struct sk_buff *skb)
1547 {
1548         struct rtable *rt = skb_rtable(skb);
1549         struct in_device *in_dev;
1550         int log_martians;
1551
1552         rcu_read_lock();
1553         in_dev = __in_dev_get_rcu(rt->u.dst.dev);
1554         if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1555                 rcu_read_unlock();
1556                 return;
1557         }
1558         log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1559         rcu_read_unlock();
1560
1561         /* No redirected packets during ip_rt_redirect_silence;
1562          * reset the algorithm.
1563          */
1564         if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
1565                 rt->u.dst.rate_tokens = 0;
1566
1567         /* Too many ignored redirects; do not send anything.
1568          * Set u.dst.rate_last to the time of the last seen redirected packet.
1569          */
1570         if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
1571                 rt->u.dst.rate_last = jiffies;
1572                 return;
1573         }
1574
1575         /* Check for load limit; set rate_last to the latest sent
1576          * redirect.
1577          */
1578         if (rt->u.dst.rate_tokens == 0 ||
1579             time_after(jiffies,
1580                        (rt->u.dst.rate_last +
1581                         (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
1582                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1583                 rt->u.dst.rate_last = jiffies;
1584                 ++rt->u.dst.rate_tokens;
1585 #ifdef CONFIG_IP_ROUTE_VERBOSE
1586                 if (log_martians &&
1587                     rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1588                     net_ratelimit())
1589                         printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1590                                 &rt->rt_src, rt->rt_iif,
1591                                 &rt->rt_dst, &rt->rt_gateway);
1592 #endif
1593         }
1594 }
1595
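/*
 * ip_error() rate-limits the ICMP errors it generates with a simple
 * token bucket: rate_tokens gains one token per elapsed jiffy, is capped
 * at ip_rt_error_burst, and each ICMP_DEST_UNREACH sent costs
 * ip_rt_error_cost tokens.
 */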
1596 static int ip_error(struct sk_buff *skb)
1597 {
1598         struct rtable *rt = skb_rtable(skb);
1599         unsigned long now;
1600         int code;
1601
1602         switch (rt->u.dst.error) {
1603                 case EINVAL:
1604                 default:
1605                         goto out;
1606                 case EHOSTUNREACH:
1607                         code = ICMP_HOST_UNREACH;
1608                         break;
1609                 case ENETUNREACH:
1610                         code = ICMP_NET_UNREACH;
1611                         IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
1612                                         IPSTATS_MIB_INNOROUTES);
1613                         break;
1614                 case EACCES:
1615                         code = ICMP_PKT_FILTERED;
1616                         break;
1617         }
1618
1619         now = jiffies;
1620         rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
1621         if (rt->u.dst.rate_tokens > ip_rt_error_burst)
1622                 rt->u.dst.rate_tokens = ip_rt_error_burst;
1623         rt->u.dst.rate_last = now;
1624         if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
1625                 rt->u.dst.rate_tokens -= ip_rt_error_cost;
1626                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1627         }
1628
1629 out:    kfree_skb(skb);
1630         return 0;
1631 }
1632
1633 /*
1634  *      The last two values are not from the RFC but
1635  *      are needed for AMPRnet AX.25 paths.
1636  */
1637
1638 static const unsigned short mtu_plateau[] =
1639 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1640
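/*
 * guess_mtu() below picks the largest plateau from the table above that
 * is strictly below the old MTU: e.g. an old_mtu of 1500 maps to 1492
 * and 576 maps to 296; 68 is the floor when nothing smaller matches.
 */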
1641 static inline unsigned short guess_mtu(unsigned short old_mtu)
1642 {
1643         int i;
1644
1645         for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1646                 if (old_mtu > mtu_plateau[i])
1647                         return mtu_plateau[i];
1648         return 68;
1649 }
1650
1651 unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1652                                  unsigned short new_mtu,
1653                                  struct net_device *dev)
1654 {
1655         int i, k;
1656         unsigned short old_mtu = ntohs(iph->tot_len);
1657         struct rtable *rth;
1658         int  ikeys[2] = { dev->ifindex, 0 };
1659         __be32  skeys[2] = { iph->saddr, 0, };
1660         __be32  daddr = iph->daddr;
1661         unsigned short est_mtu = 0;
1662
1663         for (k = 0; k < 2; k++) {
1664                 for (i = 0; i < 2; i++) {
1665                         unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1666                                                 rt_genid(net));
1667
1668                         rcu_read_lock();
1669                         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1670                              rth = rcu_dereference(rth->u.dst.rt_next)) {
1671                                 unsigned short mtu = new_mtu;
1672
1673                                 if (rth->fl.fl4_dst != daddr ||
1674                                     rth->fl.fl4_src != skeys[i] ||
1675                                     rth->rt_dst != daddr ||
1676                                     rth->rt_src != iph->saddr ||
1677                                     rth->fl.oif != ikeys[k] ||
1678                                     rth->fl.iif != 0 ||
1679                                     dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
1680                                     !net_eq(dev_net(rth->u.dst.dev), net) ||
1681                                     rt_is_expired(rth))
1682                                         continue;
1683
1684                                 if (new_mtu < 68 || new_mtu >= old_mtu) {
1685
1686                                         /* BSD 4.2 compatibility hack :-( */
1687                                         if (mtu == 0 &&
1688                                             old_mtu >= dst_mtu(&rth->u.dst) &&
1689                                             old_mtu >= 68 + (iph->ihl << 2))
1690                                                 old_mtu -= iph->ihl << 2;
1691
1692                                         mtu = guess_mtu(old_mtu);
1693                                 }
1694                                 if (mtu <= dst_mtu(&rth->u.dst)) {
1695                                         if (mtu < dst_mtu(&rth->u.dst)) {
1696                                                 dst_confirm(&rth->u.dst);
1697                                                 if (mtu < ip_rt_min_pmtu) {
1698                                                         mtu = ip_rt_min_pmtu;
1699                                                         rth->u.dst.metrics[RTAX_LOCK-1] |=
1700                                                                 (1 << RTAX_MTU);
1701                                                 }
1702                                                 rth->u.dst.metrics[RTAX_MTU-1] = mtu;
1703                                                 dst_set_expires(&rth->u.dst,
1704                                                         ip_rt_mtu_expires);
1705                                         }
1706                                         est_mtu = mtu;
1707                                 }
1708                         }
1709                         rcu_read_unlock();
1710                 }
1711         }
1712         return est_mtu ? : new_mtu;
1713 }
1714
1715 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1716 {
1717         if (dst_mtu(dst) > mtu && mtu >= 68 &&
1718             !(dst_metric_locked(dst, RTAX_MTU))) {
1719                 if (mtu < ip_rt_min_pmtu) {
1720                         mtu = ip_rt_min_pmtu;
1721                         dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
1722                 }
1723                 dst->metrics[RTAX_MTU-1] = mtu;
1724                 dst_set_expires(dst, ip_rt_mtu_expires);
1725                 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1726         }
1727 }
1728
1729 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1730 {
1731         return NULL;
1732 }
1733
1734 static void ipv4_dst_destroy(struct dst_entry *dst)
1735 {
1736         struct rtable *rt = (struct rtable *) dst;
1737         struct inet_peer *peer = rt->peer;
1738         struct in_device *idev = rt->idev;
1739
1740         if (peer) {
1741                 rt->peer = NULL;
1742                 inet_putpeer(peer);
1743         }
1744
1745         if (idev) {
1746                 rt->idev = NULL;
1747                 in_dev_put(idev);
1748         }
1749 }
1750
1751 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1752                             int how)
1753 {
1754         struct rtable *rt = (struct rtable *) dst;
1755         struct in_device *idev = rt->idev;
1756         if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
1757                 struct in_device *loopback_idev =
1758                         in_dev_get(dev_net(dev)->loopback_dev);
1759                 if (loopback_idev) {
1760                         rt->idev = loopback_idev;
1761                         in_dev_put(idev);
1762                 }
1763         }
1764 }
1765
1766 static void ipv4_link_failure(struct sk_buff *skb)
1767 {
1768         struct rtable *rt;
1769
1770         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1771
1772         rt = skb_rtable(skb);
1773         if (rt)
1774                 dst_set_expires(&rt->u.dst, 0);
1775 }
1776
1777 static int ip_rt_bug(struct sk_buff *skb)
1778 {
1779         printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1780                 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1781                 skb->dev ? skb->dev->name : "?");
1782         kfree_skb(skb);
1783         return 0;
1784 }
1785
1786 /*
1787    We do not cache the source address of the outgoing interface,
1788    because it is used only by the IP RR, TS and SRR options,
1789    so it is out of the fast path.
1790
1791    BTW remember: "addr" may be unaligned when it points
1792    into IP options!
1793  */
1794
1795 void ip_rt_get_source(u8 *addr, struct rtable *rt)
1796 {
1797         __be32 src;
1798         struct fib_result res;
1799
1800         if (rt->fl.iif == 0)
1801                 src = rt->rt_src;
1802         else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
1803                 src = FIB_RES_PREFSRC(res);
1804                 fib_res_put(&res);
1805         } else
1806                 src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
1807                                         RT_SCOPE_UNIVERSE);
1808         memcpy(addr, &src, 4);
1809 }
1810
1811 #ifdef CONFIG_NET_CLS_ROUTE
1812 static void set_class_tag(struct rtable *rt, u32 tag)
1813 {
1814         if (!(rt->u.dst.tclassid & 0xFFFF))
1815                 rt->u.dst.tclassid |= tag & 0xFFFF;
1816         if (!(rt->u.dst.tclassid & 0xFFFF0000))
1817                 rt->u.dst.tclassid |= tag & 0xFFFF0000;
1818 }
1819 #endif
1820
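/*
 * rt_set_nexthop() fills in the dst metrics for a new cache entry: it
 * copies the FIB metrics (and the gateway, for link-scope nexthops),
 * applies defaults and clamps for MTU, hop limit and advertised MSS,
 * and propagates the class tag and route type.
 */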
1821 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1822 {
1823         struct fib_info *fi = res->fi;
1824
1825         if (fi) {
1826                 if (FIB_RES_GW(*res) &&
1827                     FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1828                         rt->rt_gateway = FIB_RES_GW(*res);
1829                 memcpy(rt->u.dst.metrics, fi->fib_metrics,
1830                        sizeof(rt->u.dst.metrics));
1831                 if (fi->fib_mtu == 0) {
1832                         rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
1833                         if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1834                             rt->rt_gateway != rt->rt_dst &&
1835                             rt->u.dst.dev->mtu > 576)
1836                                 rt->u.dst.metrics[RTAX_MTU-1] = 576;
1837                 }
1838 #ifdef CONFIG_NET_CLS_ROUTE
1839                 rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1840 #endif
1841         } else
1842                 rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
1843
1844         if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
1845                 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
1846         if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
1847                 rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
1848         if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
1849                 rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
1850                                        ip_rt_min_advmss);
1851         if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
1852                 rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1853
1854 #ifdef CONFIG_NET_CLS_ROUTE
1855 #ifdef CONFIG_IP_MULTIPLE_TABLES
1856         set_class_tag(rt, fib_rules_tclass(res));
1857 #endif
1858         set_class_tag(rt, itag);
1859 #endif
1860         rt->rt_type = res->type;
1861 }
1862
1863 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1864                                 u8 tos, struct net_device *dev, int our)
1865 {
1866         unsigned hash;
1867         struct rtable *rth;
1868         __be32 spec_dst;
1869         struct in_device *in_dev = in_dev_get(dev);
1870         u32 itag = 0;
1871
1872         /* Primary sanity checks. */
1873
1874         if (in_dev == NULL)
1875                 return -EINVAL;
1876
1877         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1878             ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1879                 goto e_inval;
1880
1881         if (ipv4_is_zeronet(saddr)) {
1882                 if (!ipv4_is_local_multicast(daddr))
1883                         goto e_inval;
1884                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1885         } else if (fib_validate_source(saddr, 0, tos, 0,
1886                                         dev, &spec_dst, &itag, 0) < 0)
1887                 goto e_inval;
1888
1889         rth = dst_alloc(&ipv4_dst_ops);
1890         if (!rth)
1891                 goto e_nobufs;
1892
1893         rth->u.dst.output= ip_rt_bug;
1894
1895         atomic_set(&rth->u.dst.__refcnt, 1);
1896         rth->u.dst.flags= DST_HOST;
1897         if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1898                 rth->u.dst.flags |= DST_NOPOLICY;
1899         rth->fl.fl4_dst = daddr;
1900         rth->rt_dst     = daddr;
1901         rth->fl.fl4_tos = tos;
1902         rth->fl.mark    = skb->mark;
1903         rth->fl.fl4_src = saddr;
1904         rth->rt_src     = saddr;
1905 #ifdef CONFIG_NET_CLS_ROUTE
1906         rth->u.dst.tclassid = itag;
1907 #endif
1908         rth->rt_iif     =
1909         rth->fl.iif     = dev->ifindex;
1910         rth->u.dst.dev  = init_net.loopback_dev;
1911         dev_hold(rth->u.dst.dev);
1912         rth->idev       = in_dev_get(rth->u.dst.dev);
1913         rth->fl.oif     = 0;
1914         rth->rt_gateway = daddr;
1915         rth->rt_spec_dst= spec_dst;
1916         rth->rt_genid   = rt_genid(dev_net(dev));
1917         rth->rt_flags   = RTCF_MULTICAST;
1918         rth->rt_type    = RTN_MULTICAST;
1919         if (our) {
1920                 rth->u.dst.input= ip_local_deliver;
1921                 rth->rt_flags |= RTCF_LOCAL;
1922         }
1923
1924 #ifdef CONFIG_IP_MROUTE
1925         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1926                 rth->u.dst.input = ip_mr_input;
1927 #endif
1928         RT_CACHE_STAT_INC(in_slow_mc);
1929
1930         in_dev_put(in_dev);
1931         hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1932         return rt_intern_hash(hash, rth, NULL, skb);
1933
1934 e_nobufs:
1935         in_dev_put(in_dev);
1936         return -ENOBUFS;
1937
1938 e_inval:
1939         in_dev_put(in_dev);
1940         return -EINVAL;
1941 }
1942
1943
1944 static void ip_handle_martian_source(struct net_device *dev,
1945                                      struct in_device *in_dev,
1946                                      struct sk_buff *skb,
1947                                      __be32 daddr,
1948                                      __be32 saddr)
1949 {
1950         RT_CACHE_STAT_INC(in_martian_src);
1951 #ifdef CONFIG_IP_ROUTE_VERBOSE
1952         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1953                 /*
1954                  *      RFC1812 recommendation: if the source is martian,
1955                  *      the only hint is the MAC header.
1956                  */
1957                 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1958                         &daddr, &saddr, dev->name);
1959                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1960                         int i;
1961                         const unsigned char *p = skb_mac_header(skb);
1962                         printk(KERN_WARNING "ll header: ");
1963                         for (i = 0; i < dev->hard_header_len; i++, p++) {
1964                                 printk("%02x", *p);
1965                                 if (i < (dev->hard_header_len - 1))
1966                                         printk(":");
1967                         }
1968                         printk("\n");
1969                 }
1970         }
1971 #endif
1972 }
1973
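/*
 * __mkroute_input() builds the cache entry for a forwarded packet: it
 * validates the source address against the FIB, decides whether the
 * sender should be redirected (RTCF_DOREDIRECT), and wires the dst up
 * to ip_forward()/ip_output().
 */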
1974 static int __mkroute_input(struct sk_buff *skb,
1975                            struct fib_result *res,
1976                            struct in_device *in_dev,
1977                            __be32 daddr, __be32 saddr, u32 tos,
1978                            struct rtable **result)
1979 {
1980
1981         struct rtable *rth;
1982         int err;
1983         struct in_device *out_dev;
1984         unsigned flags = 0;
1985         __be32 spec_dst;
1986         u32 itag;
1987
1988         /* get a working reference to the output device */
1989         out_dev = in_dev_get(FIB_RES_DEV(*res));
1990         if (out_dev == NULL) {
1991                 if (net_ratelimit())
1992                         printk(KERN_CRIT "Bug in ip_route_input" \
1993                                "_slow(). Please, report\n");
1994                 return -EINVAL;
1995         }
1996
1997
1998         err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1999                                   in_dev->dev, &spec_dst, &itag, skb->mark);
2000         if (err < 0) {
2001                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
2002                                          saddr);
2003
2004                 err = -EINVAL;
2005                 goto cleanup;
2006         }
2007
2008         if (err)
2009                 flags |= RTCF_DIRECTSRC;
2010
2011         if (out_dev == in_dev && err &&
2012             (IN_DEV_SHARED_MEDIA(out_dev) ||
2013              inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
2014                 flags |= RTCF_DOREDIRECT;
2015
2016         if (skb->protocol != htons(ETH_P_IP)) {
2017                 /* Not IP (i.e. ARP). Do not create a route if it is
2018                  * invalid for proxy ARP. DNAT routes are always valid.
2019                  *
2020                  * The proxy ARP feature has been extended to allow ARP
2021                  * replies back to the same interface, to support
2022                  * private VLAN switch technologies. See arp.c.
2023                  */
2024                 if (out_dev == in_dev &&
2025                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
2026                         err = -EINVAL;
2027                         goto cleanup;
2028                 }
2029         }
2030
2031
2032         rth = dst_alloc(&ipv4_dst_ops);
2033         if (!rth) {
2034                 err = -ENOBUFS;
2035                 goto cleanup;
2036         }
2037
2038         atomic_set(&rth->u.dst.__refcnt, 1);
2039         rth->u.dst.flags= DST_HOST;
2040         if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2041                 rth->u.dst.flags |= DST_NOPOLICY;
2042         if (IN_DEV_CONF_GET(out_dev, NOXFRM))
2043                 rth->u.dst.flags |= DST_NOXFRM;
2044         rth->fl.fl4_dst = daddr;
2045         rth->rt_dst     = daddr;
2046         rth->fl.fl4_tos = tos;
2047         rth->fl.mark    = skb->mark;
2048         rth->fl.fl4_src = saddr;
2049         rth->rt_src     = saddr;
2050         rth->rt_gateway = daddr;
2051         rth->rt_iif     =
2052                 rth->fl.iif     = in_dev->dev->ifindex;
2053         rth->u.dst.dev  = (out_dev)->dev;
2054         dev_hold(rth->u.dst.dev);
2055         rth->idev       = in_dev_get(rth->u.dst.dev);
2056         rth->fl.oif     = 0;
2057         rth->rt_spec_dst= spec_dst;
2058
2059         rth->u.dst.input = ip_forward;
2060         rth->u.dst.output = ip_output;
2061         rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
2062
2063         rt_set_nexthop(rth, res, itag);
2064
2065         rth->rt_flags = flags;
2066
2067         *result = rth;
2068         err = 0;
2069  cleanup:
2070         /* release the working reference to the output device */
2071         in_dev_put(out_dev);
2072         return err;
2073 }
2074
2075 static int ip_mkroute_input(struct sk_buff *skb,
2076                             struct fib_result *res,
2077                             const struct flowi *fl,
2078                             struct in_device *in_dev,
2079                             __be32 daddr, __be32 saddr, u32 tos)
2080 {
2081         struct rtable* rth = NULL;
2082         int err;
2083         unsigned hash;
2084
2085 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2086         if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
2087                 fib_select_multipath(fl, res);
2088 #endif
2089
2090         /* create a routing cache entry */
2091         err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2092         if (err)
2093                 return err;
2094
2095         /* put it into the cache */
2096         hash = rt_hash(daddr, saddr, fl->iif,
2097                        rt_genid(dev_net(rth->u.dst.dev)));
2098         return rt_intern_hash(hash, rth, NULL, skb);
2099 }
2100
2101 /*
2102  *      NOTE. We drop all packets that have local source
2103  *      addresses, because every properly looped-back packet
2104  *      must already have the correct destination attached by the output routine.
2105  *
2106  *      This approach solves two big problems:
2107  *      1. Non-simplex devices are handled properly.
2108  *      2. IP spoofing attempts are filtered with a 100% guarantee.
2109  */
2110
2111 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2112                                u8 tos, struct net_device *dev)
2113 {
2114         struct fib_result res;
2115         struct in_device *in_dev = in_dev_get(dev);
2116         struct flowi fl = { .nl_u = { .ip4_u =
2117                                       { .daddr = daddr,
2118                                         .saddr = saddr,
2119                                         .tos = tos,
2120                                         .scope = RT_SCOPE_UNIVERSE,
2121                                       } },
2122                             .mark = skb->mark,
2123                             .iif = dev->ifindex };
2124         unsigned        flags = 0;
2125         u32             itag = 0;
2126         struct rtable * rth;
2127         unsigned        hash;
2128         __be32          spec_dst;
2129         int             err = -EINVAL;
2130         int             free_res = 0;
2131         struct net    * net = dev_net(dev);
2132
2133         /* IP on this device is disabled. */
2134
2135         if (!in_dev)
2136                 goto out;
2137
2138         /* Check for the weirdest martians, which cannot be detected
2139            by fib_lookup.
2140          */
2141
2142         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2143             ipv4_is_loopback(saddr))
2144                 goto martian_source;
2145
2146         if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
2147                 goto brd_input;
2148
2149         /* Accept zero addresses only for limited broadcast;
2150          * I do not even know whether to fix this or not. Waiting for complaints :-)
2151          */
2152         if (ipv4_is_zeronet(saddr))
2153                 goto martian_source;
2154
2155         if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
2156             ipv4_is_loopback(daddr))
2157                 goto martian_destination;
2158
2159         /*
2160          *      Now we are ready to route packet.
2161          */
2162         if ((err = fib_lookup(net, &fl, &res)) != 0) {
2163                 if (!IN_DEV_FORWARD(in_dev))
2164                         goto e_hostunreach;
2165                 goto no_route;
2166         }
2167         free_res = 1;
2168
2169         RT_CACHE_STAT_INC(in_slow_tot);
2170
2171         if (res.type == RTN_BROADCAST)
2172                 goto brd_input;
2173
2174         if (res.type == RTN_LOCAL) {
2175                 int result;
2176                 result = fib_validate_source(saddr, daddr, tos,
2177                                              net->loopback_dev->ifindex,
2178                                              dev, &spec_dst, &itag, skb->mark);
2179                 if (result < 0)
2180                         goto martian_source;
2181                 if (result)
2182                         flags |= RTCF_DIRECTSRC;
2183                 spec_dst = daddr;
2184                 goto local_input;
2185         }
2186
2187         if (!IN_DEV_FORWARD(in_dev))
2188                 goto e_hostunreach;
2189         if (res.type != RTN_UNICAST)
2190                 goto martian_destination;
2191
2192         err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
2193 done:
2194         in_dev_put(in_dev);
2195         if (free_res)
2196                 fib_res_put(&res);
2197 out:    return err;
2198
2199 brd_input:
2200         if (skb->protocol != htons(ETH_P_IP))
2201                 goto e_inval;
2202
2203         if (ipv4_is_zeronet(saddr))
2204                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2205         else {
2206                 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
2207                                           &itag, skb->mark);
2208                 if (err < 0)
2209                         goto martian_source;
2210                 if (err)
2211                         flags |= RTCF_DIRECTSRC;
2212         }
2213         flags |= RTCF_BROADCAST;
2214         res.type = RTN_BROADCAST;
2215         RT_CACHE_STAT_INC(in_brd);
2216
2217 local_input:
2218         rth = dst_alloc(&ipv4_dst_ops);
2219         if (!rth)
2220                 goto e_nobufs;
2221
2222         rth->u.dst.output= ip_rt_bug;
2223         rth->rt_genid = rt_genid(net);
2224
2225         atomic_set(&rth->u.dst.__refcnt, 1);
2226         rth->u.dst.flags= DST_HOST;
2227         if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2228                 rth->u.dst.flags |= DST_NOPOLICY;
2229         rth->fl.fl4_dst = daddr;
2230         rth->rt_dst     = daddr;
2231         rth->fl.fl4_tos = tos;
2232         rth->fl.mark    = skb->mark;
2233         rth->fl.fl4_src = saddr;
2234         rth->rt_src     = saddr;
2235 #ifdef CONFIG_NET_CLS_ROUTE
2236         rth->u.dst.tclassid = itag;
2237 #endif
2238         rth->rt_iif     =
2239         rth->fl.iif     = dev->ifindex;
2240         rth->u.dst.dev  = net->loopback_dev;
2241         dev_hold(rth->u.dst.dev);
2242         rth->idev       = in_dev_get(rth->u.dst.dev);
2243         rth->rt_gateway = daddr;
2244         rth->rt_spec_dst= spec_dst;
2245         rth->u.dst.input= ip_local_deliver;
2246         rth->rt_flags   = flags|RTCF_LOCAL;
2247         if (res.type == RTN_UNREACHABLE) {
2248                 rth->u.dst.input= ip_error;
2249                 rth->u.dst.error= -err;
2250                 rth->rt_flags   &= ~RTCF_LOCAL;
2251         }
2252         rth->rt_type    = res.type;
2253         hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2254         err = rt_intern_hash(hash, rth, NULL, skb);
2255         goto done;
2256
2257 no_route:
2258         RT_CACHE_STAT_INC(in_no_route);
2259         spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2260         res.type = RTN_UNREACHABLE;
2261         if (err == -ESRCH)
2262                 err = -ENETUNREACH;
2263         goto local_input;
2264
2265         /*
2266          *      Do not cache martian addresses: they should be logged (RFC1812)
2267          */
2268 martian_destination:
2269         RT_CACHE_STAT_INC(in_martian_dst);
2270 #ifdef CONFIG_IP_ROUTE_VERBOSE
2271         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2272                 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2273                         &daddr, &saddr, dev->name);
2274 #endif
2275
2276 e_hostunreach:
2277         err = -EHOSTUNREACH;
2278         goto done;
2279
2280 e_inval:
2281         err = -EINVAL;
2282         goto done;
2283
2284 e_nobufs:
2285         err = -ENOBUFS;
2286         goto done;
2287
2288 martian_source:
2289         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2290         goto e_inval;
2291 }
2292
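/*
 * ip_route_input() is the fast path for received packets: it probes the
 * route cache bucket for a matching (daddr, saddr, iif, tos, mark)
 * entry, handles multicast specially (see the comment below), and
 * otherwise falls back to ip_route_input_slow().
 */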
2293 int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2294                    u8 tos, struct net_device *dev)
2295 {
2296         struct rtable * rth;
2297         unsigned        hash;
2298         int iif = dev->ifindex;
2299         struct net *net;
2300
2301         net = dev_net(dev);
2302
2303         if (!rt_caching(net))
2304                 goto skip_cache;
2305
2306         tos &= IPTOS_RT_MASK;
2307         hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2308
2309         rcu_read_lock();
2310         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2311              rth = rcu_dereference(rth->u.dst.rt_next)) {
2312                 if (((rth->fl.fl4_dst ^ daddr) |
2313                      (rth->fl.fl4_src ^ saddr) |
2314                      (rth->fl.iif ^ iif) |
2315                      rth->fl.oif |
2316                      (rth->fl.fl4_tos ^ tos)) == 0 &&
2317                     rth->fl.mark == skb->mark &&
2318                     net_eq(dev_net(rth->u.dst.dev), net) &&
2319                     !rt_is_expired(rth)) {
2320                         dst_use(&rth->u.dst, jiffies);
2321                         RT_CACHE_STAT_INC(in_hit);
2322                         rcu_read_unlock();
2323                         skb_dst_set(skb, &rth->u.dst);
2324                         return 0;
2325                 }
2326                 RT_CACHE_STAT_INC(in_hlist_search);
2327         }
2328         rcu_read_unlock();
2329
2330 skip_cache:
2331         /* Multicast recognition logic is moved from the route cache to here.
2332            The problem was that too many Ethernet cards have broken/missing
2333            hardware multicast filters :-( As a result, a host on a multicast
2334            network acquires a lot of useless route cache entries, e.g. from
2335            SDR messages from all over the world. Now we try to get rid of them.
2336            Really, provided the software IP multicast filter is organized
2337            reasonably (at least, hashed), it does not cause a slowdown
2338            compared with route cache reject entries.
2339            Note that multicast routers are not affected, because
2340            a route cache entry is created eventually.
2341          */
2342         if (ipv4_is_multicast(daddr)) {
2343                 struct in_device *in_dev;
2344
2345                 rcu_read_lock();
2346                 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
2347                         int our = ip_check_mc(in_dev, daddr, saddr,
2348                                 ip_hdr(skb)->protocol);
2349                         if (our
2350 #ifdef CONFIG_IP_MROUTE
2351                                 ||
2352                             (!ipv4_is_local_multicast(daddr) &&
2353                              IN_DEV_MFORWARD(in_dev))
2354 #endif
2355                            ) {
2356                                 rcu_read_unlock();
2357                                 return ip_route_input_mc(skb, daddr, saddr,
2358                                                          tos, dev, our);
2359                         }
2360                 }
2361                 rcu_read_unlock();
2362                 return -EINVAL;
2363         }
2364         return ip_route_input_slow(skb, daddr, saddr, tos, dev);
2365 }
2366
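/*
 * __mkroute_output() is the output-side counterpart of __mkroute_input():
 * it classifies the destination (broadcast/multicast/unicast), allocates
 * the dst, and points its input/output handlers at ip_local_deliver(),
 * ip_output() or ip_mc_output() as appropriate.
 */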
2367 static int __mkroute_output(struct rtable **result,
2368                             struct fib_result *res,
2369                             const struct flowi *fl,
2370                             const struct flowi *oldflp,
2371                             struct net_device *dev_out,
2372                             unsigned flags)
2373 {
2374         struct rtable *rth;
2375         struct in_device *in_dev;
2376         u32 tos = RT_FL_TOS(oldflp);
2377         int err = 0;
2378
2379         if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
2380                 return -EINVAL;
2381
2382         if (fl->fl4_dst == htonl(0xFFFFFFFF))
2383                 res->type = RTN_BROADCAST;
2384         else if (ipv4_is_multicast(fl->fl4_dst))
2385                 res->type = RTN_MULTICAST;
2386         else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
2387                 return -EINVAL;
2388
2389         if (dev_out->flags & IFF_LOOPBACK)
2390                 flags |= RTCF_LOCAL;
2391
2392         /* get a working reference to the inet device */
2393         in_dev = in_dev_get(dev_out);
2394         if (!in_dev)
2395                 return -EINVAL;
2396
2397         if (res->type == RTN_BROADCAST) {
2398                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2399                 if (res->fi) {
2400                         fib_info_put(res->fi);
2401                         res->fi = NULL;
2402                 }
2403         } else if (res->type == RTN_MULTICAST) {
2404                 flags |= RTCF_MULTICAST|RTCF_LOCAL;
2405                 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2406                                  oldflp->proto))
2407                         flags &= ~RTCF_LOCAL;
2408                 /* If no multicast route exists, use the
2409                    default one, but do not gateway in this case.
2410                    Yes, it is a hack.
2411                  */
2412                 if (res->fi && res->prefixlen < 4) {
2413                         fib_info_put(res->fi);
2414                         res->fi = NULL;
2415                 }
2416         }
2417
2418
2419         rth = dst_alloc(&ipv4_dst_ops);
2420         if (!rth) {
2421                 err = -ENOBUFS;
2422                 goto cleanup;
2423         }
2424
2425         atomic_set(&rth->u.dst.__refcnt, 1);
2426         rth->u.dst.flags= DST_HOST;
2427         if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2428                 rth->u.dst.flags |= DST_NOXFRM;
2429         if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2430                 rth->u.dst.flags |= DST_NOPOLICY;
2431
2432         rth->fl.fl4_dst = oldflp->fl4_dst;
2433         rth->fl.fl4_tos = tos;
2434         rth->fl.fl4_src = oldflp->fl4_src;
2435         rth->fl.oif     = oldflp->oif;
2436         rth->fl.mark    = oldflp->mark;
2437         rth->rt_dst     = fl->fl4_dst;
2438         rth->rt_src     = fl->fl4_src;
2439         rth->rt_iif     = oldflp->oif ? : dev_out->ifindex;
2440         /* get references to the devices that are to be held by the routing
2441            cache entry */
2442         rth->u.dst.dev  = dev_out;
2443         dev_hold(dev_out);
2444         rth->idev       = in_dev_get(dev_out);
2445         rth->rt_gateway = fl->fl4_dst;
2446         rth->rt_spec_dst= fl->fl4_src;
2447
2448         rth->u.dst.output=ip_output;
2449         rth->rt_genid = rt_genid(dev_net(dev_out));
2450
2451         RT_CACHE_STAT_INC(out_slow_tot);
2452
2453         if (flags & RTCF_LOCAL) {
2454                 rth->u.dst.input = ip_local_deliver;
2455                 rth->rt_spec_dst = fl->fl4_dst;
2456         }
2457         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2458                 rth->rt_spec_dst = fl->fl4_src;
2459                 if (flags & RTCF_LOCAL &&
2460                     !(dev_out->flags & IFF_LOOPBACK)) {
2461                         rth->u.dst.output = ip_mc_output;
2462                         RT_CACHE_STAT_INC(out_slow_mc);
2463                 }
2464 #ifdef CONFIG_IP_MROUTE
2465                 if (res->type == RTN_MULTICAST) {
2466                         if (IN_DEV_MFORWARD(in_dev) &&
2467                             !ipv4_is_local_multicast(oldflp->fl4_dst)) {
2468                                 rth->u.dst.input = ip_mr_input;
2469                                 rth->u.dst.output = ip_mc_output;
2470                         }
2471                 }
2472 #endif
2473         }
2474
2475         rt_set_nexthop(rth, res, 0);
2476
2477         rth->rt_flags = flags;
2478
2479         *result = rth;
2480  cleanup:
2481         /* release the working reference to the inet device */
2482         in_dev_put(in_dev);
2483
2484         return err;
2485 }
2486
2487 static int ip_mkroute_output(struct rtable **rp,
2488                              struct fib_result *res,
2489                              const struct flowi *fl,
2490                              const struct flowi *oldflp,
2491                              struct net_device *dev_out,
2492                              unsigned flags)
2493 {
2494         struct rtable *rth = NULL;
2495         int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2496         unsigned hash;
2497         if (err == 0) {
2498                 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2499                                rt_genid(dev_net(dev_out)));
2500                 err = rt_intern_hash(hash, rth, rp, NULL);
2501         }
2502
2503         return err;
2504 }
2505
2506 /*
2507  * Major route resolver routine.
2508  */
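/*
 * In outline: validate any caller-supplied source address, honour an
 * explicit output interface, fall back to a local loopback route when no
 * destination is given, consult the FIB, and finally build and hash the
 * cache entry via ip_mkroute_output().
 */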
2509
2510 static int ip_route_output_slow(struct net *net, struct rtable **rp,
2511                                 const struct flowi *oldflp)
2512 {
2513         u32 tos = RT_FL_TOS(oldflp);
2514         struct flowi fl = { .nl_u = { .ip4_u =
2515                                       { .daddr = oldflp->fl4_dst,
2516                                         .saddr = oldflp->fl4_src,
2517                                         .tos = tos & IPTOS_RT_MASK,
2518                                         .scope = ((tos & RTO_ONLINK) ?
2519                                                   RT_SCOPE_LINK :
2520                                                   RT_SCOPE_UNIVERSE),
2521                                       } },
2522                             .mark = oldflp->mark,
2523                             .iif = net->loopback_dev->ifindex,
2524                             .oif = oldflp->oif };
2525         struct fib_result res;
2526         unsigned flags = 0;
2527         struct net_device *dev_out = NULL;
2528         int free_res = 0;
2529         int err;
2530
2531
2532         res.fi          = NULL;
2533 #ifdef CONFIG_IP_MULTIPLE_TABLES
2534         res.r           = NULL;
2535 #endif
2536
2537         if (oldflp->fl4_src) {
2538                 err = -EINVAL;
2539                 if (ipv4_is_multicast(oldflp->fl4_src) ||
2540                     ipv4_is_lbcast(oldflp->fl4_src) ||
2541                     ipv4_is_zeronet(oldflp->fl4_src))
2542                         goto out;
2543
2544                 /* I removed the check for oif == dev_out->oif here.
2545                    It was wrong for two reasons:
2546                    1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2547                       is assigned to multiple interfaces.
2548                    2. Moreover, we are allowed to send packets with the saddr
2549                       of another iface. --ANK
2550                  */
2551
2552                 if (oldflp->oif == 0 &&
2553                     (ipv4_is_multicast(oldflp->fl4_dst) ||
2554                      oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
2555                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2556                         dev_out = ip_dev_find(net, oldflp->fl4_src);
2557                         if (dev_out == NULL)
2558                                 goto out;
2559
2560                         /* Special hack: the user can direct multicasts
2561                            and limited broadcast via the desired interface
2562                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2563                            This hack is not just for fun, it allows
2564                            vic, vat and friends to work.
2565                            They bind the socket to loopback, set the ttl to zero
2566                            and expect that it will work.
2567                            From the viewpoint of the routing cache they are broken,
2568                            because we are not allowed to build a multicast path
2569                            with a loopback source address (the routing cache
2570                            cannot know that the ttl is zero, so the packet
2571                            will not leave this host and the route is valid).
2572                            Luckily, this hack is a good workaround.
2573                          */
2574
2575                         fl.oif = dev_out->ifindex;
2576                         goto make_route;
2577                 }
2578
2579                 if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
2580                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2581                         dev_out = ip_dev_find(net, oldflp->fl4_src);
2582                         if (dev_out == NULL)
2583                                 goto out;
2584                         dev_put(dev_out);
2585                         dev_out = NULL;
2586                 }
2587         }
2588
2589
2590         if (oldflp->oif) {
2591                 dev_out = dev_get_by_index(net, oldflp->oif);
2592                 err = -ENODEV;
2593                 if (dev_out == NULL)
2594                         goto out;
2595
2596                 /* RACE: Check return value of inet_select_addr instead. */
2597                 if (__in_dev_get_rtnl(dev_out) == NULL) {
2598                         dev_put(dev_out);
2599                         goto out;       /* Wrong error code */
2600                 }
2601
2602                 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
2603                     oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
2604                         if (!fl.fl4_src)
2605                                 fl.fl4_src = inet_select_addr(dev_out, 0,
2606                                                               RT_SCOPE_LINK);
2607                         goto make_route;
2608                 }
2609                 if (!fl.fl4_src) {
2610                         if (ipv4_is_multicast(oldflp->fl4_dst))
2611                                 fl.fl4_src = inet_select_addr(dev_out, 0,
2612                                                               fl.fl4_scope);
2613                         else if (!oldflp->fl4_dst)
2614                                 fl.fl4_src = inet_select_addr(dev_out, 0,
2615                                                               RT_SCOPE_HOST);
2616                 }
2617         }
2618
2619         if (!fl.fl4_dst) {
2620                 fl.fl4_dst = fl.fl4_src;
2621                 if (!fl.fl4_dst)
2622                         fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
2623                 if (dev_out)
2624                         dev_put(dev_out);
2625                 dev_out = net->loopback_dev;
2626                 dev_hold(dev_out);
2627                 fl.oif = net->loopback_dev->ifindex;
2628                 res.type = RTN_LOCAL;
2629                 flags |= RTCF_LOCAL;
2630                 goto make_route;
2631         }
2632
2633         if (fib_lookup(net, &fl, &res)) {
2634                 res.fi = NULL;
2635                 if (oldflp->oif) {
2636                         /* Apparently, the routing tables are wrong. Assume
2637                            that the destination is on link.
2638
2639                            WHY? DW.
2640                            Because we are allowed to send to an iface
2641                            even if it has NO routes and NO assigned
2642                            addresses. When oif is specified, the routing
2643                            tables are looked up with only one purpose:
2644                            to check whether the destination is gatewayed,
2645                            rather than direct. Moreover, if MSG_DONTROUTE is
2646                            set, we send the packet, ignoring both the routing
2647                            tables and the ifaddr state. --ANK
2648
2649
2650                            We could do this even if oif is unknown
2651                            (as IPv6 likely does), but we do not.
2652                          */
2653
2654                         if (fl.fl4_src == 0)
2655                                 fl.fl4_src = inet_select_addr(dev_out, 0,
2656                                                               RT_SCOPE_LINK);
2657                         res.type = RTN_UNICAST;
2658                         goto make_route;
2659                 }
2660                 if (dev_out)
2661                         dev_put(dev_out);
2662                 err = -ENETUNREACH;
2663                 goto out;
2664         }
2665         free_res = 1;
2666
2667         if (res.type == RTN_LOCAL) {
2668                 if (!fl.fl4_src)
2669                         fl.fl4_src = fl.fl4_dst;
2670                 if (dev_out)
2671                         dev_put(dev_out);
2672                 dev_out = net->loopback_dev;
2673                 dev_hold(dev_out);
2674                 fl.oif = dev_out->ifindex;
2675                 if (res.fi)
2676                         fib_info_put(res.fi);
2677                 res.fi = NULL;
2678                 flags |= RTCF_LOCAL;
2679                 goto make_route;
2680         }
2681
2682 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2683         if (res.fi->fib_nhs > 1 && fl.oif == 0)
2684                 fib_select_multipath(&fl, &res);
2685         else
2686 #endif
2687         if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2688                 fib_select_default(net, &fl, &res);
2689
2690         if (!fl.fl4_src)
2691                 fl.fl4_src = FIB_RES_PREFSRC(res);
2692
2693         if (dev_out)
2694                 dev_put(dev_out);
2695         dev_out = FIB_RES_DEV(res);
2696         dev_hold(dev_out);
2697         fl.oif = dev_out->ifindex;
2698
2699
2700 make_route:
2701         err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2702
2703
2704         if (free_res)
2705                 fib_res_put(&res);
2706         if (dev_out)
2707                 dev_put(dev_out);
2708 out:    return err;
2709 }
2710
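/*
 * __ip_route_output_key() is the output fast path: it scans the cache
 * bucket for an exact flow match (dst, src, oif, mark, tos) under
 * rcu_read_lock_bh() and only calls ip_route_output_slow() on a miss or
 * when caching is disabled.
 */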
2711 int __ip_route_output_key(struct net *net, struct rtable **rp,
2712                           const struct flowi *flp)
2713 {
2714         unsigned hash;
2715         struct rtable *rth;
2716
2717         if (!rt_caching(net))
2718                 goto slow_output;
2719
2720         hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
2721
2722         rcu_read_lock_bh();
2723         for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2724                 rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
2725                 if (rth->fl.fl4_dst == flp->fl4_dst &&
2726                     rth->fl.fl4_src == flp->fl4_src &&
2727                     rth->fl.iif == 0 &&
2728                     rth->fl.oif == flp->oif &&
2729                     rth->fl.mark == flp->mark &&
2730                     !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2731                             (IPTOS_RT_MASK | RTO_ONLINK)) &&
2732                     net_eq(dev_net(rth->u.dst.dev), net) &&
2733                     !rt_is_expired(rth)) {
2734                         dst_use(&rth->u.dst, jiffies);
2735                         RT_CACHE_STAT_INC(out_hit);
2736                         rcu_read_unlock_bh();
2737                         *rp = rth;
2738                         return 0;
2739                 }
2740                 RT_CACHE_STAT_INC(out_hlist_search);
2741         }
2742         rcu_read_unlock_bh();
2743
2744 slow_output:
2745         return ip_route_output_slow(net, rp, flp);
2746 }
2747
2748 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2749
2750 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2751 {
2752 }
2753
2754 static struct dst_ops ipv4_dst_blackhole_ops = {
2755         .family                 =       AF_INET,
2756         .protocol               =       cpu_to_be16(ETH_P_IP),
2757         .destroy                =       ipv4_dst_destroy,
2758         .check                  =       ipv4_dst_check,
2759         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2760         .entries                =       ATOMIC_INIT(0),
2761 };
2762
2763
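/*
 * ipv4_dst_blackhole() clones an existing route into a dst whose input
 * and output handlers simply discard packets; ip_route_output_flow()
 * uses it when __xfrm_lookup() returns -EREMOTE.
 */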
2764 static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
2765 {
2766         struct rtable *ort = *rp;
2767         struct rtable *rt = (struct rtable *)
2768                 dst_alloc(&ipv4_dst_blackhole_ops);
2769
2770         if (rt) {
2771                 struct dst_entry *new = &rt->u.dst;
2772
2773                 atomic_set(&new->__refcnt, 1);
2774                 new->__use = 1;
2775                 new->input = dst_discard;
2776                 new->output = dst_discard;
2777                 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
2778
2779                 new->dev = ort->u.dst.dev;
2780                 if (new->dev)
2781                         dev_hold(new->dev);
2782
2783                 rt->fl = ort->fl;
2784
2785                 rt->idev = ort->idev;
2786                 if (rt->idev)
2787                         in_dev_hold(rt->idev);
2788                 rt->rt_genid = rt_genid(net);
2789                 rt->rt_flags = ort->rt_flags;
2790                 rt->rt_type = ort->rt_type;
2791                 rt->rt_dst = ort->rt_dst;
2792                 rt->rt_src = ort->rt_src;
2793                 rt->rt_iif = ort->rt_iif;
2794                 rt->rt_gateway = ort->rt_gateway;
2795                 rt->rt_spec_dst = ort->rt_spec_dst;
2796                 rt->peer = ort->peer;
2797                 if (rt->peer)
2798                         atomic_inc(&rt->peer->refcnt);
2799
2800                 dst_free(new);
2801         }
2802
2803         dst_release(&(*rp)->u.dst);
2804         *rp = rt;
2805         return (rt ? 0 : -ENOMEM);
2806 }
2807
2808 int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2809                          struct sock *sk, int flags)
2810 {
2811         int err;
2812
2813         if ((err = __ip_route_output_key(net, rp, flp)) != 0)
2814                 return err;
2815
2816         if (flp->proto) {
2817                 if (!flp->fl4_src)
2818                         flp->fl4_src = (*rp)->rt_src;
2819                 if (!flp->fl4_dst)
2820                         flp->fl4_dst = (*rp)->rt_dst;
2821                 err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
2822                                     flags ? XFRM_LOOKUP_WAIT : 0);
2823                 if (err == -EREMOTE)
2824                         err = ipv4_dst_blackhole(net, rp, flp);
2825
2826                 return err;
2827         }
2828
2829         return 0;
2830 }
2831
2832 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2833
2834 int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2835 {
2836         return ip_route_output_flow(net, rp, flp, NULL, 0);
2837 }
2838
2839 static int rt_fill_info(struct net *net,
2840                         struct sk_buff *skb, u32 pid, u32 seq, int event,
2841                         int nowait, unsigned int flags)
2842 {
2843         struct rtable *rt = skb_rtable(skb);
2844         struct rtmsg *r;
2845         struct nlmsghdr *nlh;
2846         long expires;
2847         u32 id = 0, ts = 0, tsage = 0, error;
2848
2849         nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2850         if (nlh == NULL)
2851                 return -EMSGSIZE;
2852
2853         r = nlmsg_data(nlh);
2854         r->rtm_family    = AF_INET;
2855         r->rtm_dst_len  = 32;
2856         r->rtm_src_len  = 0;
2857         r->rtm_tos      = rt->fl.fl4_tos;
2858         r->rtm_table    = RT_TABLE_MAIN;
2859         NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2860         r->rtm_type     = rt->rt_type;
2861         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2862         r->rtm_protocol = RTPROT_UNSPEC;
2863         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2864         if (rt->rt_flags & RTCF_NOTIFY)
2865                 r->rtm_flags |= RTM_F_NOTIFY;
2866
2867         NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2868
2869         if (rt->fl.fl4_src) {
2870                 r->rtm_src_len = 32;
2871                 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2872         }
2873         if (rt->u.dst.dev)
2874                 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
2875 #ifdef CONFIG_NET_CLS_ROUTE
2876         if (rt->u.dst.tclassid)
2877                 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
2878 #endif
2879         if (rt->fl.iif)
2880                 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2881         else if (rt->rt_src != rt->fl.fl4_src)
2882                 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2883
2884         if (rt->rt_dst != rt->rt_gateway)
2885                 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2886
2887         if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2888                 goto nla_put_failure;
2889
2890         error = rt->u.dst.error;
2891         expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
2892         if (rt->peer) {
2893                 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
2894                 if (rt->peer->tcp_ts_stamp) {
2895                         ts = rt->peer->tcp_ts;
2896                         tsage = get_seconds() - rt->peer->tcp_ts_stamp;
2897                 }
2898         }
2899
2900         if (rt->fl.iif) {
2901 #ifdef CONFIG_IP_MROUTE
2902                 __be32 dst = rt->rt_dst;
2903
2904                 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2905                     IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2906                         int err = ipmr_get_route(net, skb, r, nowait);
2907                         if (err <= 0) {
2908                                 if (!nowait) {
2909                                         if (err == 0)
2910                                                 return 0;
2911                                         goto nla_put_failure;
2912                                 } else {
2913                                         if (err == -EMSGSIZE)
2914                                                 goto nla_put_failure;
2915                                         error = err;
2916                                 }
2917                         }
2918                 } else
2919 #endif
2920                         NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2921         }
2922
2923         if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
2924                                expires, error) < 0)
2925                 goto nla_put_failure;
2926
2927         return nlmsg_end(skb, nlh);
2928
2929 nla_put_failure:
2930         nlmsg_cancel(skb, nlh);
2931         return -EMSGSIZE;
2932 }
2933
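/*
 * RTM_GETROUTE handler: build a dummy skb, then either simulate an input
 * lookup with ip_route_input() (when RTA_IIF is supplied) or perform an
 * output lookup with ip_route_output_key(), and unicast the resulting
 * route back to the requester via rt_fill_info().
 */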
2934 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2935 {
2936         struct net *net = sock_net(in_skb->sk);
2937         struct rtmsg *rtm;
2938         struct nlattr *tb[RTA_MAX+1];
2939         struct rtable *rt = NULL;
2940         __be32 dst = 0;
2941         __be32 src = 0;
2942         u32 iif;
2943         int err;
2944         struct sk_buff *skb;
2945
2946         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2947         if (err < 0)
2948                 goto errout;
2949
2950         rtm = nlmsg_data(nlh);
2951
2952         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2953         if (skb == NULL) {
2954                 err = -ENOBUFS;
2955                 goto errout;
2956         }
2957
2958         /* Reserve room for dummy headers; this skb can pass
2959            through a good chunk of the routing engine.
2960          */
2961         skb_reset_mac_header(skb);
2962         skb_reset_network_header(skb);
2963
2964         /* Bugfix: give ip_route_input() enough of an IP header so it does not choke. */
2965         ip_hdr(skb)->protocol = IPPROTO_ICMP;
2966         skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2967
2968         src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2969         dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2970         iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2971
2972         if (iif) {
2973                 struct net_device *dev;
2974
2975                 dev = __dev_get_by_index(net, iif);
2976                 if (dev == NULL) {
2977                         err = -ENODEV;
2978                         goto errout_free;
2979                 }
2980
2981                 skb->protocol   = htons(ETH_P_IP);
2982                 skb->dev        = dev;
2983                 local_bh_disable();
2984                 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2985                 local_bh_enable();
2986
2987                 rt = skb_rtable(skb);
2988                 if (err == 0 && rt->u.dst.error)
2989                         err = -rt->u.dst.error;
2990         } else {
2991                 struct flowi fl = {
2992                         .nl_u = {
2993                                 .ip4_u = {
2994                                         .daddr = dst,
2995                                         .saddr = src,
2996                                         .tos = rtm->rtm_tos,
2997                                 },
2998                         },
2999                         .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
3000                 };
3001                 err = ip_route_output_key(net, &rt, &fl);
3002         }
3003
3004         if (err)
3005                 goto errout_free;
3006
3007         skb_dst_set(skb, &rt->u.dst);
3008         if (rtm->rtm_flags & RTM_F_NOTIFY)
3009                 rt->rt_flags |= RTCF_NOTIFY;
3010
3011         err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
3012                            RTM_NEWROUTE, 0, 0);
3013         if (err <= 0)
3014                 goto errout_free;
3015
3016         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
3017 errout:
3018         return err;
3019
3020 errout_free:
3021         kfree_skb(skb);
3022         goto errout;
3023 }
3024
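/*
 * Netlink dump callback: walk the route cache hash table under
 * rcu_read_lock_bh() and emit one RTM_NEWROUTE message per live entry
 * belonging to the requesting namespace.  cb->args[0]/[1] carry the hash
 * bucket and chain index across successive dump calls.
 */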
3025 int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
3026 {
3027         struct rtable *rt;
3028         int h, s_h;
3029         int idx, s_idx;
3030         struct net *net;
3031
3032         net = sock_net(skb->sk);
3033
3034         s_h = cb->args[0];
3035         if (s_h < 0)
3036                 s_h = 0;
3037         s_idx = idx = cb->args[1];
3038         for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
3039                 if (!rt_hash_table[h].chain)
3040                         continue;
3041                 rcu_read_lock_bh();
3042                 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3043                      rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
3044                         if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
3045                                 continue;
3046                         if (rt_is_expired(rt))
3047                                 continue;
3048                         skb_dst_set(skb, dst_clone(&rt->u.dst));
3049                         if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
3050                                          cb->nlh->nlmsg_seq, RTM_NEWROUTE,
3051                                          1, NLM_F_MULTI) <= 0) {
3052                                 skb_dst_drop(skb);
3053                                 rcu_read_unlock_bh();
3054                                 goto done;
3055                         }
3056                         skb_dst_drop(skb);
3057                 }
3058                 rcu_read_unlock_bh();
3059         }
3060
3061 done:
3062         cb->args[0] = h;
3063         cb->args[1] = idx;
3064         return skb->len;
3065 }
3066
3067 void ip_rt_multicast_event(struct in_device *in_dev)
3068 {
3069         rt_cache_flush(dev_net(in_dev->dev), 0);
3070 }
3071
3072 #ifdef CONFIG_SYSCTL
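/*
 * Handler for the write-only /proc/sys/net/ipv4/route/flush file: the
 * integer written is passed to rt_cache_flush() as the flush delay for
 * the owning namespace (stashed in ->extra1 by sysctl_route_net_init()
 * below), e.g. "echo 0 > /proc/sys/net/ipv4/route/flush" flushes the
 * cache immediately.  Reads return -EINVAL.
 */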
3073 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
3074                                         void __user *buffer,
3075                                         size_t *lenp, loff_t *ppos)
3076 {
3077         if (write) {
3078                 int flush_delay;
3079                 ctl_table ctl;
3080                 struct net *net;
3081
3082                 memcpy(&ctl, __ctl, sizeof(ctl));
3083                 ctl.data = &flush_delay;
3084                 proc_dointvec(&ctl, write, buffer, lenp, ppos);
3085
3086                 net = (struct net *)__ctl->extra1;
3087                 rt_cache_flush(net, flush_delay);
3088                 return 0;
3089         }
3090
3091         return -EINVAL;
3092 }
3093
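/*
 * Called when ip_rt_secret_interval changes: stop each namespace's
 * pending secret-rebuild timer and, if the new interval is non-zero,
 * re-arm it (a timer that was pending keeps its remaining time shifted
 * by the difference between the new and old interval).
 */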
3094 static void rt_secret_reschedule(int old)
3095 {
3096         struct net *net;
3097         int new = ip_rt_secret_interval;
3098         int diff = new - old;
3099
3100         if (!diff)
3101                 return;
3102
3103         rtnl_lock();
3104         for_each_net(net) {
3105                 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
3106
3107                 if (!new)
3108                         continue;
3109
3110                 if (deleted) {
3111                         long time = net->ipv4.rt_secret_timer.expires - jiffies;
3112
3113                         if (time <= 0 || (time += diff) <= 0)
3114                                 time = 0;
3115
3116                         net->ipv4.rt_secret_timer.expires = time;
3117                 } else
3118                         net->ipv4.rt_secret_timer.expires = new;
3119
3120                 net->ipv4.rt_secret_timer.expires += jiffies;
3121                 add_timer(&net->ipv4.rt_secret_timer);
3122         }
3123         rtnl_unlock();
3124 }
3125
3126 static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
3127                                           void __user *buffer, size_t *lenp,
3128                                           loff_t *ppos)
3129 {
3130         int old = ip_rt_secret_interval;
3131         int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3132
3133         rt_secret_reschedule(old);
3134
3135         return ret;
3136 }
3137
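/*
 * Global routing tunables, exported under /proc/sys/net/ipv4/route/
 * (equivalently "sysctl net.ipv4.route.*").  Entries handled by
 * proc_dointvec_jiffies()/proc_dointvec_ms_jiffies() are presented in
 * seconds/milliseconds but stored in jiffies.
 */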
3138 static ctl_table ipv4_route_table[] = {
3139         {
3140                 .procname       = "gc_thresh",
3141                 .data           = &ipv4_dst_ops.gc_thresh,
3142                 .maxlen         = sizeof(int),
3143                 .mode           = 0644,
3144                 .proc_handler   = proc_dointvec,
3145         },
3146         {
3147                 .procname       = "max_size",
3148                 .data           = &ip_rt_max_size,
3149                 .maxlen         = sizeof(int),
3150                 .mode           = 0644,
3151                 .proc_handler   = proc_dointvec,
3152         },
3153         {
3154                 /*  Deprecated. Use gc_min_interval_ms */
3155
3156                 .procname       = "gc_min_interval",
3157                 .data           = &ip_rt_gc_min_interval,
3158                 .maxlen         = sizeof(int),
3159                 .mode           = 0644,
3160                 .proc_handler   = proc_dointvec_jiffies,
3161         },
3162         {
3163                 .procname       = "gc_min_interval_ms",
3164                 .data           = &ip_rt_gc_min_interval,
3165                 .maxlen         = sizeof(int),
3166                 .mode           = 0644,
3167                 .proc_handler   = proc_dointvec_ms_jiffies,
3168         },
3169         {
3170                 .procname       = "gc_timeout",
3171                 .data           = &ip_rt_gc_timeout,
3172                 .maxlen         = sizeof(int),
3173                 .mode           = 0644,
3174                 .proc_handler   = proc_dointvec_jiffies,
3175         },
3176         {
3177                 .procname       = "gc_interval",
3178                 .data           = &ip_rt_gc_interval,
3179                 .maxlen         = sizeof(int),
3180                 .mode           = 0644,
3181                 .proc_handler   = proc_dointvec_jiffies,
3182         },
3183         {
3184                 .procname       = "redirect_load",
3185                 .data           = &ip_rt_redirect_load,
3186                 .maxlen         = sizeof(int),
3187                 .mode           = 0644,
3188                 .proc_handler   = proc_dointvec,
3189         },
3190         {
3191                 .procname       = "redirect_number",
3192                 .data           = &ip_rt_redirect_number,
3193                 .maxlen         = sizeof(int),
3194                 .mode           = 0644,
3195                 .proc_handler   = proc_dointvec,
3196         },
3197         {
3198                 .procname       = "redirect_silence",
3199                 .data           = &ip_rt_redirect_silence,
3200                 .maxlen         = sizeof(int),
3201                 .mode           = 0644,
3202                 .proc_handler   = proc_dointvec,
3203         },
3204         {
3205                 .procname       = "error_cost",
3206                 .data           = &ip_rt_error_cost,
3207                 .maxlen         = sizeof(int),
3208                 .mode           = 0644,
3209                 .proc_handler   = proc_dointvec,
3210         },
3211         {
3212                 .procname       = "error_burst",
3213                 .data           = &ip_rt_error_burst,
3214                 .maxlen         = sizeof(int),
3215                 .mode           = 0644,
3216                 .proc_handler   = proc_dointvec,
3217         },
3218         {
3219                 .procname       = "gc_elasticity",
3220                 .data           = &ip_rt_gc_elasticity,
3221                 .maxlen         = sizeof(int),
3222                 .mode           = 0644,
3223                 .proc_handler   = proc_dointvec,
3224         },
3225         {
3226                 .procname       = "mtu_expires",
3227                 .data           = &ip_rt_mtu_expires,
3228                 .maxlen         = sizeof(int),
3229                 .mode           = 0644,
3230                 .proc_handler   = proc_dointvec_jiffies,
3231         },
3232         {
3233                 .procname       = "min_pmtu",
3234                 .data           = &ip_rt_min_pmtu,
3235                 .maxlen         = sizeof(int),
3236                 .mode           = 0644,
3237                 .proc_handler   = proc_dointvec,
3238         },
3239         {
3240                 .procname       = "min_adv_mss",
3241                 .data           = &ip_rt_min_advmss,
3242                 .maxlen         = sizeof(int),
3243                 .mode           = 0644,
3244                 .proc_handler   = proc_dointvec,
3245         },
3246         {
3247                 .procname       = "secret_interval",
3248                 .data           = &ip_rt_secret_interval,
3249                 .maxlen         = sizeof(int),
3250                 .mode           = 0644,
3251                 .proc_handler   = ipv4_sysctl_rt_secret_interval,
3252         },
3253         { }
3254 };
3255
3256 static struct ctl_table empty[1];
3257
3258 static struct ctl_table ipv4_skeleton[] =
3259 {
3260         { .procname = "route", 
3261           .mode = 0555, .child = ipv4_route_table},
3262         { .procname = "neigh", 
3263           .mode = 0555, .child = empty},
3264         { }
3265 };
3266
3267 static __net_initdata struct ctl_path ipv4_path[] = {
3268         { .procname = "net", },
3269         { .procname = "ipv4", },
3270         { },
3271 };
3272
3273 static struct ctl_table ipv4_route_flush_table[] = {
3274         {
3275                 .procname       = "flush",
3276                 .maxlen         = sizeof(int),
3277                 .mode           = 0200,
3278                 .proc_handler   = ipv4_sysctl_rtcache_flush,
3279         },
3280         { },
3281 };
3282
3283 static __net_initdata struct ctl_path ipv4_route_path[] = {
3284         { .procname = "net", },
3285         { .procname = "ipv4", },
3286         { .procname = "route", },
3287         { },
3288 };
3289
3290 static __net_init int sysctl_route_net_init(struct net *net)
3291 {
3292         struct ctl_table *tbl;
3293
3294         tbl = ipv4_route_flush_table;
3295         if (!net_eq(net, &init_net)) {
3296                 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3297                 if (tbl == NULL)
3298                         goto err_dup;
3299         }
3300         tbl[0].extra1 = net;
3301
3302         net->ipv4.route_hdr =
3303                 register_net_sysctl_table(net, ipv4_route_path, tbl);
3304         if (net->ipv4.route_hdr == NULL)
3305                 goto err_reg;
3306         return 0;
3307
3308 err_reg:
3309         if (tbl != ipv4_route_flush_table)
3310                 kfree(tbl);
3311 err_dup:
3312         return -ENOMEM;
3313 }
3314
3315 static __net_exit void sysctl_route_net_exit(struct net *net)
3316 {
3317         struct ctl_table *tbl;
3318
3319         tbl = net->ipv4.route_hdr->ctl_table_arg;
3320         unregister_net_sysctl_table(net->ipv4.route_hdr);
3321         BUG_ON(tbl == ipv4_route_flush_table);
3322         kfree(tbl);
3323 }
3324
3325 static __net_initdata struct pernet_operations sysctl_route_ops = {
3326         .init = sysctl_route_net_init,
3327         .exit = sysctl_route_net_exit,
3328 };
3329 #endif
3330
3331
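/*
 * Per-namespace init: seed rt_genid (bumping it invalidates every cached
 * route, see rt_is_expired()) and arm the deferrable timer that
 * periodically runs rt_secret_rebuild() when ip_rt_secret_interval is
 * non-zero.
 */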
3332 static __net_init int rt_secret_timer_init(struct net *net)
3333 {
3334         atomic_set(&net->ipv4.rt_genid,
3335                         (int) ((num_physpages ^ (num_physpages>>8)) ^
3336                         (jiffies ^ (jiffies >> 7))));
3337
3338         net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
3339         net->ipv4.rt_secret_timer.data = (unsigned long)net;
3340         init_timer_deferrable(&net->ipv4.rt_secret_timer);
3341
3342         if (ip_rt_secret_interval) {
3343                 net->ipv4.rt_secret_timer.expires =
3344                         jiffies + net_random() % ip_rt_secret_interval +
3345                         ip_rt_secret_interval;
3346                 add_timer(&net->ipv4.rt_secret_timer);
3347         }
3348         return 0;
3349 }
3350
3351 static __net_exit void rt_secret_timer_exit(struct net *net)
3352 {
3353         del_timer_sync(&net->ipv4.rt_secret_timer);
3354 }
3355
3356 static __net_initdata struct pernet_operations rt_secret_timer_ops = {
3357         .init = rt_secret_timer_init,
3358         .exit = rt_secret_timer_exit,
3359 };
3360
3361
3362 #ifdef CONFIG_NET_CLS_ROUTE
3363 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3364 #endif /* CONFIG_NET_CLS_ROUTE */
3365
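/*
 * The "rhash_entries=N" boot parameter overrides the automatic sizing of
 * the route cache hash table done by alloc_large_system_hash() in
 * ip_rt_init() below.
 */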
3366 static __initdata unsigned long rhash_entries;
3367 static int __init set_rhash_entries(char *str)
3368 {
3369         if (!str)
3370                 return 0;
3371         rhash_entries = simple_strtoul(str, &str, 0);
3372         return 1;
3373 }
3374 __setup("rhash_entries=", set_rhash_entries);
3375
3376 int __init ip_rt_init(void)
3377 {
3378         int rc = 0;
3379
3380 #ifdef CONFIG_NET_CLS_ROUTE
3381         ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3382         if (!ip_rt_acct)
3383                 panic("IP: failed to allocate ip_rt_acct\n");
3384 #endif
3385
3386         ipv4_dst_ops.kmem_cachep =
3387                 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3388                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3389
3390         ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3391
3392         rt_hash_table = (struct rt_hash_bucket *)
3393                 alloc_large_system_hash("IP route cache",
3394                                         sizeof(struct rt_hash_bucket),
3395                                         rhash_entries,
3396                                         (totalram_pages >= 128 * 1024) ?
3397                                         15 : 17,
3398                                         0,
3399                                         &rt_hash_log,
3400                                         &rt_hash_mask,
3401                                         rhash_entries ? 0 : 512 * 1024);
3402         memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3403         rt_hash_lock_init();
3404
3405         ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3406         ip_rt_max_size = (rt_hash_mask + 1) * 16;
3407
3408         devinet_init();
3409         ip_fib_init();
3410
3411         /* All the timers started at system startup tend
3412            to synchronize. Perturb them a bit.
3413          */
3414         INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3415         expires_ljiffies = jiffies;
3416         schedule_delayed_work(&expires_work,
3417                 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3418
3419         if (register_pernet_subsys(&rt_secret_timer_ops))
3420                 printk(KERN_ERR "Unable to setup rt_secret_timer\n");
3421
3422         if (ip_rt_proc_init())
3423                 printk(KERN_ERR "Unable to create route proc files\n");
3424 #ifdef CONFIG_XFRM
3425         xfrm_init();
3426         xfrm4_init(ip_rt_max_size);
3427 #endif
3428         rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3429
3430 #ifdef CONFIG_SYSCTL
3431         register_pernet_subsys(&sysctl_route_ops);
3432 #endif
3433         return rc;
3434 }
3435
3436 #ifdef CONFIG_SYSCTL
3437 /*
3438  * We really need to sanitize the damn ipv4 init order; then all
3439  * this nonsense will go away.
3440  */
3441 void __init ip_static_sysctl_init(void)
3442 {
3443         register_sysctl_paths(ipv4_path, ipv4_skeleton);
3444 }
3445 #endif
3446
3447 EXPORT_SYMBOL(__ip_select_ident);
3448 EXPORT_SYMBOL(ip_route_input);
3449 EXPORT_SYMBOL(ip_route_output_key);