2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
65 #include <linux/module.h>
66 #include <asm/uaccess.h>
67 #include <asm/system.h>
68 #include <linux/bitops.h>
69 #include <linux/types.h>
70 #include <linux/kernel.h>
72 #include <linux/bootmem.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/workqueue.h>
83 #include <linux/skbuff.h>
84 #include <linux/inetdevice.h>
85 #include <linux/igmp.h>
86 #include <linux/pkt_sched.h>
87 #include <linux/mroute.h>
88 #include <linux/netfilter_ipv4.h>
89 #include <linux/random.h>
90 #include <linux/jhash.h>
91 #include <linux/rcupdate.h>
92 #include <linux/times.h>
94 #include <net/net_namespace.h>
95 #include <net/protocol.h>
97 #include <net/route.h>
98 #include <net/inetpeer.h>
100 #include <net/ip_fib.h>
103 #include <net/icmp.h>
104 #include <net/xfrm.h>
105 #include <net/netevent.h>
106 #include <net/rtnetlink.h>
108 #include <linux/sysctl.h>
111 #define RT_FL_TOS(oldflp) \
112 ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
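/* Editorial note: RT_FL_TOS keeps only the TOS bits that routing actually
 * keys on (IPTOS_RT_MASK) plus the RTO_ONLINK scope hint carried in the
 * same field. */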
114 #define IP_MAX_MTU 0xFFF0
116 #define RT_GC_TIMEOUT (300*HZ)
118 static int ip_rt_max_size;
119 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
120 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
121 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
122 static int ip_rt_redirect_number __read_mostly = 9;
123 static int ip_rt_redirect_load __read_mostly = HZ / 50;
124 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
125 static int ip_rt_error_cost __read_mostly = HZ;
126 static int ip_rt_error_burst __read_mostly = 5 * HZ;
127 static int ip_rt_gc_elasticity __read_mostly = 8;
128 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
129 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
130 static int ip_rt_min_advmss __read_mostly = 256;
131 static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
132 static int rt_chain_length_max __read_mostly = 20;
134 static void rt_worker_func(struct work_struct *work);
135 static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
138 * Interface to generic destination cache.
141 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
142 static void ipv4_dst_destroy(struct dst_entry *dst);
143 static void ipv4_dst_ifdown(struct dst_entry *dst,
144 struct net_device *dev, int how);
145 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
146 static void ipv4_link_failure(struct sk_buff *skb);
147 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
148 static int rt_garbage_collect(struct dst_ops *ops);
149 static void rt_emergency_hash_rebuild(struct net *net);
152 static struct dst_ops ipv4_dst_ops = {
154 .protocol = __constant_htons(ETH_P_IP),
155 .gc = rt_garbage_collect,
156 .check = ipv4_dst_check,
157 .destroy = ipv4_dst_destroy,
158 .ifdown = ipv4_dst_ifdown,
159 .negative_advice = ipv4_negative_advice,
160 .link_failure = ipv4_link_failure,
161 .update_pmtu = ip_rt_update_pmtu,
162 .local_out = __ip_local_out,
163 .entry_size = sizeof(struct rtable),
164 .entries = ATOMIC_INIT(0),
167 #define ECN_OR_COST(class) TC_PRIO_##class
169 const __u8 ip_tos2prio[16] = {
173 ECN_OR_COST(BESTEFFORT),
179 ECN_OR_COST(INTERACTIVE),
181 ECN_OR_COST(INTERACTIVE),
182 TC_PRIO_INTERACTIVE_BULK,
183 ECN_OR_COST(INTERACTIVE_BULK),
184 TC_PRIO_INTERACTIVE_BULK,
185 ECN_OR_COST(INTERACTIVE_BULK)
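/* Editorial note: the table is indexed by the RFC 1349 TOS nibble, as in
 * rt_tos2priority() from include/net/route.h of this era:
 *
 *	ip_tos2prio[IPTOS_TOS(tos) >> 1]
 *
 * so each TC_PRIO_x / ECN_OR_COST(x) pair above covers a TOS value
 * without and with its low "ECN or minimise-cost" bit set. */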
193 /* The locking scheme is rather straightforward:
195 * 1) Read-Copy Update protects the buckets of the central route hash.
196 * 2) Only writers remove entries, and they hold the lock
197 * as they look at rtable reference counts.
198 * 3) Only readers acquire references to rtable entries,
199 * they do so with atomic increments and with the
200 * help of RCU.
201 */
203 struct rt_hash_bucket {
204 struct rtable *chain;
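/*
 * Editorial sketch (not part of the original file): how a lock-free
 * reader walks one chain under the scheme above. This mirrors the
 * cache-lookup loops in ip_route_input() and __ip_route_output_key()
 * further below; the helper name and the match() placeholder are
 * hypothetical.
 */
static struct rtable *rt_chain_lookup_sketch(unsigned int hash)
{
	struct rtable *rth;

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (0 /* match(rth) */) {
			/* reference taken with an atomic increment */
			dst_use(&rth->u.dst, jiffies);
			rcu_read_unlock_bh();
			return rth;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}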
207 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
208 defined(CONFIG_PROVE_LOCKING)
210 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
211 * The size of this table is a power of two and depends on the number of CPUS.
212 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
214 #ifdef CONFIG_LOCKDEP
215 # define RT_HASH_LOCK_SZ 256
218 # define RT_HASH_LOCK_SZ 4096
220 # define RT_HASH_LOCK_SZ 2048
222 # define RT_HASH_LOCK_SZ 1024
224 # define RT_HASH_LOCK_SZ 512
226 # define RT_HASH_LOCK_SZ 256
230 static spinlock_t *rt_hash_locks;
231 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
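/*
 * Editorial sketch (not part of the original file): the writer side.
 * Several buckets may share one spinlock, so writers hash the bucket
 * index into the lock table; readers never take these locks. This is
 * essentially what rt_del() further below does (rt_free() is also
 * defined further below).
 */
static void rt_chain_unlink_sketch(unsigned int hash, struct rtable *rt)
{
	struct rtable **rthp;

	spin_lock_bh(rt_hash_lock_addr(hash));
	for (rthp = &rt_hash_table[hash].chain; *rthp;
	     rthp = &(*rthp)->u.dst.rt_next) {
		if (*rthp == rt) {
			*rthp = rt->u.dst.rt_next;
			rt_free(rt);
			break;
		}
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}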
233 static __init void rt_hash_lock_init(void)
237 rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
240 panic("IP: failed to allocate rt_hash_locks\n");
242 for (i = 0; i < RT_HASH_LOCK_SZ; i++)
243 spin_lock_init(&rt_hash_locks[i]);
246 # define rt_hash_lock_addr(slot) NULL
248 static inline void rt_hash_lock_init(void)
253 static struct rt_hash_bucket *rt_hash_table __read_mostly;
254 static unsigned rt_hash_mask __read_mostly;
255 static unsigned int rt_hash_log __read_mostly;
257 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
258 #define RT_CACHE_STAT_INC(field) \
259 (__raw_get_cpu_var(rt_cache_stat).field++)
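/* Editorial note: the counters are per-CPU, so this increment needs no
 * lock or atomic operation; the /proc/net/stat/rt_cache code below prints
 * one row per possible CPU rather than summing them. */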
261 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
262 int genid)
263 {
264 return jhash_3words((__force u32)(__be32)(daddr),
265 (__force u32)(__be32)(saddr),
266 idx, genid)
267 & rt_hash_mask;
268 }
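/* Editorial note: the generation id enters as the jhash initval, so each
 * rt_cache_invalidate() both redistributes future insertions across the
 * buckets and lets lookups reject stale entries via their rt_genid. */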
270 static inline int rt_genid(struct net *net)
272 return atomic_read(&net->ipv4.rt_genid);
275 #ifdef CONFIG_PROC_FS
276 struct rt_cache_iter_state {
277 struct seq_net_private p;
282 static struct rtable *rt_cache_get_first(struct seq_file *seq)
284 struct rt_cache_iter_state *st = seq->private;
285 struct rtable *r = NULL;
287 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
288 if (!rt_hash_table[st->bucket].chain)
291 r = rcu_dereference(rt_hash_table[st->bucket].chain);
293 if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
294 r->rt_genid == st->genid)
296 r = rcu_dereference(r->u.dst.rt_next);
298 rcu_read_unlock_bh();
303 static struct rtable *__rt_cache_get_next(struct seq_file *seq,
306 struct rt_cache_iter_state *st = seq->private;
308 r = r->u.dst.rt_next;
310 rcu_read_unlock_bh();
312 if (--st->bucket < 0)
314 } while (!rt_hash_table[st->bucket].chain);
316 r = rt_hash_table[st->bucket].chain;
318 return rcu_dereference(r);
321 static struct rtable *rt_cache_get_next(struct seq_file *seq,
324 struct rt_cache_iter_state *st = seq->private;
325 while ((r = __rt_cache_get_next(seq, r)) != NULL) {
326 if (dev_net(r->u.dst.dev) != seq_file_net(seq))
328 if (r->rt_genid == st->genid)
334 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
336 struct rtable *r = rt_cache_get_first(seq);
339 while (pos && (r = rt_cache_get_next(seq, r)))
341 return pos ? NULL : r;
344 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
346 struct rt_cache_iter_state *st = seq->private;
348 return rt_cache_get_idx(seq, *pos - 1);
349 st->genid = rt_genid(seq_file_net(seq));
350 return SEQ_START_TOKEN;
353 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
357 if (v == SEQ_START_TOKEN)
358 r = rt_cache_get_first(seq);
360 r = rt_cache_get_next(seq, v);
365 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
367 if (v && v != SEQ_START_TOKEN)
368 rcu_read_unlock_bh();
371 static int rt_cache_seq_show(struct seq_file *seq, void *v)
373 if (v == SEQ_START_TOKEN)
374 seq_printf(seq, "%-127s\n",
375 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
376 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
379 struct rtable *r = v;
382 seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
383 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
384 r->u.dst.dev ? r->u.dst.dev->name : "*",
385 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
386 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
387 r->u.dst.__use, 0, (unsigned long)r->rt_src,
388 (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
389 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
390 dst_metric(&r->u.dst, RTAX_WINDOW),
391 (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
392 dst_metric(&r->u.dst, RTAX_RTTVAR)),
394 r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
395 r->u.dst.hh ? (r->u.dst.hh->hh_output ==
397 r->rt_spec_dst, &len);
399 seq_printf(seq, "%*s\n", 127 - len, "");
404 static const struct seq_operations rt_cache_seq_ops = {
405 .start = rt_cache_seq_start,
406 .next = rt_cache_seq_next,
407 .stop = rt_cache_seq_stop,
408 .show = rt_cache_seq_show,
411 static int rt_cache_seq_open(struct inode *inode, struct file *file)
413 return seq_open_net(inode, file, &rt_cache_seq_ops,
414 sizeof(struct rt_cache_iter_state));
417 static const struct file_operations rt_cache_seq_fops = {
418 .owner = THIS_MODULE,
419 .open = rt_cache_seq_open,
422 .release = seq_release_net,
426 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
431 return SEQ_START_TOKEN;
433 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
434 if (!cpu_possible(cpu))
437 return &per_cpu(rt_cache_stat, cpu);
442 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
446 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
447 if (!cpu_possible(cpu))
450 return &per_cpu(rt_cache_stat, cpu);
456 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
461 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
463 struct rt_cache_stat *st = v;
465 if (v == SEQ_START_TOKEN) {
466 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
470 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
471 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
472 atomic_read(&ipv4_dst_ops.entries),
495 static const struct seq_operations rt_cpu_seq_ops = {
496 .start = rt_cpu_seq_start,
497 .next = rt_cpu_seq_next,
498 .stop = rt_cpu_seq_stop,
499 .show = rt_cpu_seq_show,
503 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
505 return seq_open(file, &rt_cpu_seq_ops);
508 static const struct file_operations rt_cpu_seq_fops = {
509 .owner = THIS_MODULE,
510 .open = rt_cpu_seq_open,
513 .release = seq_release,
516 #ifdef CONFIG_NET_CLS_ROUTE
517 static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
518 int length, int *eof, void *data)
522 if ((offset & 3) || (length & 3))
525 if (offset >= sizeof(struct ip_rt_acct) * 256) {
530 if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
531 length = sizeof(struct ip_rt_acct) * 256 - offset;
535 offset /= sizeof(u32);
538 u32 *dst = (u32 *) buffer;
541 memset(dst, 0, length);
543 for_each_possible_cpu(i) {
547 src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
548 for (j = 0; j < length/4; j++)
556 static int __net_init ip_rt_do_proc_init(struct net *net)
558 struct proc_dir_entry *pde;
560 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
565 pde = proc_create("rt_cache", S_IRUGO,
566 net->proc_net_stat, &rt_cpu_seq_fops);
570 #ifdef CONFIG_NET_CLS_ROUTE
571 pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
572 ip_rt_acct_read, NULL);
578 #ifdef CONFIG_NET_CLS_ROUTE
580 remove_proc_entry("rt_cache", net->proc_net_stat);
583 remove_proc_entry("rt_cache", net->proc_net);
588 static void __net_exit ip_rt_do_proc_exit(struct net *net)
590 remove_proc_entry("rt_cache", net->proc_net_stat);
591 remove_proc_entry("rt_cache", net->proc_net);
592 remove_proc_entry("rt_acct", net->proc_net);
595 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
596 .init = ip_rt_do_proc_init,
597 .exit = ip_rt_do_proc_exit,
600 static int __init ip_rt_proc_init(void)
602 return register_pernet_subsys(&ip_rt_proc_ops);
606 static inline int ip_rt_proc_init(void)
610 #endif /* CONFIG_PROC_FS */
612 static inline void rt_free(struct rtable *rt)
614 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
617 static inline void rt_drop(struct rtable *rt)
620 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
623 static inline int rt_fast_clean(struct rtable *rth)
625 /* Kill broadcast/multicast entries very aggressively, if they
626 collide in hash table with more useful entries */
627 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
628 rth->fl.iif && rth->u.dst.rt_next;
631 static inline int rt_valuable(struct rtable *rth)
633 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
637 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
642 if (atomic_read(&rth->u.dst.__refcnt))
646 if (rth->u.dst.expires &&
647 time_after_eq(jiffies, rth->u.dst.expires))
650 age = jiffies - rth->u.dst.lastuse;
652 if ((age <= tmo1 && !rt_fast_clean(rth)) ||
653 (age <= tmo2 && rt_valuable(rth)))
659 /* Bits of score are:
660 * 31: very valuable entry
661 * 30: not quite useless
662 * 29..0: usage counter
664 static inline u32 rt_score(struct rtable *rt)
666 u32 score = jiffies - rt->u.dst.lastuse;
668 score = ~score & ~(3<<30);
674 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
680 static inline bool rt_caching(const struct net *net)
682 return net->ipv4.current_rt_cache_rebuild_count <=
683 net->ipv4.sysctl_rt_cache_rebuild_count;
686 static inline bool compare_hash_inputs(const struct flowi *fl1,
687 const struct flowi *fl2)
689 return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
690 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
691 (fl1->iif ^ fl2->iif)) == 0);
694 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
696 return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
697 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
698 (fl1->mark ^ fl2->mark) |
699 (*(u16 *)&fl1->nl_u.ip4_u.tos ^
700 *(u16 *)&fl2->nl_u.ip4_u.tos) |
701 (fl1->oif ^ fl2->oif) |
702 (fl1->iif ^ fl2->iif)) == 0;
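/*
 * Editorial note on the idiom above: OR-ing together the XORs of the
 * field pairs is zero iff every pair is equal, i.e.
 *
 *	((a ^ b) | (c ^ d)) == 0   <=>   a == b && c == d
 *
 * which compares all keys without a conditional branch per field.
 */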
705 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
707 return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
710 static inline int rt_is_expired(struct rtable *rth)
712 return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
716 * Perform a full scan of the hash table and free all entries.
717 * Can be called by a softirq or a process.
718 * In the latter case, we want to reschedule if necessary.
720 static void rt_do_flush(int process_context)
723 struct rtable *rth, *next;
724 struct rtable * tail;
726 for (i = 0; i <= rt_hash_mask; i++) {
727 if (process_context && need_resched())
729 rth = rt_hash_table[i].chain;
733 spin_lock_bh(rt_hash_lock_addr(i));
736 struct rtable ** prev, * p;
738 rth = rt_hash_table[i].chain;
740 /* defer releasing the head of the list until after spin_unlock */
741 for (tail = rth; tail; tail = tail->u.dst.rt_next)
742 if (!rt_is_expired(tail))
745 rt_hash_table[i].chain = tail;
747 /* call rt_free on entries after the tail requiring flush */
748 prev = &rt_hash_table[i].chain;
749 for (p = *prev; p; p = next) {
750 next = p->u.dst.rt_next;
751 if (!rt_is_expired(p)) {
752 prev = &p->u.dst.rt_next;
760 rth = rt_hash_table[i].chain;
761 rt_hash_table[i].chain = NULL;
764 spin_unlock_bh(rt_hash_lock_addr(i));
766 for (; rth != tail; rth = next) {
767 next = rth->u.dst.rt_next;
774 * While freeing expired entries, we compute average chain length
775 * and standard deviation, using fixed-point arithmetic.
776 * This is to obtain an estimate for rt_chain_length_max:
777 * rt_chain_length_max = max(elasticity, AVG + 4*SD)
778 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
782 #define ONE (1UL << FRACT_BITS)
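/*
 * Worked example (editorial), with FRACT_BITS == 3 as described above:
 * a chain holding 5 counted entries accumulates length = 5 << 3 = 40.
 * If all of samples == 16 buckets looked like that, rt_check_expire()
 * below would compute
 *
 *	avg = 640 / 16 = 40            (5.0 in fixed point)
 *	sd  = int_sqrt(25600/16 - 40*40) = 0
 *
 * and rt_chain_length_max = max(ip_rt_gc_elasticity, (40 + 0) >> 3)
 *                         = max(8, 5) = 8.
 */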
784 static void rt_check_expire(void)
786 static unsigned int rover;
787 unsigned int i = rover, goal;
788 struct rtable *rth, **rthp;
789 unsigned long length = 0, samples = 0;
790 unsigned long sum = 0, sum2 = 0;
793 mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
794 if (ip_rt_gc_timeout > 1)
795 do_div(mult, ip_rt_gc_timeout);
796 goal = (unsigned int)mult;
797 if (goal > rt_hash_mask)
798 goal = rt_hash_mask + 1;
800 for (; goal > 0; goal--) {
801 unsigned long tmo = ip_rt_gc_timeout;
803 i = (i + 1) & rt_hash_mask;
804 rthp = &rt_hash_table[i].chain;
813 spin_lock_bh(rt_hash_lock_addr(i));
814 while ((rth = *rthp) != NULL) {
815 if (rt_is_expired(rth)) {
816 *rthp = rth->u.dst.rt_next;
820 if (rth->u.dst.expires) {
821 /* Entry is expired even if it is in use */
822 if (time_before_eq(jiffies, rth->u.dst.expires)) {
824 rthp = &rth->u.dst.rt_next;
826 * Only bump our length if the hash
827 * inputs on entries n and n+1 are not
828 * the same; we only count entries on
829 * a chain with equal hash inputs once,
830 * so that entries for different QoS
831 * levels and other non-hash-input
832 * attributes don't unfairly skew
833 * the length computation
835 if ((*rthp == NULL) ||
836 !compare_hash_inputs(&(*rthp)->fl,
841 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
843 rthp = &rth->u.dst.rt_next;
844 if ((*rthp == NULL) ||
845 !compare_hash_inputs(&(*rthp)->fl,
851 /* Cleanup aged off entries. */
852 *rthp = rth->u.dst.rt_next;
855 spin_unlock_bh(rt_hash_lock_addr(i));
857 sum2 += length*length;
860 unsigned long avg = sum / samples;
861 unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
862 rt_chain_length_max = max_t(unsigned long,
864 (avg + 4*sd) >> FRACT_BITS);
870 * rt_worker_func() is run in process context.
871 * We call rt_check_expire() to scan part of the hash table.
873 static void rt_worker_func(struct work_struct *work)
876 schedule_delayed_work(&expires_work, ip_rt_gc_interval);
880 * Perturbation of rt_genid by a small quantity [1..256].
881 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
882 * many times (2^24) without producing a recently used rt_genid.
883 * Jenkins hash is strong enough that little changes of rt_genid are OK.
885 static void rt_cache_invalidate(struct net *net)
887 unsigned char shuffle;
889 get_random_bytes(&shuffle, sizeof(shuffle));
890 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
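/*
 * Editorial note: after the bump above, every cached entry whose
 * rt_genid was captured earlier fails the rt_is_expired() check defined
 * above, so readers skip it and writers reap it lazily; no chain walk
 * happens at invalidation time.
 */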
894 * delay < 0 : invalidate cache (fast : entries will be deleted later)
895 * delay >= 0 : invalidate & flush cache (can be long)
897 void rt_cache_flush(struct net *net, int delay)
899 rt_cache_invalidate(net);
900 if (delay >= 0)
901 rt_do_flush(!in_softirq());
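/*
 * Usage sketch (editorial) of the two flavours described above:
 *
 *	rt_cache_flush(net, -1);   invalidate only, entries die lazily
 *	rt_cache_flush(net, 0);    invalidate and walk the table now
 */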
905 * We change rt_genid and let gc do the cleanup
907 static void rt_secret_rebuild(unsigned long __net)
909 struct net *net = (struct net *)__net;
910 rt_cache_invalidate(net);
911 mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
914 static void rt_secret_rebuild_oneshot(struct net *net)
916 del_timer_sync(&net->ipv4.rt_secret_timer);
917 rt_cache_invalidate(net);
918 if (ip_rt_secret_interval) {
919 net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
920 add_timer(&net->ipv4.rt_secret_timer);
924 static void rt_emergency_hash_rebuild(struct net *net)
926 if (net_ratelimit()) {
927 printk(KERN_WARNING "Route hash chain too long!\n");
928 printk(KERN_WARNING "Adjust your secret_interval!\n");
931 rt_secret_rebuild_oneshot(net);
935 Short description of GC goals.
937 We want to build an algorithm which will keep the routing cache
938 at some equilibrium point, where the number of aged-off entries
939 is kept approximately equal to the number of newly generated ones.
941 The current expiration strength is the variable "expire".
942 We try to adjust it dynamically, so that when networking
943 is idle, expire is large enough to keep enough warm entries,
944 and when load increases it is reduced to limit the cache size.
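/*
 * Editorial note: concretely, each pass that misses its goal halves
 * "expire" (the "Goal is not achieved" path below), while each completed
 * invocation adds ip_rt_gc_min_interval back, capped at ip_rt_gc_timeout;
 * a simple feedback loop around the equilibrium described above.
 */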
947 static int rt_garbage_collect(struct dst_ops *ops)
949 static unsigned long expire = RT_GC_TIMEOUT;
950 static unsigned long last_gc;
952 static int equilibrium;
953 struct rtable *rth, **rthp;
954 unsigned long now = jiffies;
958 * Garbage collection is pretty expensive,
959 * so do not run it too frequently.
962 RT_CACHE_STAT_INC(gc_total);
964 if (now - last_gc < ip_rt_gc_min_interval &&
965 atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
966 RT_CACHE_STAT_INC(gc_ignored);
970 /* Calculate the number of entries we want to expire now. */
971 goal = atomic_read(&ipv4_dst_ops.entries) -
972 (ip_rt_gc_elasticity << rt_hash_log);
974 if (equilibrium < ipv4_dst_ops.gc_thresh)
975 equilibrium = ipv4_dst_ops.gc_thresh;
976 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
978 equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
979 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
982 /* We are in dangerous area. Try to reduce the cache really
983 * aggressively.
984 */
985 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
986 equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
989 if (now - last_gc >= ip_rt_gc_min_interval)
1000 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
1001 unsigned long tmo = expire;
1003 k = (k + 1) & rt_hash_mask;
1004 rthp = &rt_hash_table[k].chain;
1005 spin_lock_bh(rt_hash_lock_addr(k));
1006 while ((rth = *rthp) != NULL) {
1007 if (!rt_is_expired(rth) &&
1008 !rt_may_expire(rth, tmo, expire)) {
1010 rthp = &rth->u.dst.rt_next;
1013 *rthp = rth->u.dst.rt_next;
1017 spin_unlock_bh(rt_hash_lock_addr(k));
1026 /* Goal is not achieved. We stop the process if:
1028 - expire was reduced to zero; otherwise, expire is halved.
1029 - the table is not full.
1030 - we are called from interrupt.
1031 - the jiffies check is just a fallback/debug loop breaker;
1032 we will not spin here for a long time in any case.
1035 RT_CACHE_STAT_INC(gc_goal_miss);
1041 #if RT_CACHE_DEBUG >= 2
1042 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
1043 atomic_read(&ipv4_dst_ops.entries), goal, i);
1046 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
1048 } while (!in_softirq() && time_before_eq(jiffies, now));
1050 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
1052 if (net_ratelimit())
1053 printk(KERN_WARNING "dst cache overflow\n");
1054 RT_CACHE_STAT_INC(gc_dst_overflow);
1058 expire += ip_rt_gc_min_interval;
1059 if (expire > ip_rt_gc_timeout ||
1060 atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
1061 expire = ip_rt_gc_timeout;
1062 #if RT_CACHE_DEBUG >= 2
1063 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
1064 atomic_read(&ipv4_dst_ops.entries), goal, rover);
1069 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
1071 struct rtable *rth, **rthp;
1072 struct rtable *rthi;
1074 struct rtable *cand, **candp;
1077 int attempts = !in_softirq();
1081 min_score = ~(u32)0;
1086 if (!rt_caching(dev_net(rt->u.dst.dev))) {
1091 rthp = &rt_hash_table[hash].chain;
1094 spin_lock_bh(rt_hash_lock_addr(hash));
1095 while ((rth = *rthp) != NULL) {
1096 if (rt_is_expired(rth)) {
1097 *rthp = rth->u.dst.rt_next;
1101 if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
1103 *rthp = rth->u.dst.rt_next;
1105 * Since lookup is lockfree, the deletion
1106 * must be visible to another weakly ordered CPU before
1107 * the insertion at the start of the hash chain.
1109 rcu_assign_pointer(rth->u.dst.rt_next,
1110 rt_hash_table[hash].chain);
1112 * Since lookup is lockfree, the update writes
1113 * must be ordered for consistency on SMP.
1115 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1117 dst_use(&rth->u.dst, now);
1118 spin_unlock_bh(rt_hash_lock_addr(hash));
1125 if (!atomic_read(&rth->u.dst.__refcnt)) {
1126 u32 score = rt_score(rth);
1128 if (score <= min_score) {
1137 rthp = &rth->u.dst.rt_next;
1140 * check to see if the next entry in the chain
1141 * contains the same hash input values as rt. If it does,
1142 * this is where we will insert into the list, instead of
1143 * at the head. This groups entries that differ by aspects not
1144 * relevant to the hash function together, which we use to adjust
1145 * our chain length
1147 if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
1152 /* ip_rt_gc_elasticity used to be the average chain
1153 * length; when it is exceeded, gc becomes really aggressive.
1155 * The second limit is less certain. At the moment it allows
1156 * only 2 entries per bucket. We will see.
1158 if (chain_length > ip_rt_gc_elasticity) {
1159 *candp = cand->u.dst.rt_next;
1163 if (chain_length > rt_chain_length_max) {
1164 struct net *net = dev_net(rt->u.dst.dev);
1165 int num = ++net->ipv4.current_rt_cache_rebuild_count;
1166 if (!rt_caching(dev_net(rt->u.dst.dev))) {
1167 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
1168 rt->u.dst.dev->name, num);
1170 rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
1174 /* Try to bind the route to an ARP entry only if it is an output
1175 route or on the unicast forwarding path.
1177 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
1178 int err = arp_bind_neighbour(&rt->u.dst);
1180 spin_unlock_bh(rt_hash_lock_addr(hash));
1182 if (err != -ENOBUFS) {
1187 /* Neighbour tables are full and nothing
1188 can be released. Try to shrink the route cache;
1189 it most likely holds some neighbour records.
1191 if (attempts-- > 0) {
1192 int saved_elasticity = ip_rt_gc_elasticity;
1193 int saved_int = ip_rt_gc_min_interval;
1194 ip_rt_gc_elasticity = 1;
1195 ip_rt_gc_min_interval = 0;
1196 rt_garbage_collect(&ipv4_dst_ops);
1197 ip_rt_gc_min_interval = saved_int;
1198 ip_rt_gc_elasticity = saved_elasticity;
1202 if (net_ratelimit())
1203 printk(KERN_WARNING "Neighbour table overflow.\n");
1210 rt->u.dst.rt_next = rthi->u.dst.rt_next;
1212 rt->u.dst.rt_next = rt_hash_table[hash].chain;
1214 #if RT_CACHE_DEBUG >= 2
1215 if (rt->u.dst.rt_next) {
1217 printk(KERN_DEBUG "rt_cache @%02x: " NIPQUAD_FMT, hash,
1218 NIPQUAD(rt->rt_dst));
1219 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
1220 printk(" . " NIPQUAD_FMT, NIPQUAD(trt->rt_dst));
1225 * Since lookup is lockfree, we must make sure
1226 * previous writes to rt are committed to memory
1227 * before making rt visible to other CPUs.
1230 rcu_assign_pointer(rthi->u.dst.rt_next, rt);
1232 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1234 spin_unlock_bh(rt_hash_lock_addr(hash));
1239 void rt_bind_peer(struct rtable *rt, int create)
1241 static DEFINE_SPINLOCK(rt_peer_lock);
1242 struct inet_peer *peer;
1244 peer = inet_getpeer(rt->rt_dst, create);
1246 spin_lock_bh(&rt_peer_lock);
1247 if (rt->peer == NULL) {
1251 spin_unlock_bh(&rt_peer_lock);
1257 * Peer allocation may fail only in serious out-of-memory conditions. However,
1258 * we can still generate some output.
1259 * Random ID selection looks a bit dangerous because we have no chance
1260 * of selecting an ID that is unique within a reasonable period of time.
1261 * But a broken packet identifier may be better than no packet at all.
1263 static void ip_select_fb_ident(struct iphdr *iph)
1265 static DEFINE_SPINLOCK(ip_fb_id_lock);
1266 static u32 ip_fallback_id;
1269 spin_lock_bh(&ip_fb_id_lock);
1270 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1271 iph->id = htons(salt & 0xFFFF);
1272 ip_fallback_id = salt;
1273 spin_unlock_bh(&ip_fb_id_lock);
1276 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1278 struct rtable *rt = (struct rtable *) dst;
1281 if (rt->peer == NULL)
1282 rt_bind_peer(rt, 1);
1284 /* If a peer is attached to the destination, it is never detached,
1285 so we need not grab a lock to dereference it.
1288 iph->id = htons(inet_getid(rt->peer, more));
1292 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1293 __builtin_return_address(0));
1295 ip_select_fb_ident(iph);
1298 static void rt_del(unsigned hash, struct rtable *rt)
1300 struct rtable **rthp, *aux;
1302 rthp = &rt_hash_table[hash].chain;
1303 spin_lock_bh(rt_hash_lock_addr(hash));
1305 while ((aux = *rthp) != NULL) {
1306 if (aux == rt || rt_is_expired(aux)) {
1307 *rthp = aux->u.dst.rt_next;
1311 rthp = &aux->u.dst.rt_next;
1313 spin_unlock_bh(rt_hash_lock_addr(hash));
1316 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1317 __be32 saddr, struct net_device *dev)
1320 struct in_device *in_dev = in_dev_get(dev);
1321 struct rtable *rth, **rthp;
1322 __be32 skeys[2] = { saddr, 0 };
1323 int ikeys[2] = { dev->ifindex, 0 };
1324 struct netevent_redirect netevent;
1331 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
1332 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
1333 || ipv4_is_zeronet(new_gw))
1334 goto reject_redirect;
1336 if (!rt_caching(net))
1337 goto reject_redirect;
1339 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1340 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1341 goto reject_redirect;
1342 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1343 goto reject_redirect;
1345 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1346 goto reject_redirect;
1349 for (i = 0; i < 2; i++) {
1350 for (k = 0; k < 2; k++) {
1351 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1354 rthp=&rt_hash_table[hash].chain;
1357 while ((rth = rcu_dereference(*rthp)) != NULL) {
1360 if (rth->fl.fl4_dst != daddr ||
1361 rth->fl.fl4_src != skeys[i] ||
1362 rth->fl.oif != ikeys[k] ||
1364 rt_is_expired(rth) ||
1365 !net_eq(dev_net(rth->u.dst.dev), net)) {
1366 rthp = &rth->u.dst.rt_next;
1370 if (rth->rt_dst != daddr ||
1371 rth->rt_src != saddr ||
1373 rth->rt_gateway != old_gw ||
1374 rth->u.dst.dev != dev)
1377 dst_hold(&rth->u.dst);
1380 rt = dst_alloc(&ipv4_dst_ops);
1387 /* Copy all the information. */
1389 INIT_RCU_HEAD(&rt->u.dst.rcu_head);
1390 rt->u.dst.__use = 1;
1391 atomic_set(&rt->u.dst.__refcnt, 1);
1392 rt->u.dst.child = NULL;
1394 dev_hold(rt->u.dst.dev);
1396 in_dev_hold(rt->idev);
1397 rt->u.dst.obsolete = 0;
1398 rt->u.dst.lastuse = jiffies;
1399 rt->u.dst.path = &rt->u.dst;
1400 rt->u.dst.neighbour = NULL;
1401 rt->u.dst.hh = NULL;
1402 rt->u.dst.xfrm = NULL;
1403 rt->rt_genid = rt_genid(net);
1404 rt->rt_flags |= RTCF_REDIRECTED;
1406 /* Gateway is different ... */
1407 rt->rt_gateway = new_gw;
1409 /* Redirect received -> path was valid */
1410 dst_confirm(&rth->u.dst);
1413 atomic_inc(&rt->peer->refcnt);
1415 if (arp_bind_neighbour(&rt->u.dst) ||
1416 !(rt->u.dst.neighbour->nud_state &
1418 if (rt->u.dst.neighbour)
1419 neigh_event_send(rt->u.dst.neighbour, NULL);
1425 netevent.old = &rth->u.dst;
1426 netevent.new = &rt->u.dst;
1427 call_netevent_notifiers(NETEVENT_REDIRECT,
1431 if (!rt_intern_hash(hash, rt, &rt))
1444 #ifdef CONFIG_IP_ROUTE_VERBOSE
1445 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1446 printk(KERN_INFO "Redirect from " NIPQUAD_FMT " on %s about "
1447 NIPQUAD_FMT " ignored.\n"
1448 " Advised path = " NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
1449 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
1450 NIPQUAD(saddr), NIPQUAD(daddr));
1455 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1457 struct rtable *rt = (struct rtable *)dst;
1458 struct dst_entry *ret = dst;
1461 if (dst->obsolete) {
1464 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1465 rt->u.dst.expires) {
1466 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1468 rt_genid(dev_net(dst->dev)));
1469 #if RT_CACHE_DEBUG >= 1
1470 printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
1471 NIPQUAD_FMT "/%02x dropped\n",
1472 NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
1483 * 1. The first ip_rt_redirect_number redirects are sent
1484 * with exponential backoff, then we stop sending them at all,
1485 * assuming that the host ignores our redirects.
1486 * 2. If we did not see packets requiring redirects
1487 * during ip_rt_redirect_silence, we assume that the host
1488 * forgot the redirected route and start to send redirects again.
1490 * This algorithm is much cheaper and more intelligent than dumb load limiting
1491 * in icmp.c.
1493 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1494 * and "frag. need" (breaks PMTU discovery) in icmp.c.
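/*
 * Worked example (editorial), using the defaults above with HZ == 1000:
 * ip_rt_redirect_load is HZ/50 == 20 ms, so redirects to a host that
 * keeps ignoring them are spaced 20 ms << rate_tokens: 20 ms, 40 ms,
 * 80 ms, ... After ip_rt_redirect_number == 9 of them we go silent,
 * and only resume once ip_rt_redirect_silence == (HZ/50) << 10,
 * about 20.5 seconds, has passed since the last one.
 */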
1497 void ip_rt_send_redirect(struct sk_buff *skb)
1499 struct rtable *rt = skb->rtable;
1500 struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
1505 if (!IN_DEV_TX_REDIRECTS(in_dev))
1508 /* No redirected packets during ip_rt_redirect_silence;
1509 * reset the algorithm.
1511 if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
1512 rt->u.dst.rate_tokens = 0;
1514 /* Too many ignored redirects; do not send anything.
1515 * Set u.dst.rate_last to the last seen redirected packet.
1517 if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
1518 rt->u.dst.rate_last = jiffies;
1522 /* Check for load limit; set rate_last to the latest sent
1523 * redirect.
1524 */
1525 if (rt->u.dst.rate_tokens == 0 ||
1527 (rt->u.dst.rate_last +
1528 (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
1529 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1530 rt->u.dst.rate_last = jiffies;
1531 ++rt->u.dst.rate_tokens;
1532 #ifdef CONFIG_IP_ROUTE_VERBOSE
1533 if (IN_DEV_LOG_MARTIANS(in_dev) &&
1534 rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1536 printk(KERN_WARNING "host " NIPQUAD_FMT "/if%d ignores "
1537 "redirects for " NIPQUAD_FMT " to " NIPQUAD_FMT ".\n",
1538 NIPQUAD(rt->rt_src), rt->rt_iif,
1539 NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
1546 static int ip_error(struct sk_buff *skb)
1548 struct rtable *rt = skb->rtable;
1552 switch (rt->u.dst.error) {
1557 code = ICMP_HOST_UNREACH;
1560 code = ICMP_NET_UNREACH;
1561 IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
1562 IPSTATS_MIB_INNOROUTES);
1565 code = ICMP_PKT_FILTERED;
1570 rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
1571 if (rt->u.dst.rate_tokens > ip_rt_error_burst)
1572 rt->u.dst.rate_tokens = ip_rt_error_burst;
1573 rt->u.dst.rate_last = now;
1574 if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
1575 rt->u.dst.rate_tokens -= ip_rt_error_cost;
1576 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1579 out: kfree_skb(skb);
1584 * The last two values are not from the RFC but
1585 * are needed for AMPRnet AX.25 paths.
1588 static const unsigned short mtu_plateau[] =
1589 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1591 static inline unsigned short guess_mtu(unsigned short old_mtu)
1595 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1596 if (old_mtu > mtu_plateau[i])
1597 return mtu_plateau[i];
1598 return 68;
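/*
 * Worked example (editorial): guess_mtu(1500) scans the plateau table
 * and returns 1492, the largest plateau strictly below the old MTU;
 * values at or below the smallest plateau fall through to the 68-byte
 * IPv4 minimum.
 */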
1601 unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1602 unsigned short new_mtu,
1603 struct net_device *dev)
1606 unsigned short old_mtu = ntohs(iph->tot_len);
1608 int ikeys[2] = { dev->ifindex, 0 };
1609 __be32 skeys[2] = { iph->saddr, 0, };
1610 __be32 daddr = iph->daddr;
1611 unsigned short est_mtu = 0;
1613 if (ipv4_config.no_pmtu_disc)
1616 for (k = 0; k < 2; k++) {
1617 for (i = 0; i < 2; i++) {
1618 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1622 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1623 rth = rcu_dereference(rth->u.dst.rt_next)) {
1624 unsigned short mtu = new_mtu;
1626 if (rth->fl.fl4_dst != daddr ||
1627 rth->fl.fl4_src != skeys[i] ||
1628 rth->rt_dst != daddr ||
1629 rth->rt_src != iph->saddr ||
1630 rth->fl.oif != ikeys[k] ||
1632 dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
1633 !net_eq(dev_net(rth->u.dst.dev), net) ||
1637 if (new_mtu < 68 || new_mtu >= old_mtu) {
1639 /* BSD 4.2 compatibility hack :-( */
1641 old_mtu >= dst_mtu(&rth->u.dst) &&
1642 old_mtu >= 68 + (iph->ihl << 2))
1643 old_mtu -= iph->ihl << 2;
1645 mtu = guess_mtu(old_mtu);
1647 if (mtu <= dst_mtu(&rth->u.dst)) {
1648 if (mtu < dst_mtu(&rth->u.dst)) {
1649 dst_confirm(&rth->u.dst);
1650 if (mtu < ip_rt_min_pmtu) {
1651 mtu = ip_rt_min_pmtu;
1652 rth->u.dst.metrics[RTAX_LOCK-1] |=
1655 rth->u.dst.metrics[RTAX_MTU-1] = mtu;
1656 dst_set_expires(&rth->u.dst,
1665 return est_mtu ? : new_mtu;
1668 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1670 if (dst_mtu(dst) > mtu && mtu >= 68 &&
1671 !(dst_metric_locked(dst, RTAX_MTU))) {
1672 if (mtu < ip_rt_min_pmtu) {
1673 mtu = ip_rt_min_pmtu;
1674 dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
1676 dst->metrics[RTAX_MTU-1] = mtu;
1677 dst_set_expires(dst, ip_rt_mtu_expires);
1678 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1682 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1687 static void ipv4_dst_destroy(struct dst_entry *dst)
1689 struct rtable *rt = (struct rtable *) dst;
1690 struct inet_peer *peer = rt->peer;
1691 struct in_device *idev = rt->idev;
1704 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1707 struct rtable *rt = (struct rtable *) dst;
1708 struct in_device *idev = rt->idev;
1709 if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
1710 struct in_device *loopback_idev =
1711 in_dev_get(dev_net(dev)->loopback_dev);
1712 if (loopback_idev) {
1713 rt->idev = loopback_idev;
1719 static void ipv4_link_failure(struct sk_buff *skb)
1723 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1727 dst_set_expires(&rt->u.dst, 0);
1730 static int ip_rt_bug(struct sk_buff *skb)
1732 printk(KERN_DEBUG "ip_rt_bug: " NIPQUAD_FMT " -> " NIPQUAD_FMT ", %s\n",
1733 NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
1734 skb->dev ? skb->dev->name : "?");
1740 We do not cache the source address of the outgoing interface,
1741 because it is used only by IP RR, TS and SRR options,
1742 so it is out of the fast path.
1744 BTW remember: "addr" is allowed to be unaligned.
1748 void ip_rt_get_source(u8 *addr, struct rtable *rt)
1751 struct fib_result res;
1753 if (rt->fl.iif == 0)
1755 else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
1756 src = FIB_RES_PREFSRC(res);
1759 src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
1761 memcpy(addr, &src, 4);
1764 #ifdef CONFIG_NET_CLS_ROUTE
1765 static void set_class_tag(struct rtable *rt, u32 tag)
1767 if (!(rt->u.dst.tclassid & 0xFFFF))
1768 rt->u.dst.tclassid |= tag & 0xFFFF;
1769 if (!(rt->u.dst.tclassid & 0xFFFF0000))
1770 rt->u.dst.tclassid |= tag & 0xFFFF0000;
1774 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1776 struct fib_info *fi = res->fi;
1779 if (FIB_RES_GW(*res) &&
1780 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1781 rt->rt_gateway = FIB_RES_GW(*res);
1782 memcpy(rt->u.dst.metrics, fi->fib_metrics,
1783 sizeof(rt->u.dst.metrics));
1784 if (fi->fib_mtu == 0) {
1785 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
1786 if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1787 rt->rt_gateway != rt->rt_dst &&
1788 rt->u.dst.dev->mtu > 576)
1789 rt->u.dst.metrics[RTAX_MTU-1] = 576;
1791 #ifdef CONFIG_NET_CLS_ROUTE
1792 rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1795 rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
1797 if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
1798 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
1799 if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
1800 rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
1801 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
1802 rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
1804 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
1805 rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1807 #ifdef CONFIG_NET_CLS_ROUTE
1808 #ifdef CONFIG_IP_MULTIPLE_TABLES
1809 set_class_tag(rt, fib_rules_tclass(res));
1811 set_class_tag(rt, itag);
1813 rt->rt_type = res->type;
1816 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1817 u8 tos, struct net_device *dev, int our)
1822 struct in_device *in_dev = in_dev_get(dev);
1825 /* Primary sanity checks. */
1830 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1831 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1834 if (ipv4_is_zeronet(saddr)) {
1835 if (!ipv4_is_local_multicast(daddr))
1837 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1838 } else if (fib_validate_source(saddr, 0, tos, 0,
1839 dev, &spec_dst, &itag) < 0)
1842 rth = dst_alloc(&ipv4_dst_ops);
1846 rth->u.dst.output= ip_rt_bug;
1848 atomic_set(&rth->u.dst.__refcnt, 1);
1849 rth->u.dst.flags= DST_HOST;
1850 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1851 rth->u.dst.flags |= DST_NOPOLICY;
1852 rth->fl.fl4_dst = daddr;
1853 rth->rt_dst = daddr;
1854 rth->fl.fl4_tos = tos;
1855 rth->fl.mark = skb->mark;
1856 rth->fl.fl4_src = saddr;
1857 rth->rt_src = saddr;
1858 #ifdef CONFIG_NET_CLS_ROUTE
1859 rth->u.dst.tclassid = itag;
1862 rth->fl.iif = dev->ifindex;
1863 rth->u.dst.dev = init_net.loopback_dev;
1864 dev_hold(rth->u.dst.dev);
1865 rth->idev = in_dev_get(rth->u.dst.dev);
1867 rth->rt_gateway = daddr;
1868 rth->rt_spec_dst= spec_dst;
1869 rth->rt_genid = rt_genid(dev_net(dev));
1870 rth->rt_flags = RTCF_MULTICAST;
1871 rth->rt_type = RTN_MULTICAST;
1873 rth->u.dst.input= ip_local_deliver;
1874 rth->rt_flags |= RTCF_LOCAL;
1877 #ifdef CONFIG_IP_MROUTE
1878 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1879 rth->u.dst.input = ip_mr_input;
1881 RT_CACHE_STAT_INC(in_slow_mc);
1884 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1885 return rt_intern_hash(hash, rth, &skb->rtable);
1897 static void ip_handle_martian_source(struct net_device *dev,
1898 struct in_device *in_dev,
1899 struct sk_buff *skb,
1903 RT_CACHE_STAT_INC(in_martian_src);
1904 #ifdef CONFIG_IP_ROUTE_VERBOSE
1905 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1907 * RFC1812 recommendation: if the source is martian,
1908 * the only hint is the MAC header.
1910 printk(KERN_WARNING "martian source " NIPQUAD_FMT " from "
1911 NIPQUAD_FMT", on dev %s\n",
1912 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1913 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1915 const unsigned char *p = skb_mac_header(skb);
1916 printk(KERN_WARNING "ll header: ");
1917 for (i = 0; i < dev->hard_header_len; i++, p++) {
1919 if (i < (dev->hard_header_len - 1))
1928 static int __mkroute_input(struct sk_buff *skb,
1929 struct fib_result *res,
1930 struct in_device *in_dev,
1931 __be32 daddr, __be32 saddr, u32 tos,
1932 struct rtable **result)
1937 struct in_device *out_dev;
1942 /* get a working reference to the output device */
1943 out_dev = in_dev_get(FIB_RES_DEV(*res));
1944 if (out_dev == NULL) {
1945 if (net_ratelimit())
1946 printk(KERN_CRIT "Bug in ip_route_input" \
1947 "_slow(). Please, report\n");
1952 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1953 in_dev->dev, &spec_dst, &itag);
1955 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1963 flags |= RTCF_DIRECTSRC;
1965 if (out_dev == in_dev && err &&
1966 (IN_DEV_SHARED_MEDIA(out_dev) ||
1967 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1968 flags |= RTCF_DOREDIRECT;
1970 if (skb->protocol != htons(ETH_P_IP)) {
1971 /* Not IP (i.e. ARP). Do not create a route if it is
1972 * invalid for proxy ARP. DNAT routes are always valid.
1974 if (out_dev == in_dev) {
1981 rth = dst_alloc(&ipv4_dst_ops);
1987 atomic_set(&rth->u.dst.__refcnt, 1);
1988 rth->u.dst.flags= DST_HOST;
1989 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1990 rth->u.dst.flags |= DST_NOPOLICY;
1991 if (IN_DEV_CONF_GET(out_dev, NOXFRM))
1992 rth->u.dst.flags |= DST_NOXFRM;
1993 rth->fl.fl4_dst = daddr;
1994 rth->rt_dst = daddr;
1995 rth->fl.fl4_tos = tos;
1996 rth->fl.mark = skb->mark;
1997 rth->fl.fl4_src = saddr;
1998 rth->rt_src = saddr;
1999 rth->rt_gateway = daddr;
2001 rth->fl.iif = in_dev->dev->ifindex;
2002 rth->u.dst.dev = (out_dev)->dev;
2003 dev_hold(rth->u.dst.dev);
2004 rth->idev = in_dev_get(rth->u.dst.dev);
2006 rth->rt_spec_dst= spec_dst;
2008 rth->u.dst.input = ip_forward;
2009 rth->u.dst.output = ip_output;
2010 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
2012 rt_set_nexthop(rth, res, itag);
2014 rth->rt_flags = flags;
2019 /* release the working reference to the output device */
2020 in_dev_put(out_dev);
2024 static int ip_mkroute_input(struct sk_buff *skb,
2025 struct fib_result *res,
2026 const struct flowi *fl,
2027 struct in_device *in_dev,
2028 __be32 daddr, __be32 saddr, u32 tos)
2030 struct rtable* rth = NULL;
2034 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2035 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
2036 fib_select_multipath(fl, res);
2039 /* create a routing cache entry */
2040 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2044 /* put it into the cache */
2045 hash = rt_hash(daddr, saddr, fl->iif,
2046 rt_genid(dev_net(rth->u.dst.dev)));
2047 return rt_intern_hash(hash, rth, &skb->rtable);
2051 * NOTE. We drop all packets that have a local source
2052 * address, because every properly looped-back packet
2053 * must have a correct destination already attached by the output routine.
2055 * Such an approach solves two big problems:
2056 * 1. Non-simplex devices are handled properly.
2057 * 2. IP spoofing attempts are filtered with a 100% guarantee.
2060 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2061 u8 tos, struct net_device *dev)
2063 struct fib_result res;
2064 struct in_device *in_dev = in_dev_get(dev);
2065 struct flowi fl = { .nl_u = { .ip4_u =
2069 .scope = RT_SCOPE_UNIVERSE,
2072 .iif = dev->ifindex };
2075 struct rtable * rth;
2080 struct net * net = dev_net(dev);
2082 /* IP on this device is disabled. */
2087 /* Check for the most weird martians, which may not be detected
2088 by fib_lookup.
2089 */
2091 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2092 ipv4_is_loopback(saddr))
2093 goto martian_source;
2095 if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
2098 /* Accept zero addresses only to limited broadcast;
2099 * I do not even know whether to fix this or not. Waiting for complaints :-)
2101 if (ipv4_is_zeronet(saddr))
2102 goto martian_source;
2104 if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
2105 ipv4_is_loopback(daddr))
2106 goto martian_destination;
2109 * Now we are ready to route the packet.
2111 if ((err = fib_lookup(net, &fl, &res)) != 0) {
2112 if (!IN_DEV_FORWARD(in_dev))
2118 RT_CACHE_STAT_INC(in_slow_tot);
2120 if (res.type == RTN_BROADCAST)
2123 if (res.type == RTN_LOCAL) {
2125 result = fib_validate_source(saddr, daddr, tos,
2126 net->loopback_dev->ifindex,
2127 dev, &spec_dst, &itag);
2129 goto martian_source;
2131 flags |= RTCF_DIRECTSRC;
2136 if (!IN_DEV_FORWARD(in_dev))
2138 if (res.type != RTN_UNICAST)
2139 goto martian_destination;
2141 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
2149 if (skb->protocol != htons(ETH_P_IP))
2152 if (ipv4_is_zeronet(saddr))
2153 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2155 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
2158 goto martian_source;
2160 flags |= RTCF_DIRECTSRC;
2162 flags |= RTCF_BROADCAST;
2163 res.type = RTN_BROADCAST;
2164 RT_CACHE_STAT_INC(in_brd);
2167 rth = dst_alloc(&ipv4_dst_ops);
2171 rth->u.dst.output= ip_rt_bug;
2172 rth->rt_genid = rt_genid(net);
2174 atomic_set(&rth->u.dst.__refcnt, 1);
2175 rth->u.dst.flags= DST_HOST;
2176 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2177 rth->u.dst.flags |= DST_NOPOLICY;
2178 rth->fl.fl4_dst = daddr;
2179 rth->rt_dst = daddr;
2180 rth->fl.fl4_tos = tos;
2181 rth->fl.mark = skb->mark;
2182 rth->fl.fl4_src = saddr;
2183 rth->rt_src = saddr;
2184 #ifdef CONFIG_NET_CLS_ROUTE
2185 rth->u.dst.tclassid = itag;
2188 rth->fl.iif = dev->ifindex;
2189 rth->u.dst.dev = net->loopback_dev;
2190 dev_hold(rth->u.dst.dev);
2191 rth->idev = in_dev_get(rth->u.dst.dev);
2192 rth->rt_gateway = daddr;
2193 rth->rt_spec_dst= spec_dst;
2194 rth->u.dst.input= ip_local_deliver;
2195 rth->rt_flags = flags|RTCF_LOCAL;
2196 if (res.type == RTN_UNREACHABLE) {
2197 rth->u.dst.input= ip_error;
2198 rth->u.dst.error= -err;
2199 rth->rt_flags &= ~RTCF_LOCAL;
2201 rth->rt_type = res.type;
2202 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2203 err = rt_intern_hash(hash, rth, &skb->rtable);
2207 RT_CACHE_STAT_INC(in_no_route);
2208 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2209 res.type = RTN_UNREACHABLE;
2215 * Do not cache martian addresses: they should be logged (RFC1812)
2217 martian_destination:
2218 RT_CACHE_STAT_INC(in_martian_dst);
2219 #ifdef CONFIG_IP_ROUTE_VERBOSE
2220 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2221 printk(KERN_WARNING "martian destination " NIPQUAD_FMT " from "
2222 NIPQUAD_FMT ", dev %s\n",
2223 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
2227 err = -EHOSTUNREACH;
2239 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2243 int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2244 u8 tos, struct net_device *dev)
2246 struct rtable * rth;
2248 int iif = dev->ifindex;
2253 if (!rt_caching(net))
2256 tos &= IPTOS_RT_MASK;
2257 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2260 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2261 rth = rcu_dereference(rth->u.dst.rt_next)) {
2262 if (((rth->fl.fl4_dst ^ daddr) |
2263 (rth->fl.fl4_src ^ saddr) |
2264 (rth->fl.iif ^ iif) |
2266 (rth->fl.fl4_tos ^ tos)) == 0 &&
2267 rth->fl.mark == skb->mark &&
2268 net_eq(dev_net(rth->u.dst.dev), net) &&
2269 !rt_is_expired(rth)) {
2270 dst_use(&rth->u.dst, jiffies);
2271 RT_CACHE_STAT_INC(in_hit);
2276 RT_CACHE_STAT_INC(in_hlist_search);
2281 /* Multicast recognition logic is moved from the route cache to here.
2282 The problem was that too many Ethernet cards have broken/missing
2283 hardware multicast filters :-( As a result, a host on a multicast
2284 network acquires a lot of useless route cache entries, e.g. from
2285 SDR messages from all over the world. Now we try to get rid of them.
2286 Really, provided the software IP multicast filter is organized
2287 reasonably (at least, hashed), it does not result in a slowdown
2288 compared with route cache reject entries.
2289 Note that multicast routers are not affected, because a
2290 route cache entry is created eventually.
2292 if (ipv4_is_multicast(daddr)) {
2293 struct in_device *in_dev;
2296 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
2297 int our = ip_check_mc(in_dev, daddr, saddr,
2298 ip_hdr(skb)->protocol);
2300 #ifdef CONFIG_IP_MROUTE
2301 || (!ipv4_is_local_multicast(daddr) &&
2302 IN_DEV_MFORWARD(in_dev))
2306 return ip_route_input_mc(skb, daddr, saddr,
2313 return ip_route_input_slow(skb, daddr, saddr, tos, dev);
2316 static int __mkroute_output(struct rtable **result,
2317 struct fib_result *res,
2318 const struct flowi *fl,
2319 const struct flowi *oldflp,
2320 struct net_device *dev_out,
2324 struct in_device *in_dev;
2325 u32 tos = RT_FL_TOS(oldflp);
2328 if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
2331 if (fl->fl4_dst == htonl(0xFFFFFFFF))
2332 res->type = RTN_BROADCAST;
2333 else if (ipv4_is_multicast(fl->fl4_dst))
2334 res->type = RTN_MULTICAST;
2335 else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
2338 if (dev_out->flags & IFF_LOOPBACK)
2339 flags |= RTCF_LOCAL;
2341 /* get a working reference to the inet device */
2342 in_dev = in_dev_get(dev_out);
2346 if (res->type == RTN_BROADCAST) {
2347 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2349 fib_info_put(res->fi);
2352 } else if (res->type == RTN_MULTICAST) {
2353 flags |= RTCF_MULTICAST|RTCF_LOCAL;
2354 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2356 flags &= ~RTCF_LOCAL;
2357 /* If the multicast route does not exist, use
2358 the default one, but do not gateway in this case.
2361 if (res->fi && res->prefixlen < 4) {
2362 fib_info_put(res->fi);
2368 rth = dst_alloc(&ipv4_dst_ops);
2374 atomic_set(&rth->u.dst.__refcnt, 1);
2375 rth->u.dst.flags= DST_HOST;
2376 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2377 rth->u.dst.flags |= DST_NOXFRM;
2378 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2379 rth->u.dst.flags |= DST_NOPOLICY;
2381 rth->fl.fl4_dst = oldflp->fl4_dst;
2382 rth->fl.fl4_tos = tos;
2383 rth->fl.fl4_src = oldflp->fl4_src;
2384 rth->fl.oif = oldflp->oif;
2385 rth->fl.mark = oldflp->mark;
2386 rth->rt_dst = fl->fl4_dst;
2387 rth->rt_src = fl->fl4_src;
2388 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
2389 /* get references to the devices that are to be held by the routing
2391 rth->u.dst.dev = dev_out;
2393 rth->idev = in_dev_get(dev_out);
2394 rth->rt_gateway = fl->fl4_dst;
2395 rth->rt_spec_dst= fl->fl4_src;
2397 rth->u.dst.output=ip_output;
2398 rth->rt_genid = rt_genid(dev_net(dev_out));
2400 RT_CACHE_STAT_INC(out_slow_tot);
2402 if (flags & RTCF_LOCAL) {
2403 rth->u.dst.input = ip_local_deliver;
2404 rth->rt_spec_dst = fl->fl4_dst;
2406 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2407 rth->rt_spec_dst = fl->fl4_src;
2408 if (flags & RTCF_LOCAL &&
2409 !(dev_out->flags & IFF_LOOPBACK)) {
2410 rth->u.dst.output = ip_mc_output;
2411 RT_CACHE_STAT_INC(out_slow_mc);
2413 #ifdef CONFIG_IP_MROUTE
2414 if (res->type == RTN_MULTICAST) {
2415 if (IN_DEV_MFORWARD(in_dev) &&
2416 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
2417 rth->u.dst.input = ip_mr_input;
2418 rth->u.dst.output = ip_mc_output;
2424 rt_set_nexthop(rth, res, 0);
2426 rth->rt_flags = flags;
2430 /* release work reference to inet device */
2436 static int ip_mkroute_output(struct rtable **rp,
2437 struct fib_result *res,
2438 const struct flowi *fl,
2439 const struct flowi *oldflp,
2440 struct net_device *dev_out,
2443 struct rtable *rth = NULL;
2444 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2447 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2448 rt_genid(dev_net(dev_out)));
2449 err = rt_intern_hash(hash, rth, rp);
2456 * Major route resolver routine.
2459 static int ip_route_output_slow(struct net *net, struct rtable **rp,
2460 const struct flowi *oldflp)
2462 u32 tos = RT_FL_TOS(oldflp);
2463 struct flowi fl = { .nl_u = { .ip4_u =
2464 { .daddr = oldflp->fl4_dst,
2465 .saddr = oldflp->fl4_src,
2466 .tos = tos & IPTOS_RT_MASK,
2467 .scope = ((tos & RTO_ONLINK) ?
2471 .mark = oldflp->mark,
2472 .iif = net->loopback_dev->ifindex,
2473 .oif = oldflp->oif };
2474 struct fib_result res;
2476 struct net_device *dev_out = NULL;
2482 #ifdef CONFIG_IP_MULTIPLE_TABLES
2486 if (oldflp->fl4_src) {
2488 if (ipv4_is_multicast(oldflp->fl4_src) ||
2489 ipv4_is_lbcast(oldflp->fl4_src) ||
2490 ipv4_is_zeronet(oldflp->fl4_src))
2493 /* I removed the check for oif == dev_out->oif here.
2494 It was wrong for two reasons:
2495 1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
2496 is assigned to multiple interfaces.
2497 2. Moreover, we are allowed to send packets with saddr
2498 of another iface. --ANK
2501 if (oldflp->oif == 0
2502 && (ipv4_is_multicast(oldflp->fl4_dst) ||
2503 oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
2504 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2505 dev_out = ip_dev_find(net, oldflp->fl4_src);
2506 if (dev_out == NULL)
2509 /* Special hack: user can direct multicasts
2510 and limited broadcast via necessary interface
2511 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2512 This hack is not just for fun, it allows
2513 vic, vat and friends to work.
2514 They bind a socket to loopback, set ttl to zero
2515 and expect that it will work.
2516 From the viewpoint of the routing cache they are broken,
2517 because we are not allowed to build a multicast path
2518 with a loopback source addr (look, the routing cache
2519 cannot know that ttl is zero, so that the packet
2520 will not leave this host and the route is valid).
2521 Luckily, this hack is a good workaround.
2524 fl.oif = dev_out->ifindex;
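		/* Example (illustrative userspace sketch, not part of this
		 * file; addresses and port are example values): the hack
		 * above is what lets an application pick the egress
		 * interface for a limited broadcast simply by binding to
		 * that interface's address, with no IP_MULTICAST_IF or
		 * IP_PKTINFO involved:
		 *
		 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
		 *	int on = 1;
		 *	struct sockaddr_in src = { .sin_family = AF_INET };
		 *	struct sockaddr_in dst = { .sin_family = AF_INET };
		 *
		 *	src.sin_addr.s_addr = inet_addr("192.0.2.1");   // local addr of eth0
		 *	dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);  // 255.255.255.255
		 *	dst.sin_port = htons(5000);
		 *	setsockopt(fd, SOL_SOCKET, SO_BROADCAST, &on, sizeof(on));
		 *	bind(fd, (struct sockaddr *)&src, sizeof(src));
		 *	sendto(fd, "hi", 2, 0, (struct sockaddr *)&dst, sizeof(dst));
		 *
		 * With oif == 0 and saddr set, the branch above resolves the
		 * device via ip_dev_find(net, saddr), so the datagram leaves
		 * through eth0.
		 */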
		if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = ip_dev_find(net, oldflp->fl4_src);
			if (dev_out == NULL)
				goto out;
			dev_put(dev_out);
			dev_out = NULL;
		}
	}

	if (oldflp->oif) {
		dev_out = dev_get_by_index(net, oldflp->oif);
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (__in_dev_get_rtnl(dev_out) == NULL) {
			dev_put(dev_out);
			goto out;	/* Wrong error code */
		}

		if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
		    oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
			if (!fl.fl4_src)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl.fl4_src) {
			if (ipv4_is_multicast(oldflp->fl4_dst))
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      fl.fl4_scope);
			else if (!oldflp->fl4_dst)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}
	if (!fl.fl4_dst) {
		if (fl.fl4_src)
			fl.fl4_dst = fl.fl4_src;
		else
			fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}
	if (fib_lookup(net, &fl, &res)) {
		res.fi = NULL;
		if (oldflp->oif) {
			/* Apparently, routing tables are wrong. Assume
			   that the destination is on-link.

			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if the destination is gatewayed,
			   rather than direct. Moreover, if MSG_DONTROUTE
			   is set, we send the packet, ignoring both the
			   routing tables and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl.fl4_src == 0)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		if (dev_out)
			dev_put(dev_out);
		err = -ENETUNREACH;
		goto out;
	}
	free_res = 1;
	if (res.type == RTN_LOCAL) {
		if (!fl.fl4_src)
			fl.fl4_src = fl.fl4_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			fib_info_put(res.fi);
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		fib_select_multipath(&fl, &res);
	else
#endif
	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
		fib_select_default(net, &fl, &res);

	if (!fl.fl4_src)
		fl.fl4_src = FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;

make_route:
	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);

	if (free_res)
		fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:
	return err;
}
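/* Example (illustrative userspace sketch, not part of this file; the peer
 * address is an example value): the fib_lookup() failure branch above
 * refers to sends that bypass the routing tables entirely. A datagram
 * socket can force a direct on-link transmit with MSG_DONTROUTE:
 *
 *	struct sockaddr_in dst = { .sin_family = AF_INET };
 *
 *	dst.sin_addr.s_addr = inet_addr("198.51.100.7");
 *	dst.sin_port = htons(7);
 *	sendto(fd, buf, len, MSG_DONTROUTE,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 *
 * The per-socket equivalent is setsockopt(fd, SOL_SOCKET, SO_DONTROUTE,
 * ...). Either way the destination is treated as directly reachable on
 * the chosen interface, whether or not a matching route exists.
 */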
int __ip_route_output_key(struct net *net, struct rtable **rp,
			  const struct flowi *flp)
{
	unsigned hash;
	struct rtable *rth;

	if (!rt_caching(net))
		goto slow_output;

	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == flp->fl4_dst &&
		    rth->fl.fl4_src == flp->fl4_src &&
		    rth->fl.iif == 0 &&
		    rth->fl.oif == flp->oif &&
		    rth->fl.mark == flp->mark &&
		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
		      (IPTOS_RT_MASK | RTO_ONLINK)) &&
		    net_eq(dev_net(rth->u.dst.dev), net) &&
		    !rt_is_expired(rth)) {
			dst_use(&rth->u.dst, jiffies);
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			*rp = rth;
			return 0;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

slow_output:
	return ip_route_output_slow(net, rp, flp);
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.protocol		= __constant_htons(ETH_P_IP),
	.destroy		= ipv4_dst_destroy,
	.check			= ipv4_dst_check,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.entry_size		= sizeof(struct rtable),
	.entries		= ATOMIC_INIT(0),
};
static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
{
	struct rtable *ort = *rp;
	struct rtable *rt = (struct rtable *)
		dst_alloc(&ipv4_dst_blackhole_ops);

	if (rt) {
		struct dst_entry *new = &rt->u.dst;

		atomic_set(&new->__refcnt, 1);
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;
		memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));

		new->dev = ort->u.dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->fl = ort->fl;

		rt->idev = ort->idev;
		if (rt->idev)
			in_dev_hold(rt->idev);
		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_dst = ort->rt_dst;
		rt->rt_src = ort->rt_src;
		rt->rt_iif = ort->rt_iif;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_spec_dst = ort->rt_spec_dst;
		rt->peer = ort->peer;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);

		dst_free(new);
	}

	dst_release(&(*rp)->u.dst);
	*rp = rt;
	return (rt ? 0 : -ENOMEM);
}
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
			 struct sock *sk, int flags)
{
	int err;

	if ((err = __ip_route_output_key(net, rp, flp)) != 0)
		return err;

	if (flp->proto) {
		if (!flp->fl4_src)
			flp->fl4_src = (*rp)->rt_src;
		if (!flp->fl4_dst)
			flp->fl4_dst = (*rp)->rt_dst;
		err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
				    flags ? XFRM_LOOKUP_WAIT : 0);
		if (err == -EREMOTE)
			err = ipv4_dst_blackhole(net, rp, flp);

		return err;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ip_route_output_flow);
int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
	return ip_route_output_flow(net, rp, flp, NULL, 0);
}
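/* Example (kernel-side sketch, not part of this file; daddr, saddr and
 * tos are placeholders for the caller's values): a typical in-kernel
 * caller resolves an output route by filling a flow key, then drops the
 * cached entry with ip_rt_put() when done:
 *
 *	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr,
 *						 .saddr = saddr,
 *						 .tos = RT_TOS(tos) } } };
 *	struct rtable *rt;
 *
 *	if (ip_route_output_key(net, &rt, &fl))
 *		return;				// no route to daddr
 *	// ... use rt->u.dst.dev, rt->rt_gateway, dst_mtu(&rt->u.dst) ...
 *	ip_rt_put(rt);
 */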
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = skb->rtable;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = rt->fl.fl4_tos;
	r->rtm_table = RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
	if (rt->fl.fl4_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
	}
	if (rt->u.dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
	if (rt->u.dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
#endif
	if (rt->fl.iif)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->fl.fl4_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto nla_put_failure;
	error = rt->u.dst.error;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rt->peer) {
		id = rt->peer->ip_id_count;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
			int err = ipmr_get_route(skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol = htons(ETH_P_IP);
		skb->dev = dev;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb->rtable;
		if (err == 0 && rt->u.dst.error)
			err = -rt->u.dst.error;
	} else {
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = dst,
					.saddr = src,
					.tos = rtm->rtm_tos,
				},
			},
			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
		};
		err = ip_route_output_key(net, &rt, &fl);
	}
	if (err)
		goto errout_free;

	skb->rtable = rt;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
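/* Example (illustrative userspace sketch, not part of this file): this
 * handler is what services "ip route get". A minimal RTM_GETROUTE
 * query carrying only an RTA_DST attribute could look like:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtmsg rtm;
 *		struct rtattr rta;
 *		__be32 dst;
 *	} req = {
 *		.nlh.nlmsg_len = sizeof(req),
 *		.nlh.nlmsg_type = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.rtm.rtm_family = AF_INET,
 *		.rta.rta_type = RTA_DST,
 *		.rta.rta_len = RTA_LENGTH(sizeof(__be32)),
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	req.dst = ...destination address...;	// placeholder
 *	send(fd, &req, sizeof(req), 0);
 *	// the reply is the RTM_NEWROUTE message built by rt_fill_info()
 */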
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;
	struct net *net;

	net = sock_net(skb->sk);

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
		if (!rt_hash_table[h].chain)
			continue;
		rcu_read_lock_bh();
		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
				continue;
			if (rt_is_expired(rt))
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}
#ifdef CONFIG_SYSCTL
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
				     struct file *filp, void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, filp, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}
static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
					      void __user *oldval,
					      size_t __user *oldlenp,
					      void __user *newval,
					      size_t newlen)
{
	int delay;
	struct net *net;
	if (newlen != sizeof(int))
		return -EINVAL;
	if (get_user(delay, (int __user *)newval))
		return -EFAULT;
	net = (struct net *)table->extra1;
	rt_cache_flush(net, delay);
	return 0;
}
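/* Example (illustrative userspace sketch, not part of this file): both
 * handlers above back the net.ipv4.route.flush sysctl; writing an
 * integer delay flushes the routing cache:
 *
 *	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);
 *
 *	write(fd, "0\n", 2);	// flush immediately (delay 0)
 *	close(fd);
 *
 * i.e. the same as "sysctl -w net.ipv4.route.flush=0". The entry is
 * write-only (mode 0200), and the proc handler rejects reads with
 * -EINVAL.
 */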
static void rt_secret_reschedule(int old)
{
	struct net *net;
	int new = ip_rt_secret_interval;
	int diff = new - old;

	if (!diff)
		return;

	rtnl_lock();
	for_each_net(net) {
		int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);

		if (!new)
			continue;

		if (deleted) {
			long time = net->ipv4.rt_secret_timer.expires - jiffies;

			if (time <= 0 || (time += diff) <= 0)
				time = 0;

			net->ipv4.rt_secret_timer.expires = time;
		} else
			net->ipv4.rt_secret_timer.expires = new;

		net->ipv4.rt_secret_timer.expires += jiffies;
		add_timer(&net->ipv4.rt_secret_timer);
	}
	rtnl_unlock();
}
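/* Worked example: if the interval is lowered from 600s to 300s
 * (diff = -300s) while a pending timer has 400s left, its expiry is
 * pulled in to 100s from now; with only 200s left, time would go
 * negative and is clamped so the timer fires immediately. A timer that
 * was not pending is simply restarted one full new interval from now.
 */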
static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
					  struct file *filp,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int old = ip_rt_secret_interval;
	int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);

	rt_secret_reschedule(old);

	return ret;
}
static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table,
						   void __user *oldval,
						   size_t __user *oldlenp,
						   void __user *newval,
						   size_t newlen)
{
	int old = ip_rt_secret_interval;
	int ret = sysctl_jiffies(table, oldval, oldlenp, newval, newlen);

	rt_secret_reschedule(old);

	return ret;
}
static ctl_table ipv4_route_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_ms_jiffies,
		.strategy	= &sysctl_ms_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
		.procname	= "secret_interval",
		.data		= &ip_rt_secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &ipv4_sysctl_rt_secret_interval,
		.strategy	= &ipv4_sysctl_rt_secret_interval_strategy,
	},
	{ .ctl_name = 0 }
};
static struct ctl_table empty[1];

static struct ctl_table ipv4_skeleton[] =
{
	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
	  .mode = 0555, .child = ipv4_route_table},
	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
	  .mode = 0555, .child = empty},
	{ }
};

static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
	{ },
};
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= &ipv4_sysctl_rtcache_flush,
		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
	},
	{ .ctl_name = 0 },
};

static __net_initdata struct ctl_path ipv4_route_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
	{ },
};
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (net != &init_net) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr =
		register_net_sysctl_table(net, ipv4_route_path, tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
static __net_init int rt_secret_timer_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid,
		   (int) ((num_physpages ^ (num_physpages>>8)) ^
			  (jiffies ^ (jiffies >> 7))));

	net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
	net->ipv4.rt_secret_timer.data = (unsigned long)net;
	init_timer_deferrable(&net->ipv4.rt_secret_timer);

	if (ip_rt_secret_interval) {
		net->ipv4.rt_secret_timer.expires =
			jiffies + net_random() % ip_rt_secret_interval +
			ip_rt_secret_interval;
		add_timer(&net->ipv4.rt_secret_timer);
	}
	return 0;
}
static __net_exit void rt_secret_timer_exit(struct net *net)
{
	del_timer_sync(&net->ipv4.rt_secret_timer);
}

static __net_initdata struct pernet_operations rt_secret_timer_ops = {
	.init = rt_secret_timer_init,
	.exit = rt_secret_timer_exit,
};
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */

static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
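/* Example: the route cache hash table size can be pinned from the
 * kernel command line, e.g. booting with "rhash_entries=262144" makes
 * alloc_large_system_hash() in ip_rt_init() size the table for that
 * many entries instead of scaling it from available memory.
 */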
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_NET_CLS_ROUTE
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(num_physpages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					rhash_entries ? 0 : 512 * 1024);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();
	/* All the timers, started at system startup, tend
	   to synchronize. Perturb it a bit.
	 */
	schedule_delayed_work(&expires_work,
			      net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
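	/* The delay above is (net_random() % ip_rt_gc_interval) +
	 * ip_rt_gc_interval, i.e. uniform over [ip_rt_gc_interval,
	 * 2 * ip_rt_gc_interval), so periodic GC on machines booted
	 * together does not fire in lock-step.
	 */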
	if (register_pernet_subsys(&rt_secret_timer_ops))
		printk(KERN_ERR "Unable to setup rt_secret_timer\n");

	if (ip_rt_proc_init())
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	return rc;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif
EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);