/*
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation; either version
 *   2 of the License, or (at your option) any later version.
 *
 *   Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *     & Swedish University of Agricultural Sciences.
 *
 *   Jens Laas <jens.laas@data.slu.se> Swedish University of
 *     Agricultural Sciences.
 *
 *   Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work come from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */
#define VERSION "0.409"

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>
#include "fib_lookup.h"
#define MAX_STAT_DEPTH 32

#define KEYLENGTH	(8*sizeof(t_key))
#define KEY_MAX		((t_key)~0)

typedef unsigned int t_key;

#define IS_TNODE(n)	((n)->bits)
#define IS_LEAF(n)	(!(n)->bits)

#define get_index(_key, _kv)	(((_key) ^ (_kv)->key) >> (_kv)->pos)

struct tnode {
	struct rcu_head rcu;

	t_key empty_children;		/* KEYLENGTH bits needed */
	t_key full_children;		/* KEYLENGTH bits needed */
	struct tnode __rcu *parent;

	t_key key;
	unsigned char slen;
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */

	union {
		/* This list pointer is valid if (pos | bits) == 0 (LEAF) */
		struct hlist_head leaf;
		/* This array is valid if (pos | bits) > 0 (TNODE) */
		struct tnode __rcu *tnode[0];
	};
};

#define TNODE_SIZE(n)	offsetof(struct tnode, tnode[n])
#define LEAF_SIZE	TNODE_SIZE(1)
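/* A leaf reuses the tnode layout with the union holding an hlist_head of
 * fib aliases rather than a child array, so LEAF_SIZE is simply
 * TNODE_SIZE(1): the shared header plus room for one pointer-sized member.
 */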
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct tnode __rcu *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats;
#endif
};

static void resize(struct trie *t, struct tnode *tn);
static size_t tnode_free_size;

/* synchronize_rcu after call_rcu for that many pages; it should be especially
 * useful before resizing the root node with PREEMPT_NONE configs; the value
 * was obtained experimentally, aiming to avoid visible slowdown.
 */
static const int sync_pages = 128;

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;
/* caller must hold RTNL */
#define node_parent(n) rtnl_dereference((n)->parent)

/* caller must hold RCU read lock or RTNL */
#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)

/* wrapper for rcu_assign_pointer */
static inline void node_set_parent(struct tnode *n, struct tnode *tp)
{
	if (n)
		rcu_assign_pointer(n->parent, tp);
}

#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)

/* This provides us with the number of children in this node, in the case of a
 * leaf this will return 0 meaning none of the children are accessible.
 */
static inline unsigned long tnode_child_length(const struct tnode *tn)
{
	return (1ul << tn->bits) & ~(1ul);
}
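/* For example, a leaf has bits == 0, so (1ul << 0) & ~1ul yields 0
 * children, while a tnode with bits == n (n >= 1) yields exactly 2^n.
 * The bits == KEYLENGTH case relies on unsigned long being wider than
 * 32 bits; such nodes are only ever allocated when that holds.
 */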
/* caller must hold RTNL */
static inline struct tnode *tnode_get_child(const struct tnode *tn,
					    unsigned long i)
{
	return rtnl_dereference(tn->tnode[i]);
}

/* caller must hold RCU read lock or RTNL */
static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
						unsigned long i)
{
	return rcu_dereference_rtnl(tn->tnode[i]);
}

static inline struct fib_table *trie_get_table(struct trie *t)
{
	unsigned long *tb_data = (unsigned long *)t;

	return container_of(tb_data, struct fib_table, tb_data[0]);
}
/* To understand this stuff, an understanding of keys and all their bits is
 * necessary. Every node in the trie has a key associated with it, but not
 * all of the bits in that key are significant.
 *
 * Consider a node 'n' and its parent 'tp'.
 *
 * If n is a leaf, every bit in its key is significant. Its presence is
 * necessitated by path compression, since during a tree traversal (when
 * searching for a leaf - unless we are doing an insertion) we will completely
 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
 * a potentially successful search, that we have indeed been walking the
 * correct key path.
 *
 * Note that we can never "miss" the correct key in the tree if present by
 * following the wrong path. Path compression ensures that segments of the key
 * that are the same for all keys with a given prefix are skipped, but the
 * skipped part *is* identical for each node in the subtrie below the skipped
 * bit! trie_insert() in this implementation takes care of that.
 *
 * if n is an internal node - a 'tnode' here, the various parts of its key
 * have many different meanings.
 *
 * Example:
 * _________________________________________________________________
 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
 * -----------------------------------------------------------------
 *  31  30  29  28  27  26  25  24  23  22  21  20  19  18  17  16
 *
 * _________________________________________________________________
 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
 * -----------------------------------------------------------------
 *  15  14  13  12  11  10   9   8   7   6   5   4   3   2   1   0
 *
 * tp->pos = 22
 * tp->bits = 3
 * n->pos = 13
 * n->bits = 4
 *
 * First, let's just ignore the bits that come before the parent tp, that is
 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
 * point we do not use them for anything.
 *
 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
 * index into the parent's child array. That is, they will be used to find
 * 'n' among tp's children.
 *
 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
 * for the node n.
 *
 * All the bits we have seen so far are significant to the node n. The rest
 * of the bits are really not needed or indeed known in n->key.
 *
 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
 * n's child array, and will of course be different for each child.
 *
 * The rest of the bits, from 0 to (n->pos - 1), are completely unknown
 * at this point.
 */
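/* Worked example with the layout above (tp->pos = 22, tp->bits = 3):
 *
 *	get_index(key, tp) = (key ^ tp->key) >> 22
 *
 * When key matches tp->key on all of the "i" bits, the xor clears bits
 * 25..31 and the shift leaves just the three "N" bits, i.e. a child
 * index in the range [0, 7]. If any "i" bit differs, a bit at position
 * 25 or above survives the shift and the result is >= (1 << tp->bits),
 * which is how a mismatch in skipped bits is caught with a single compare.
 */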
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}
#define TNODE_KMALLOC_MAX \
	ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct tnode *))
#define TNODE_VMALLOC_MAX \
	ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct tnode *))
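/* Assuming 4 KiB pages and 8 byte pointers, TNODE_KMALLOC_MAX works out
 * to ilog2((4096 - TNODE_SIZE(0)) / 8) = 8, so nodes with up to 2^8
 * children fit within a page and are served by the slab allocator, while
 * anything larger falls back to vmalloc (see tnode_alloc below).
 */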
static void __node_free_rcu(struct rcu_head *head)
{
	struct tnode *n = container_of(head, struct tnode, rcu);

	if (IS_LEAF(n))
		kmem_cache_free(trie_leaf_kmem, n);
	else if (n->bits <= TNODE_KMALLOC_MAX)
		kfree(n);
	else
		vfree(n);
}

#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)

static struct tnode *tnode_alloc(int bits)
{
	size_t size;

	/* verify bits is within bounds */
	if (bits > TNODE_VMALLOC_MAX)
		return NULL;

	/* determine size and verify it is non-zero and didn't overflow */
	size = TNODE_SIZE(1ul << bits);

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static inline void empty_child_inc(struct tnode *n)
{
	++n->empty_children ? : ++n->full_children;
}

static inline void empty_child_dec(struct tnode *n)
{
	n->empty_children-- ? : n->full_children--;
}
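/* empty_children is only KEYLENGTH bits wide, so for a node with
 * bits == KEYLENGTH the count of 2^32 empty slots cannot be stored
 * directly. The ?: above lets the counter overflow into full_children,
 * which is safe because such a node can never have "full" children of
 * its own (a child would need pos + bits == 0). tnode_new() seeds this
 * encoding by setting full_children = 1 for the bits == KEYLENGTH case.
 */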
static struct tnode *leaf_new(t_key key, struct fib_alias *fa)
{
	struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);

	if (!l)
		return NULL;

	/* set key and pos to reflect full key value
	 * any trailing zeros in the key should be ignored
	 * as the nodes are searched
	 */
	l->key = key;
	l->slen = fa->fa_slen;
	l->pos = 0;
	/* set bits to 0 indicating we are not a tnode */
	l->bits = 0;

	/* link leaf to fib alias */
	INIT_HLIST_HEAD(&l->leaf);
	hlist_add_head(&fa->fa_list, &l->leaf);

	return l;
}

static struct tnode *tnode_new(t_key key, int pos, int bits)
{
	struct tnode *tn = tnode_alloc(bits);
	unsigned int shift = pos + bits;

	/* verify bits is non-zero and pos + bits stays within the key */
	BUG_ON(!bits || (shift > KEYLENGTH));

	if (tn) {
		tn->parent = NULL;
		tn->slen = pos;
		tn->pos = pos;
		tn->bits = bits;
		tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
		if (bits == KEYLENGTH)
			tn->full_children = 1;
		else
			tn->empty_children = 1ul << bits;
	}

	pr_debug("AT %p s=%zu %zu\n", tn, TNODE_SIZE(0),
		 sizeof(struct tnode *) << bits);

	return tn;
}

/* Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
{
	return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
}
/* Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */
static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
{
	struct tnode *chi = tnode_get_child(tn, i);
	int isfull, wasfull;

	BUG_ON(i >= tnode_child_length(tn));

	/* update emptyChildren, overflow into fullChildren */
	if (n == NULL && chi != NULL)
		empty_child_inc(tn);
	if (n != NULL && chi == NULL)
		empty_child_dec(tn);

	/* update fullChildren */
	wasfull = tnode_full(tn, chi);
	isfull = tnode_full(tn, n);

	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	if (n && (tn->slen < n->slen))
		tn->slen = n->slen;

	rcu_assign_pointer(tn->tnode[i], n);
}

static void update_children(struct tnode *tn)
{
	unsigned long i;

	/* update all of the child parent pointers */
	for (i = tnode_child_length(tn); i;) {
		struct tnode *inode = tnode_get_child(tn, --i);

		if (!inode)
			continue;

		/* Either update the children of a tnode that
		 * already belongs to us or update the child
		 * to point to ourselves.
		 */
		if (node_parent(inode) == tn)
			update_children(inode);
		else
			node_set_parent(inode, tn);
	}
}

static inline void put_child_root(struct tnode *tp, struct trie *t,
				  t_key key, struct tnode *n)
{
	if (tp)
		put_child(tp, get_index(key, tp), n);
	else
		rcu_assign_pointer(t->trie, n);
}

static inline void tnode_free_init(struct tnode *tn)
{
	tn->rcu.next = NULL;
}

static inline void tnode_free_append(struct tnode *tn, struct tnode *n)
{
	n->rcu.next = tn->rcu.next;
	tn->rcu.next = &n->rcu;
}

static void tnode_free(struct tnode *tn)
{
	struct callback_head *head = &tn->rcu;

	while (head) {
		head = head->next;
		tnode_free_size += TNODE_SIZE(1ul << tn->bits);
		node_free(tn);

		tn = container_of(head, struct tnode, rcu);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}
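/* The batching above threads nodes together through their rcu.next
 * pointers (tnode_free_init()/tnode_free_append()) before handing each
 * one to call_rcu() via node_free(). Once roughly sync_pages worth of
 * memory is waiting on RCU callbacks, tnode_free() forces a
 * synchronize_rcu() so resizes of large tables cannot pin unbounded
 * amounts of memory.
 */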
static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
{
	struct tnode *tp = node_parent(oldtnode);
	unsigned long i;

	/* setup the parent pointer out of and back into this node */
	NODE_INIT_PARENT(tn, tp);
	put_child_root(tp, t, tn->key, tn);

	/* update all of the child parent pointers */
	update_children(tn);

	/* all pointers should be clean so we are done */
	tnode_free(oldtnode);

	/* resize children now that oldtnode is freed */
	for (i = tnode_child_length(tn); i;) {
		struct tnode *inode = tnode_get_child(tn, --i);

		/* resize child node */
		if (tnode_full(tn, inode))
			resize(t, inode);
	}
}

static int inflate(struct trie *t, struct tnode *oldtnode)
{
	struct tnode *tn;
	unsigned long i;
	t_key m;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
	if (!tn)
		return -ENOMEM;

	/* prepare oldtnode to be freed */
	tnode_free_init(oldtnode);

	/* Assemble all of the pointers in our cluster, in this case that
	 * represents all of the pointers out of our allocated nodes that
	 * point to existing tnodes and the links between our allocated
	 * nodes.
	 */
	for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
		struct tnode *inode = tnode_get_child(oldtnode, --i);
		struct tnode *node0, *node1;
		unsigned long j, k;

		/* An empty child */
		if (inode == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */
		if (!tnode_full(oldtnode, inode)) {
			put_child(tn, get_index(inode->key, tn), inode);
			continue;
		}

		/* drop the node in the old tnode free list */
		tnode_free_append(oldtnode, inode);

		/* An internal node with two children */
		if (inode->bits == 1) {
			put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
			put_child(tn, 2 * i, tnode_get_child(inode, 0));
			continue;
		}

		/* We will replace this node 'inode' with two new
		 * ones, 'node0' and 'node1', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * node0's key and "1" in node1's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (tn->pos) - is the one that will differ between
		 * node0 and node1. So... we synthesize that bit in the
		 * two new keys.
		 */
		node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
		if (!node1)
			goto nomem;
		node0 = tnode_new(inode->key, inode->pos, inode->bits - 1);

		tnode_free_append(tn, node1);
		if (!node0)
			goto nomem;
		tnode_free_append(tn, node0);

		/* populate child pointers in new nodes */
		for (k = tnode_child_length(inode), j = k / 2; j;) {
			put_child(node1, --j, tnode_get_child(inode, --k));
			put_child(node0, j, tnode_get_child(inode, j));
			put_child(node1, --j, tnode_get_child(inode, --k));
			put_child(node0, j, tnode_get_child(inode, j));
		}

		/* link new nodes to parent */
		NODE_INIT_PARENT(node1, tn);
		NODE_INIT_PARENT(node0, tn);

		/* link parent to nodes */
		put_child(tn, 2 * i + 1, node1);
		put_child(tn, 2 * i, node0);
	}

	/* setup the parent pointers into and out of this node */
	replace(t, oldtnode, tn);

	return 0;
nomem:
	/* all pointers should be clean so we are done */
	tnode_free(tn);
	return -ENOMEM;
}
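/* A quick sketch of the index math in inflate(): child i of oldtnode
 * covers the keys that land in slots 2i and 2i+1 of the doubled node tn,
 * since tn->pos sits one bit lower. A full child with inode->bits == 1
 * simply donates its two children to those slots; a larger full child is
 * split into node0 (bit at tn->pos clear) and node1 (bit set, key | m).
 */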
static int halve(struct trie *t, struct tnode *oldtnode)
{
	struct tnode *tn;
	unsigned long i;

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
	if (!tn)
		return -ENOMEM;

	/* prepare oldtnode to be freed */
	tnode_free_init(oldtnode);

	/* Assemble all of the pointers in our cluster, in this case that
	 * represents all of the pointers out of our allocated nodes that
	 * point to existing tnodes and the links between our allocated
	 * nodes.
	 */
	for (i = tnode_child_length(oldtnode); i;) {
		struct tnode *node1 = tnode_get_child(oldtnode, --i);
		struct tnode *node0 = tnode_get_child(oldtnode, --i);
		struct tnode *inode;

		/* At least one of the children is empty */
		if (!node1 || !node0) {
			put_child(tn, i / 2, node1 ? : node0);
			continue;
		}

		/* Two nonempty children */
		inode = tnode_new(node0->key, oldtnode->pos, 1);
		if (!inode) {
			tnode_free(tn);
			return -ENOMEM;
		}
		tnode_free_append(tn, inode);

		/* initialize pointers out of node */
		put_child(inode, 1, node1);
		put_child(inode, 0, node0);
		NODE_INIT_PARENT(inode, tn);

		/* link parent to node */
		put_child(tn, i / 2, inode);
	}

	/* setup the parent pointers into and out of this node */
	replace(t, oldtnode, tn);

	return 0;
}

static void collapse(struct trie *t, struct tnode *oldtnode)
{
	struct tnode *n, *tp;
	unsigned long i;

	/* scan the tnode looking for that one child that might still exist */
	for (n = NULL, i = tnode_child_length(oldtnode); !n && i;)
		n = tnode_get_child(oldtnode, --i);

	/* compress one level */
	tp = node_parent(oldtnode);
	put_child_root(tp, t, oldtnode->key, n);
	node_set_parent(n, tp);

	/* drop dead node */
	node_free(oldtnode);
}

static unsigned char update_suffix(struct tnode *tn)
{
	unsigned char slen = tn->pos;
	unsigned long stride, i;

	/* search through the list of children looking for nodes that might
	 * have a suffix greater than the one we currently have. This is
	 * why we start with a stride of 2 since a stride of 1 would
	 * represent the nodes with suffix length equal to tn->pos
	 */
	for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) {
		struct tnode *n = tnode_get_child(tn, i);

		if (!n || (n->slen <= slen))
			continue;

		/* update stride and slen based on new value */
		stride <<= (n->slen - slen);
		slen = n->slen;
		i &= ~(stride - 1);

		/* if slen covers all but the last bit we can stop here
		 * there will be nothing longer than that since only node
		 * 0 and 1 << (bits - 1) could have that as their suffix
		 * length.
		 */
		if ((slen + 1) >= (tn->pos + tn->bits))
			break;
	}

	tn->slen = slen;

	return slen;
}
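/* Example of the stride logic: with tn->pos == 0, a child whose slen is
 * 3 can only sit at an index that is a multiple of 8, because the low
 * three bits of any key carrying that suffix are zero. After recording
 * slen = 3 the stride becomes 16, so the scan only revisits the indices
 * that could still hold an even longer suffix.
 */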
/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
 * the Helsinki University of Technology and Matti Tikkanen of Nokia
 * Telecommunications, page 6:
 * "A node is doubled if the ratio of non-empty children to all
 * children in the *doubled* node is at least 'high'."
 *
 * 'high' in this instance is the variable 'inflate_threshold'. It
 * is expressed as a percentage, so we multiply it with
 * tnode_child_length() and instead of multiplying by 2 (since the
 * child array will be doubled by inflate()) and multiplying
 * the left-hand side by 100 (to handle the percentage thing) we
 * multiply the left-hand side by 50.
 *
 * The left-hand side may look a bit weird: tnode_child_length(tn)
 * - tn->empty_children is of course the number of non-null children
 * in the current node. tn->full_children is the number of "full"
 * children, that is non-null tnodes with a skip value of 0.
 * All of those will be doubled in the resulting inflated tnode, so
 * we just count them one extra time here.
 *
 * A clearer way to write this would be:
 *
 * to_be_doubled = tn->full_children;
 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
 *     tn->full_children;
 *
 * new_child_length = tnode_child_length(tn) * 2;
 *
 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
 *      new_child_length;
 * if (new_fill_factor >= inflate_threshold)
 *
 * ...and so on, though it would mess up the while () loop.
 *
 * anyway,
 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
 *      inflate_threshold
 *
 * avoid a division:
 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
 *      inflate_threshold * new_child_length
 *
 * expand not_to_be_doubled and to_be_doubled, and shorten:
 * 100 * (tnode_child_length(tn) - tn->empty_children +
 *    tn->full_children) >= inflate_threshold * new_child_length
 *
 * expand new_child_length:
 * 100 * (tnode_child_length(tn) - tn->empty_children +
 *    tn->full_children) >=
 *      inflate_threshold * tnode_child_length(tn) * 2
 *
 * shorten again:
 * 50 * (tn->full_children + tnode_child_length(tn) -
 *    tn->empty_children) >= inflate_threshold *
 *    tnode_child_length(tn)
 */
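/* Plugging in numbers: a node with bits = 4 (16 slots), 4 empty children
 * and 6 full children gives used = 16 - 4 + 6 = 18, so 50 * 18 = 900 >=
 * 50 * 16 = 800 and should_inflate() below says yes. The long form
 * agrees: not_to_be_doubled = 6, to_be_doubled = 6, and
 * new_fill_factor = 100 * (6 + 2*6) / 32 = 56.25% >= 50%.
 */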
static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
{
	unsigned long used = tnode_child_length(tn);
	unsigned long threshold = used;

	/* Keep root node larger */
	threshold *= tp ? inflate_threshold : inflate_threshold_root;
	used -= tn->empty_children;
	used += tn->full_children;

	/* if bits == KEYLENGTH then pos = 0, and will fail below */

	return (used > 1) && tn->pos && ((50 * used) >= threshold);
}

static bool should_halve(const struct tnode *tp, const struct tnode *tn)
{
	unsigned long used = tnode_child_length(tn);
	unsigned long threshold = used;

	/* Keep root node larger */
	threshold *= tp ? halve_threshold : halve_threshold_root;
	used -= tn->empty_children;

	/* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */

	return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
}

static bool should_collapse(const struct tnode *tn)
{
	unsigned long used = tnode_child_length(tn);

	used -= tn->empty_children;

	/* account for bits == KEYLENGTH case */
	if ((tn->bits == KEYLENGTH) && tn->full_children)
		used -= KEY_MAX;

	/* One child or none, time to drop us from the trie */
	return used < 2;
}
#define MAX_WORK 10
static void resize(struct trie *t, struct tnode *tn)
{
	struct tnode *tp = node_parent(tn);
	struct tnode __rcu **cptr;
	int max_work = MAX_WORK;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* track the tnode via the pointer from the parent instead of
	 * doing it ourselves. This way we can let RCU fully do its
	 * thing without us interfering
	 */
	cptr = tp ? &tp->tnode[get_index(tn->key, tp)] : &t->trie;
	BUG_ON(tn != rtnl_dereference(*cptr));

	/* Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */
	while (should_inflate(tp, tn) && max_work) {
		if (inflate(t, tn)) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}

		max_work--;
		tn = rtnl_dereference(*cptr);
	}

	/* Return if at least one inflate is run */
	if (max_work != MAX_WORK)
		return;

	/* Halve as long as the number of empty children in this
	 * node is above threshold.
	 */
	while (should_halve(tp, tn) && max_work) {
		if (halve(t, tn)) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(t->stats->resize_node_skipped);
#endif
			break;
		}

		max_work--;
		tn = rtnl_dereference(*cptr);
	}

	/* Only one child remains */
	if (should_collapse(tn)) {
		collapse(t, tn);
		return;
	}

	/* Return if at least one deflate was run */
	if (max_work != MAX_WORK)
		return;

	/* push the suffix length to the parent node */
	if (tn->slen > tn->pos) {
		unsigned char slen = update_suffix(tn);

		if (tp && (slen > tp->slen))
			tp->slen = slen;
	}
}

static void leaf_pull_suffix(struct tnode *tp, struct tnode *l)
{
	while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
		if (update_suffix(tp) > l->slen)
			break;
		tp = node_parent(tp);
	}
}

static void leaf_push_suffix(struct tnode *tn, struct tnode *l)
{
	/* if this is a new leaf then tn will be NULL and we can sort
	 * out parent suffix lengths as a part of trie_rebalance
	 */
	while (tn && (tn->slen < l->slen)) {
		tn->slen = l->slen;
		tn = node_parent(tn);
	}
}
/* rcu_read_lock needs to be held by the caller from the read side */
static struct tnode *fib_find_node(struct trie *t, struct tnode **tn, u32 key)
{
	struct tnode *pn = NULL, *n = rcu_dereference_rtnl(t->trie);

	while (n) {
		unsigned long index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the bits in the cindex. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 *   if (index >= (1ul << bits))
		 *     we have a mismatch in skip bits and failed
		 *   else
		 *     we know the value is cindex
		 *
		 * This check is safe even if bits == KEYLENGTH due to the
		 * fact that we can only allocate a node with 32 bits if a
		 * long is greater than 32 bits.
		 */
		if (index >= (1ul << n->bits)) {
			n = NULL;
			break;
		}

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
			break;

		pn = n;
		n = tnode_get_child_rcu(n, index);
	}

	*tn = pn;

	return n;
}

/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
					u8 tos, u32 prio)
{
	struct fib_alias *fa;

	if (!fah)
		return NULL;

	hlist_for_each_entry(fa, fah, fa_list) {
		if (fa->fa_slen < slen)
			continue;
		if (fa->fa_slen != slen)
			break;
		if (fa->fa_tos > tos)
			continue;
		if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos)
			return fa;
	}

	return NULL;
}
static void trie_rebalance(struct trie *t, struct tnode *tn)
{
	struct tnode *tp;

	while (tn) {
		tp = node_parent(tn);
		resize(t, tn);
		tn = tp;
	}
}

/* only used from updater-side */
static int fib_insert_node(struct trie *t, struct tnode *tp,
			   struct fib_alias *new, t_key key)
{
	struct tnode *n, *l;

	l = leaf_new(key, new);
	if (!l)
		return -ENOMEM;

	/* retrieve child from parent node */
	if (tp)
		n = tnode_get_child(tp, get_index(key, tp));
	else
		n = rcu_dereference_rtnl(t->trie);

	/* Case 2: n is a LEAF or a TNODE and the key doesn't match.
	 *
	 *  Add a new tnode here
	 *  first tnode need some special handling
	 *  leaves us in position for handling as case 3
	 */
	if (n) {
		struct tnode *tn;

		tn = tnode_new(key, __fls(key ^ n->key), 1);
		if (!tn) {
			node_free(l);
			return -ENOMEM;
		}

		/* initialize routes out of node */
		NODE_INIT_PARENT(tn, tp);
		put_child(tn, get_index(key, tn) ^ 1, n);

		/* start adding routes into the node */
		put_child_root(tp, t, key, tn);
		node_set_parent(n, tn);

		/* parent now has a NULL spot where the leaf can go */
		tp = tn;
	}

	/* Case 3: n is NULL, and will just insert a new leaf */
	NODE_INIT_PARENT(l, tp);
	put_child_root(tp, t, key, l);
	trie_rebalance(t, tp);

	return 0;
}

static int fib_insert_alias(struct trie *t, struct tnode *tp,
			    struct tnode *l, struct fib_alias *new,
			    struct fib_alias *fa, t_key key)
{
	if (!l)
		return fib_insert_node(t, tp, new, key);

	if (fa) {
		hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
	} else {
		struct fib_alias *last;

		hlist_for_each_entry(last, &l->leaf, fa_list) {
			if (new->fa_slen < last->fa_slen)
				break;
			fa = last;
		}

		if (fa)
			hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
		else
			hlist_add_head_rcu(&new->fa_list, &l->leaf);
	}

	/* if we added to the tail node then we need to update slen */
	if (l->slen < new->fa_slen) {
		l->slen = new->fa_slen;
		leaf_push_suffix(tp, l);
	}

	return 0;
}
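/* The net effect of fib_insert_alias() is that the aliases hanging off a
 * leaf stay sorted by fa_slen in ascending order, i.e. longest prefix
 * first, which is exactly the order fib_table_lookup() relies on when it
 * scans a leaf for the first acceptable alias.
 */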
/* Caller must hold RTNL. */
int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
{
	struct trie *t = (struct trie *)tb->tb_data;
	struct fib_alias *fa, *new_fa;
	struct tnode *l, *tp;
	struct fib_info *fi;
	u8 plen = cfg->fc_dst_len;
	u8 slen = KEYLENGTH - plen;
	u8 tos = cfg->fc_tos;
	u32 key;
	int err;

	if (plen > KEYLENGTH)
		return -EINVAL;

	key = ntohl(cfg->fc_dst);

	pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);

	if ((plen < KEYLENGTH) && (key << plen))
		return -EINVAL;

	fi = fib_create_info(cfg);
	if (IS_ERR(fi)) {
		err = PTR_ERR(fi);
		goto err;
	}

	l = fib_find_node(t, &tp, key);
	fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority) : NULL;

	/* Now fa, if non-NULL, points to the first fib alias
	 * with the same keys [prefix,tos,priority], if such key already
	 * exists or to the node before which we will insert new one.
	 *
	 * If fa is NULL, we will need to allocate a new one and
	 * insert to the tail of the section matching the suffix length
	 * of the new alias.
	 */

	if (fa && fa->fa_tos == tos &&
	    fa->fa_info->fib_priority == fi->fib_priority) {
		struct fib_alias *fa_first, *fa_match;

		err = -EEXIST;
		if (cfg->fc_nlflags & NLM_F_EXCL)
			goto out;

		/* We have 2 goals:
		 * 1. Find exact match for type, scope, fib_info to avoid
		 *    duplicate routes
		 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
		 */
		fa_match = NULL;
		fa_first = fa;
		hlist_for_each_entry_from(fa, fa_list) {
			if ((fa->fa_slen != slen) || (fa->fa_tos != tos))
				break;
			if (fa->fa_info->fib_priority != fi->fib_priority)
				break;
			if (fa->fa_type == cfg->fc_type &&
			    fa->fa_info == fi) {
				fa_match = fa;
				break;
			}
		}

		if (cfg->fc_nlflags & NLM_F_REPLACE) {
			struct fib_info *fi_drop;
			u8 state;

			fa = fa_first;
			if (fa_match) {
				if (fa == fa_match)
					err = 0;
				goto out;
			}
			err = -ENOBUFS;
			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
			if (new_fa == NULL)
				goto out;

			fi_drop = fa->fa_info;
			new_fa->fa_tos = fa->fa_tos;
			new_fa->fa_info = fi;
			new_fa->fa_type = cfg->fc_type;
			state = fa->fa_state;
			new_fa->fa_state = state & ~FA_S_ACCESSED;
			new_fa->fa_slen = fa->fa_slen;

			hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
			alias_free_mem_rcu(fa);

			fib_release_info(fi_drop);
			if (state & FA_S_ACCESSED)
				rt_cache_flush(cfg->fc_nlinfo.nl_net);
			rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
				  tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);

			goto succeeded;
		}
		/* Error if we find a perfect match which
		 * uses the same scope, type, and nexthop
		 * information.
		 */
		if (fa_match)
			goto out;

		if (!(cfg->fc_nlflags & NLM_F_APPEND))
			fa = fa_first;
	}
	err = -ENOENT;
	if (!(cfg->fc_nlflags & NLM_F_CREATE))
		goto out;

	err = -ENOBUFS;
	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
	if (new_fa == NULL)
		goto out;

	new_fa->fa_info = fi;
	new_fa->fa_tos = tos;
	new_fa->fa_type = cfg->fc_type;
	new_fa->fa_state = 0;
	new_fa->fa_slen = slen;

	/* Insert new entry to the list. */
	err = fib_insert_alias(t, tp, l, new_fa, fa, key);
	if (err)
		goto out_free_new_fa;

	if (!plen)
		tb->tb_num_default++;

	rt_cache_flush(cfg->fc_nlinfo.nl_net);
	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
		  &cfg->fc_nlinfo, 0);
succeeded:
	return 0;

out_free_new_fa:
	kmem_cache_free(fn_alias_kmem, new_fa);
out:
	fib_release_info(fi);
err:
	return err;
}
static inline t_key prefix_mismatch(t_key key, struct tnode *n)
{
	t_key prefix = n->key;

	return (key ^ prefix) & (prefix | -prefix);
}
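/* (prefix | -prefix) sets every bit from the lowest set bit of the
 * prefix upward. For example, with n->key = 0x0a000000 (10.0.0.0) the
 * mask is 0xfe000000, so only bits 25..31 are compared here; any
 * difference in the bits below the prefix's lowest set bit is left to
 * the per-alias suffix check in fib_table_lookup().
 */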
/* should be called with rcu_read_lock */
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
		     struct fib_result *res, int fib_flags)
{
	struct trie *t = (struct trie *)tb->tb_data;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats __percpu *stats = t->stats;
#endif
	const t_key key = ntohl(flp->daddr);
	struct tnode *n, *pn;
	struct fib_alias *fa;
	unsigned long index;
	t_key cindex;

	n = rcu_dereference(t->trie);
	if (!n)
		return -EAGAIN;

#ifdef CONFIG_IP_FIB_TRIE_STATS
	this_cpu_inc(stats->gets);
#endif

	pn = n;
	cindex = 0;

	/* Step 1: Travel to the longest prefix match in the trie */
	for (;;) {
		index = get_index(key, n);

		/* This bit of code is a bit tricky but it combines multiple
		 * checks into a single check. The prefix consists of the
		 * prefix plus zeros for the "bits" in the prefix. The index
		 * is the difference between the key and this value. From
		 * this we can actually derive several pieces of data.
		 *   if (index >= (1ul << bits))
		 *     we have a mismatch in skip bits and failed
		 *   else
		 *     we know the value is cindex
		 *
		 * This check is safe even if bits == KEYLENGTH due to the
		 * fact that we can only allocate a node with 32 bits if a
		 * long is greater than 32 bits.
		 */
		if (index >= (1ul << n->bits))
			break;

		/* we have found a leaf. Prefixes have already been compared */
		if (IS_LEAF(n))
			goto found;

		/* only record pn and cindex if we are going to be chopping
		 * bits later. Otherwise we are just wasting cycles.
		 */
		if (n->slen > n->pos) {
			pn = n;
			cindex = index;
		}

		n = tnode_get_child_rcu(n, index);
		if (unlikely(!n))
			goto backtrace;
	}

	/* Step 2: Sort out leaves and begin backtracing for longest prefix */
	for (;;) {
		/* record the pointer where our next node pointer is stored */
		struct tnode __rcu **cptr = n->tnode;

		/* This test verifies that none of the bits that differ
		 * between the key and the prefix exist in the region of
		 * the lsb and higher in the prefix.
		 */
		if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
			goto backtrace;

		/* exit out and process leaf */
		if (unlikely(IS_LEAF(n)))
			break;

		/* Don't bother recording parent info. Since we are in
		 * prefix match mode we will have to come back to wherever
		 * we started this traversal anyway
		 */
		while ((n = rcu_dereference(*cptr)) == NULL) {
backtrace:
#ifdef CONFIG_IP_FIB_TRIE_STATS
			if (!n)
				this_cpu_inc(stats->null_node_hit);
#endif
			/* If we are at cindex 0 there are no more bits for
			 * us to strip at this level so we must ascend back
			 * up one level to see if there are any more bits to
			 * be stripped there.
			 */
			while (!cindex) {
				t_key pkey = pn->key;

				pn = node_parent_rcu(pn);
				if (unlikely(!pn))
					return -EAGAIN;
#ifdef CONFIG_IP_FIB_TRIE_STATS
				this_cpu_inc(stats->backtrack);
#endif
				/* Get Child's index */
				cindex = get_index(pkey, pn);
			}

			/* strip the least significant bit from the cindex */
			cindex &= cindex - 1;

			/* grab pointer for next child node */
			cptr = &pn->tnode[cindex];
		}
	}

found:
	/* this line carries forward the xor from earlier in the function */
	index = key ^ n->key;

	/* Step 3: Process the leaf, if that fails fall back to backtracing */
	hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
		struct fib_info *fi = fa->fa_info;
		int nhsel, err;

		if ((index >= (1ul << fa->fa_slen)) &&
		    ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH)))
			continue;
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
			continue;
		if (fi->fib_dead)
			continue;
		if (fa->fa_info->fib_scope < flp->flowi4_scope)
			continue;
		fib_alias_accessed(fa);
		err = fib_props[fa->fa_type].error;
		if (unlikely(err < 0)) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(stats->semantic_match_passed);
#endif
			return err;
		}
		if (fi->fib_flags & RTNH_F_DEAD)
			continue;
		for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
			const struct fib_nh *nh = &fi->fib_nh[nhsel];

			if (nh->nh_flags & RTNH_F_DEAD)
				continue;
			if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
				continue;

			if (!(fib_flags & FIB_LOOKUP_NOREF))
				atomic_inc(&fi->fib_clntref);

			res->prefixlen = KEYLENGTH - fa->fa_slen;
			res->nh_sel = nhsel;
			res->type = fa->fa_type;
			res->scope = fi->fib_scope;
			res->fi = fi;
			res->table = tb;
			res->fa_head = &n->leaf;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			this_cpu_inc(stats->semantic_match_passed);
#endif
			return err;
		}
	}
#ifdef CONFIG_IP_FIB_TRIE_STATS
	this_cpu_inc(stats->semantic_match_miss);
#endif
	goto backtrace;
}
EXPORT_SYMBOL_GPL(fib_table_lookup);
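/* Example of the semantic check in Step 3: a leaf with key 0xc0a80000
 * (192.168.0.0) holding a /16 alias has fa_slen = 16. A lookup of
 * 192.168.12.34 computes index = key ^ n->key = 0x00000c22, which is
 * below (1ul << 16), so the top 16 bits match and the alias applies.
 */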
static void fib_remove_alias(struct trie *t, struct tnode *tp,
			     struct tnode *l, struct fib_alias *old)
{
	/* record the location of the previous list_info entry */
	struct hlist_node **pprev = old->fa_list.pprev;
	struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);

	/* remove the fib_alias from the list */
	hlist_del_rcu(&old->fa_list);

	/* if we emptied the list this leaf will be freed and we can sort
	 * out parent suffix lengths as a part of trie_rebalance
	 */
	if (hlist_empty(&l->leaf)) {
		put_child_root(tp, t, l->key, NULL);
		node_free(l);
		trie_rebalance(t, tp);
		return;
	}

	/* only access fa if it is pointing at the last valid hlist_node */
	if (*pprev)
		return;

	/* update the trie with the latest suffix length */
	l->slen = fa->fa_slen;
	leaf_pull_suffix(tp, l);
}

/* Caller must hold RTNL. */
int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
{
	struct trie *t = (struct trie *) tb->tb_data;
	struct fib_alias *fa, *fa_to_delete;
	struct tnode *l, *tp;
	u8 plen = cfg->fc_dst_len;
	u8 slen = KEYLENGTH - plen;
	u8 tos = cfg->fc_tos;
	u32 key;

	if (plen > KEYLENGTH)
		return -EINVAL;

	key = ntohl(cfg->fc_dst);

	if ((plen < KEYLENGTH) && (key << plen))
		return -EINVAL;

	l = fib_find_node(t, &tp, key);
	if (!l)
		return -ESRCH;

	fa = fib_find_alias(&l->leaf, slen, tos, 0);
	if (!fa)
		return -ESRCH;

	pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);

	fa_to_delete = NULL;
	hlist_for_each_entry_from(fa, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if ((fa->fa_slen != slen) || (fa->fa_tos != tos))
			break;

		if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
		    (cfg->fc_scope == RT_SCOPE_NOWHERE ||
		     fa->fa_info->fib_scope == cfg->fc_scope) &&
		    (!cfg->fc_prefsrc ||
		     fi->fib_prefsrc == cfg->fc_prefsrc) &&
		    (!cfg->fc_protocol ||
		     fi->fib_protocol == cfg->fc_protocol) &&
		    fib_nh_match(cfg, fi) == 0) {
			fa_to_delete = fa;
			break;
		}
	}

	if (!fa_to_delete)
		return -ESRCH;

	rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
		  &cfg->fc_nlinfo, 0);

	if (!plen)
		tb->tb_num_default--;

	fib_remove_alias(t, tp, l, fa_to_delete);

	if (fa_to_delete->fa_state & FA_S_ACCESSED)
		rt_cache_flush(cfg->fc_nlinfo.nl_net);

	fib_release_info(fa_to_delete->fa_info);
	alias_free_mem_rcu(fa_to_delete);

	return 0;
}
/* Scan for the next leaf starting at the provided key value */
static struct tnode *leaf_walk_rcu(struct tnode **tn, t_key key)
{
	struct tnode *pn, *n = *tn;
	unsigned long cindex;

	/* record parent node for backtracing */
	pn = n;
	cindex = n ? get_index(key, n) : 0;

	/* this loop is meant to try and find the key in the trie */
	while (n) {
		unsigned long idx = get_index(key, n);

		/* guarantee forward progress on the keys */
		if (IS_LEAF(n) && (n->key >= key))
			goto found;
		if (idx >= (1ul << n->bits))
			break;

		/* record parent and next child index */
		pn = n;
		cindex = idx;

		/* descend into the next child */
		n = tnode_get_child_rcu(pn, cindex++);
	}

	/* this loop will search for the next leaf with a greater key */
	while (pn) {
		/* if we exhausted the parent node we will need to climb */
		if (cindex >= (1ul << pn->bits)) {
			t_key pkey = pn->key;

			pn = node_parent_rcu(pn);
			if (!pn)
				break;

			cindex = get_index(pkey, pn) + 1;
			continue;
		}

		/* grab the next available node */
		n = tnode_get_child_rcu(pn, cindex++);
		if (!n)
			continue;

		/* no need to compare keys since we bumped the index */
		if (IS_LEAF(n))
			goto found;

		/* Rescan: start scanning in the new node */
		pn = n;
		cindex = 0;
	}

	*tn = pn;
	return NULL; /* Root of trie */
found:
	/* if we are at the limit for keys just return NULL for the tnode */
	*tn = (n->key == KEY_MAX) ? NULL : pn;
	return n;
}
/* Caller must hold RTNL */
void fib_table_flush_external(struct fib_table *tb)
{
	struct trie *t = (struct trie *)tb->tb_data;
	struct fib_alias *fa;
	struct tnode *n, *pn;
	unsigned long cindex;

	n = rcu_dereference(t->trie);
	if (!n)
		return;

	pn = NULL;
	cindex = 0;

	while (IS_TNODE(n)) {
		/* record pn and cindex for leaf walking */
		pn = n;
		cindex = 1ul << n->bits;
backtrace:
		/* walk trie in reverse order */
		while (!(cindex--)) {
			t_key pkey = pn->key;

			n = pn;
			pn = node_parent(n);

			/* resize completed node */
			resize(t, n);

			/* if we got the root we are done */
			if (!pn)
				return;

			cindex = get_index(pkey, pn);
		}

		/* grab the next available node */
		n = tnode_get_child(pn, cindex);
		if (!n)
			goto backtrace;
	}

	hlist_for_each_entry(fa, &n->leaf, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fi && (fi->fib_flags & RTNH_F_EXTERNAL)) {
			netdev_switch_fib_ipv4_del(n->key,
						   KEYLENGTH - fa->fa_slen,
						   fi, fa->fa_tos,
						   fa->fa_type, tb->tb_id);
		}
	}

	/* if trie is leaf only loop is completed */
	if (pn)
		goto backtrace;
}

/* Caller must hold RTNL. */
int fib_table_flush(struct fib_table *tb)
{
	struct trie *t = (struct trie *)tb->tb_data;
	struct hlist_node *tmp;
	struct fib_alias *fa;
	struct tnode *n, *pn;
	unsigned long cindex;
	unsigned char slen;
	int found = 0;

	n = rcu_dereference(t->trie);
	if (!n)
		goto flush_complete;

	pn = NULL;
	cindex = 0;

	while (IS_TNODE(n)) {
		/* record pn and cindex for leaf walking */
		pn = n;
		cindex = 1ul << n->bits;
backtrace:
		/* walk trie in reverse order */
		while (!(cindex--)) {
			t_key pkey = pn->key;

			n = pn;
			pn = node_parent(n);

			/* resize completed node */
			resize(t, n);

			/* if we got the root we are done */
			if (!pn)
				goto flush_complete;

			cindex = get_index(pkey, pn);
		}

		/* grab the next available node */
		n = tnode_get_child(pn, cindex);
		if (!n)
			goto backtrace;
	}

	/* track slen in case any prefixes survive */
	slen = 0;

	hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
			hlist_del_rcu(&fa->fa_list);
			fib_release_info(fa->fa_info);
			alias_free_mem_rcu(fa);
			found++;

			continue;
		}

		slen = fa->fa_slen;
	}

	/* update leaf slen */
	n->slen = slen;

	if (hlist_empty(&n->leaf)) {
		put_child_root(pn, t, n->key, NULL);
		node_free(n);
	} else {
		leaf_pull_suffix(pn, n);
	}

	/* if trie is leaf only loop is completed */
	if (pn)
		goto backtrace;

flush_complete:
	pr_debug("trie_flush found=%d\n", found);
	return found;
}
static void __trie_free_rcu(struct rcu_head *head)
{
	struct fib_table *tb = container_of(head, struct fib_table, rcu);
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie *t = (struct trie *)tb->tb_data;

	free_percpu(t->stats);
#endif /* CONFIG_IP_FIB_TRIE_STATS */
	kfree(tb);
}

void fib_free_table(struct fib_table *tb)
{
	call_rcu(&tb->rcu, __trie_free_rcu);
}

static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
			     struct sk_buff *skb, struct netlink_callback *cb)
{
	__be32 xkey = htonl(l->key);
	struct fib_alias *fa;
	int i, s_i;

	s_i = cb->args[4];
	i = 0;

	/* rcu_read_lock is held by the caller */
	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
		if (i < s_i) {
			i++;
			continue;
		}

		if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq,
				  RTM_NEWROUTE,
				  tb->tb_id,
				  fa->fa_type,
				  xkey,
				  KEYLENGTH - fa->fa_slen,
				  fa->fa_tos,
				  fa->fa_info, NLM_F_MULTI) < 0) {
			cb->args[4] = i;
			return -1;
		}
		i++;
	}

	cb->args[4] = i;
	return skb->len;
}

/* rcu_read_lock needs to be held by the caller from the read side */
int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
		   struct netlink_callback *cb)
{
	struct trie *t = (struct trie *)tb->tb_data;
	struct tnode *l, *tp;
	/* Dump starting at last key.
	 * Note: 0.0.0.0/0 (ie default) is first key.
	 */
	int count = cb->args[2];
	t_key key = cb->args[3];

	tp = rcu_dereference_rtnl(t->trie);

	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
		if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
			cb->args[3] = key;
			cb->args[2] = count;
			return -1;
		}

		++count;
		key = l->key + 1;

		memset(&cb->args[4], 0,
		       sizeof(cb->args) - 4*sizeof(cb->args[0]));

		/* stop loop if key wrapped back to 0 */
		if (key < l->key)
			break;
	}

	cb->args[3] = key;
	cb->args[2] = count;

	return skb->len;
}
void __init fib_trie_init(void)
{
	fn_alias_kmem = kmem_cache_create("ip_fib_alias",
					  sizeof(struct fib_alias),
					  0, SLAB_PANIC, NULL);

	trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
					   LEAF_SIZE,
					   0, SLAB_PANIC, NULL);
}

struct fib_table *fib_trie_table(u32 id)
{
	struct fib_table *tb;
	struct trie *t;

	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
		     GFP_KERNEL);
	if (tb == NULL)
		return NULL;

	tb->tb_id = id;
	tb->tb_default = -1;
	tb->tb_num_default = 0;

	t = (struct trie *) tb->tb_data;
	RCU_INIT_POINTER(t->trie, NULL);
#ifdef CONFIG_IP_FIB_TRIE_STATS
	t->stats = alloc_percpu(struct trie_use_stats);
	if (!t->stats) {
		kfree(tb);
		tb = NULL;
	}
#endif

	return tb;
}
#ifdef CONFIG_PROC_FS
/* Depth first Trie walk iterator */
struct fib_trie_iter {
	struct seq_net_private p;
	struct fib_table *tb;
	struct tnode *tnode;
	unsigned int index;
	unsigned int depth;
};

static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
{
	unsigned long cindex = iter->index;
	struct tnode *tn = iter->tnode;
	struct tnode *p;

	/* A single entry routing table */
	if (!tn)
		return NULL;

	pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
		 iter->tnode, iter->index, iter->depth);
rescan:
	while (cindex < tnode_child_length(tn)) {
		struct tnode *n = tnode_get_child_rcu(tn, cindex);

		if (n) {
			if (IS_LEAF(n)) {
				iter->tnode = tn;
				iter->index = cindex + 1;
			} else {
				/* push down one level */
				iter->tnode = n;
				iter->index = 0;
				++iter->depth;
			}
			return n;
		}

		++cindex;
	}

	/* Current node exhausted, pop back up */
	p = node_parent_rcu(tn);
	if (p) {
		cindex = get_index(tn->key, p) + 1;
		tn = p;
		--iter->depth;
		goto rescan;
	}

	/* got root? */
	return NULL;
}

static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
					struct trie *t)
{
	struct tnode *n;

	if (!t)
		return NULL;

	n = rcu_dereference(t->trie);
	if (!n)
		return NULL;

	if (IS_TNODE(n)) {
		iter->tnode = n;
		iter->index = 0;
		iter->depth = 1;
	} else {
		iter->tnode = NULL;
		iter->index = 0;
		iter->depth = 0;
	}

	return n;
}

static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
	struct tnode *n;
	struct fib_trie_iter iter;

	memset(s, 0, sizeof(*s));

	rcu_read_lock();
	for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
		if (IS_LEAF(n)) {
			struct fib_alias *fa;

			s->leaves++;
			s->totdepth += iter.depth;
			if (iter.depth > s->maxdepth)
				s->maxdepth = iter.depth;

			hlist_for_each_entry_rcu(fa, &n->leaf, fa_list)
				++s->prefixes;
		} else {
			s->tnodes++;
			if (n->bits < MAX_STAT_DEPTH)
				s->nodesizes[n->bits]++;
			s->nullpointers += n->empty_children;
		}
	}
	rcu_read_unlock();
}
/*
 *	This outputs /proc/net/fib_triestats
 */
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
	unsigned int i, max, pointers, bytes, avdepth;

	if (stat->leaves)
		avdepth = stat->totdepth*100 / stat->leaves;
	else
		avdepth = 0;

	seq_printf(seq, "\tAver depth:     %u.%02d\n",
		   avdepth / 100, avdepth % 100);
	seq_printf(seq, "\tMax depth:      %u\n", stat->maxdepth);

	seq_printf(seq, "\tLeaves:         %u\n", stat->leaves);
	bytes = LEAF_SIZE * stat->leaves;

	seq_printf(seq, "\tPrefixes:       %u\n", stat->prefixes);
	bytes += sizeof(struct fib_alias) * stat->prefixes;

	seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
	bytes += TNODE_SIZE(0) * stat->tnodes;

	max = MAX_STAT_DEPTH;
	while (max > 0 && stat->nodesizes[max-1] == 0)
		max--;

	pointers = 0;
	for (i = 1; i < max; i++)
		if (stat->nodesizes[i] != 0) {
			seq_printf(seq, "  %u: %u",  i, stat->nodesizes[i]);
			pointers += (1<<i) * stat->nodesizes[i];
		}
	seq_putc(seq, '\n');
	seq_printf(seq, "\tPointers: %u\n", pointers);

	bytes += sizeof(struct tnode *) * pointers;
	seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
	seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
static void trie_show_usage(struct seq_file *seq,
			    const struct trie_use_stats __percpu *stats)
{
	struct trie_use_stats s = { 0 };
	int cpu;

	/* loop through all of the CPUs and gather up the stats */
	for_each_possible_cpu(cpu) {
		const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);

		s.gets += pcpu->gets;
		s.backtrack += pcpu->backtrack;
		s.semantic_match_passed += pcpu->semantic_match_passed;
		s.semantic_match_miss += pcpu->semantic_match_miss;
		s.null_node_hit += pcpu->null_node_hit;
		s.resize_node_skipped += pcpu->resize_node_skipped;
	}

	seq_printf(seq, "\nCounters:\n---------\n");
	seq_printf(seq, "gets = %u\n", s.gets);
	seq_printf(seq, "backtracks = %u\n", s.backtrack);
	seq_printf(seq, "semantic match passed = %u\n",
		   s.semantic_match_passed);
	seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
	seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
	seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
}
#endif /* CONFIG_IP_FIB_TRIE_STATS */

static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
{
	if (tb->tb_id == RT_TABLE_LOCAL)
		seq_puts(seq, "Local:\n");
	else if (tb->tb_id == RT_TABLE_MAIN)
		seq_puts(seq, "Main:\n");
	else
		seq_printf(seq, "Id %d:\n", tb->tb_id);
}
static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	unsigned int h;

	seq_printf(seq,
		   "Basic info: size of leaf:"
		   " %Zd bytes, size of tnode: %Zd bytes.\n",
		   LEAF_SIZE, TNODE_SIZE(0));

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			struct trie *t = (struct trie *) tb->tb_data;
			struct trie_stat stat;

			if (!t)
				continue;

			fib_table_print(seq, tb);

			trie_collect_stats(t, &stat);
			trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
			trie_show_usage(seq, t->stats);
#endif
		}
	}

	return 0;
}

static int fib_triestat_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, fib_triestat_seq_show);
}

static const struct file_operations fib_triestat_fops = {
	.owner	= THIS_MODULE,
	.open	= fib_triestat_seq_open,
	.read	= seq_read,
	.llseek	= seq_lseek,
	.release = single_release_net,
};

static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	loff_t idx = 0;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			struct tnode *n;

			for (n = fib_trie_get_first(iter,
						    (struct trie *) tb->tb_data);
			     n; n = fib_trie_get_next(iter))
				if (pos == idx++) {
					iter->tb = tb;
					return n;
				}
		}
	}

	return NULL;
}

static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return fib_trie_get_idx(seq, *pos);
}
static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct fib_table *tb = iter->tb;
	struct hlist_node *tb_node;
	unsigned int h;
	struct tnode *n;

	++*pos;
	/* next node in same table */
	n = fib_trie_get_next(iter);
	if (n)
		return n;

	/* walk rest of this hash chain */
	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
	while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
		if (n)
			goto found;
	}

	/* new hash chain */
	while (++h < FIB_TABLE_HASHSZ) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
			if (n)
				goto found;
		}
	}
	return NULL;

found:
	iter->tb = tb;
	return n;
}

static void fib_trie_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void seq_indent(struct seq_file *seq, int n)
{
	while (n-- > 0)
		seq_puts(seq, "   ");
}

static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
	switch (s) {
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE:	return "site";
	case RT_SCOPE_LINK:	return "link";
	case RT_SCOPE_HOST:	return "host";
	case RT_SCOPE_NOWHERE:	return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
	}
}
static const char *const rtn_type_names[__RTN_MAX] = {
	[RTN_UNSPEC] = "UNSPEC",
	[RTN_UNICAST] = "UNICAST",
	[RTN_LOCAL] = "LOCAL",
	[RTN_BROADCAST] = "BROADCAST",
	[RTN_ANYCAST] = "ANYCAST",
	[RTN_MULTICAST] = "MULTICAST",
	[RTN_BLACKHOLE] = "BLACKHOLE",
	[RTN_UNREACHABLE] = "UNREACHABLE",
	[RTN_PROHIBIT] = "PROHIBIT",
	[RTN_THROW] = "THROW",
	[RTN_NAT] = "NAT",
	[RTN_XRESOLVE] = "XRESOLVE",
};

static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
{
	if (t < __RTN_MAX && rtn_type_names[t])
		return rtn_type_names[t];
	snprintf(buf, len, "type %u", t);
	return buf;
}
/* Pretty print the trie */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct tnode *n = v;

	if (!node_parent_rcu(n))
		fib_table_print(seq, iter->tb);

	if (IS_TNODE(n)) {
		__be32 prf = htonl(n->key);

		seq_indent(seq, iter->depth-1);
		seq_printf(seq, "  +-- %pI4/%zu %u %u %u\n",
			   &prf, KEYLENGTH - n->pos - n->bits, n->bits,
			   n->full_children, n->empty_children);
	} else {
		__be32 val = htonl(n->key);
		struct fib_alias *fa;

		seq_indent(seq, iter->depth);
		seq_printf(seq, "  |-- %pI4\n", &val);

		hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
			char buf1[32], buf2[32];

			seq_indent(seq, iter->depth + 1);
			seq_printf(seq, "  /%zu %s %s",
				   KEYLENGTH - fa->fa_slen,
				   rtn_scope(buf1, sizeof(buf1),
					     fa->fa_info->fib_scope),
				   rtn_type(buf2, sizeof(buf2),
					    fa->fa_type));
			if (fa->fa_tos)
				seq_printf(seq, " tos=%d", fa->fa_tos);
			seq_putc(seq, '\n');
		}
	}

	return 0;
}

static const struct seq_operations fib_trie_seq_ops = {
	.start  = fib_trie_seq_start,
	.next   = fib_trie_seq_next,
	.stop   = fib_trie_seq_stop,
	.show   = fib_trie_seq_show,
};

static int fib_trie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_trie_seq_ops,
			    sizeof(struct fib_trie_iter));
}

static const struct file_operations fib_trie_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_trie_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
struct fib_route_iter {
	struct seq_net_private p;
	struct fib_table *main_tb;
	struct tnode *tnode;
	loff_t	pos;
	t_key	key;
};

static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
{
	struct fib_table *tb = iter->main_tb;
	struct tnode *l, **tp = &iter->tnode;
	struct trie *t;
	t_key key;

	/* use cache location of next-to-find key */
	if (iter->pos > 0 && pos >= iter->pos) {
		pos -= iter->pos;
		key = iter->key;
	} else {
		t = (struct trie *)tb->tb_data;
		iter->tnode = rcu_dereference_rtnl(t->trie);
		iter->pos = 0;
		key = 0;
	}

	while ((l = leaf_walk_rcu(tp, key)) != NULL) {
		key = l->key + 1;
		iter->pos++;

		if (pos-- <= 0)
			break;

		l = NULL;

		/* handle unlikely case of a key wrap */
		if (!key)
			break;
	}

	if (l)
		iter->key = key;	/* remember it */
	else
		iter->pos = 0;		/* forget it */

	return l;
}

static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_route_iter *iter = seq->private;
	struct fib_table *tb;
	struct trie *t;

	rcu_read_lock();

	tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
	if (!tb)
		return NULL;

	iter->main_tb = tb;

	if (*pos != 0)
		return fib_route_get_idx(iter, *pos);

	t = (struct trie *)tb->tb_data;
	iter->tnode = rcu_dereference_rtnl(t->trie);
	iter->pos = 0;
	iter->key = 0;

	return SEQ_START_TOKEN;
}

static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_route_iter *iter = seq->private;
	struct tnode *l = NULL;
	t_key key = iter->key;

	++*pos;

	/* only allow key of 0 for start of sequence */
	if ((v == SEQ_START_TOKEN) || key)
		l = leaf_walk_rcu(&iter->tnode, key);

	if (l) {
		iter->key = l->key + 1;
		iter->pos++;
	} else {
		iter->pos = 0;
	}

	return l;
}

static void fib_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
	unsigned int flags = 0;

	if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
		flags = RTF_REJECT;
	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 *	This outputs /proc/net/route.
 *	The format of the file is not supposed to be changed
 *	and needs to be same as fib_hash output to avoid breaking
 *	legacy utilities.
 */
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct fib_alias *fa;
	struct tnode *l = v;
	__be32 prefix;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	prefix = htonl(l->key);

	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
		const struct fib_info *fi = fa->fa_info;
		__be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen);
		unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);

		if ((fa->fa_type == RTN_BROADCAST) ||
		    (fa->fa_type == RTN_MULTICAST))
			continue;

		seq_setwidth(seq, 127);

		if (fi)
			seq_printf(seq,
				   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
				   "%d\t%08X\t%d\t%u\t%u",
				   fi->fib_dev ? fi->fib_dev->name : "*",
				   prefix,
				   fi->fib_nh->nh_gw, flags, 0, 0,
				   fi->fib_priority,
				   mask,
				   (fi->fib_advmss ?
				    fi->fib_advmss + 40 : 0),
				   fi->fib_window,
				   fi->fib_rtt >> 3);
		else
			seq_printf(seq,
				   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
				   "%d\t%08X\t%d\t%u\t%u",
				   prefix, 0, flags, 0, 0, 0,
				   mask, 0, 0, 0);

		seq_pad(seq, '\n');
	}

	return 0;
}
static const struct seq_operations fib_route_seq_ops = {
	.start  = fib_route_seq_start,
	.next   = fib_route_seq_next,
	.stop   = fib_route_seq_stop,
	.show   = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_route_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

int __net_init fib_proc_init(struct net *net)
{
	if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
		goto out1;

	if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
			 &fib_triestat_fops))
		goto out2;

	if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
		goto out3;

	return 0;

out3:
	remove_proc_entry("fib_triestat", net->proc_net);
out2:
	remove_proc_entry("fib_trie", net->proc_net);
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	remove_proc_entry("fib_trie", net->proc_net);
	remove_proc_entry("fib_triestat", net->proc_net);
	remove_proc_entry("route", net->proc_net);
}

#endif /* CONFIG_PROC_FS */