netfilter: add and use jump label for xt_tee
author Florian Westphal <fw@strlen.de>
Tue, 14 Jul 2015 15:51:09 +0000 (17:51 +0200)
committer Pablo Neira Ayuso <pablo@netfilter.org>
Wed, 15 Jul 2015 16:18:06 +0000 (18:18 +0200)
Don't bother testing whether we need to switch to the alternate jump stack
unless the TEE target is used.

Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
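
[Editor's note: a minimal sketch of the jump-label pattern this patch relies
on; the demo_* names are illustrative, only the static_key API calls are
real. The key starts out false, so static_key_false() compiles to a
straight-line NOP on the packet path, and static_key_slow_inc() /
static_key_slow_dec() patch the branch in and out when a -j TEE rule is
added or removed.]

	#include <linux/static_key.h>

	/* illustrative key, analogous to xt_tee_enabled */
	static struct static_key demo_enabled __read_mostly;

	static unsigned long demo_slow_hits;

	static void demo_hot_path(void)
	{
		/* NOP until demo_enabled has been incremented at least once */
		if (static_key_false(&demo_enabled))
			demo_slow_hits++;	/* rarely taken, runtime-patched branch */
	}

	static void demo_enable(void)		/* e.g. from a target's checkentry */
	{
		static_key_slow_inc(&demo_enabled);
	}

	static void demo_disable(void)		/* e.g. from a target's destroy */
	{
		static_key_slow_dec(&demo_enabled);
	}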
include/linux/netfilter/x_tables.h
net/ipv4/netfilter/ip_tables.c
net/ipv6/netfilter/ip6_tables.c
net/netfilter/x_tables.c
net/netfilter/xt_TEE.c

diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 149284557ca70f7fa14c67258f9df1f258212db3..b006b719183fc40aef93d6eae1e90f302979a4dc 100644
@@ -3,6 +3,7 @@
 
 
 #include <linux/netdevice.h>
+#include <linux/static_key.h>
 #include <uapi/linux/netfilter/x_tables.h>
 
 /**
@@ -280,6 +281,12 @@ void xt_free_table_info(struct xt_table_info *info);
  */
 DECLARE_PER_CPU(seqcount_t, xt_recseq);
 
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
 /**
  * xt_write_recseq_begin - start of a write section
  *
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a2e4b018a254fd6c285d08bbdd4cbefafbcb99bd..ff585bdbf850ad42d9ecd955b0263188d1e78901 100644
@@ -340,7 +340,8 @@ ipt_do_table(struct sk_buff *skb,
         * For recursion via REJECT or SYNPROXY the stack will be clobbered
         * but it is no problem since absolute verdict is issued by these.
         */
-       jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
+       if (static_key_false(&xt_tee_enabled))
+               jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
        e = get_entry(table_base, private->hook_entry[hook]);
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 531281f0ff86e71c29cf31d866b7dabadf71ed83..ea6d105063c2d4b886684babf92131c9fb1f6416 100644
@@ -366,7 +366,8 @@ ip6t_do_table(struct sk_buff *skb,
         * For recursion via REJECT or SYNPROXY the stack will be clobbered
         * but it is no problem since absolute verdict is issued by these.
         */
-       jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
+       if (static_key_false(&xt_tee_enabled))
+               jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
        e = get_entry(table_base, private->hook_entry[hook]);
 
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 154447e519ab1f75b5203a46a34ef7adf568000b..9b42b5ea6dcd68c8398c501aa5af81b6dfa83ae8 100644
@@ -727,6 +727,9 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock);
 DEFINE_PER_CPU(seqcount_t, xt_recseq);
 EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
 
+struct static_key xt_tee_enabled __read_mostly;
+EXPORT_SYMBOL_GPL(xt_tee_enabled);
+
 static int xt_jumpstack_alloc(struct xt_table_info *i)
 {
        unsigned int size;
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 8950e79c4dc935b8f265d87537677cd4df5f0a14..c5d6556dbc5e407cffca198ac5fe66b97a0cb908 100644
@@ -251,6 +251,7 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
        } else
                info->priv = NULL;
 
+       static_key_slow_inc(&xt_tee_enabled);
        return 0;
 }
 
@@ -262,6 +263,7 @@ static void tee_tg_destroy(const struct xt_tgdtor_param *par)
                unregister_netdevice_notifier(&info->priv->notifier);
                kfree(info->priv);
        }
+       static_key_slow_dec(&xt_tee_enabled);
 }
 
 static struct xt_target tee_tg_reg[] __read_mostly = {