/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
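
/* Every field of an ipt_ip rule can be negated via the corresponding bit
 * in ->invflags; the FWINV() helper below simply XORs the raw comparison
 * result with that bit, so e.g. a rule with IPT_INV_SRCIP set matches
 * every source address *outside* the configured prefix.
 */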
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : "");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : "");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_entry *e)
{
	static const struct ipt_ip uncond;

	return e->target_offset == sizeof(struct ipt_entry) &&
	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
			return 1;
		}
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
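
/* Rule traversal sketch (see ipt_do_table() below): a standard target
 * with a negative verdict encodes NF_ACCEPT/NF_DROP/... as -verdict - 1,
 * while XT_RETURN pops back to the calling chain and a non-negative
 * verdict is a byte offset to jump to.  Jumps into user-defined chains
 * push the current rule onto a per-cpu jumpstack so a later RETURN can
 * resume just after the jump point; running off the end of a base chain
 * falls back to that hook's underflow (policy) rule.
 */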
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     const struct nf_hook_state *state,
	     struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	ip = ip_hdr(skb);
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu = smp_processor_id();
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries;
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u), UF %p\n",
		 table->name, hook,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--stackidx];
					pr_debug("Pulled %p out from pos %u\n",
						 e, stackidx);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				jumpstack[stackidx++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, stackidx - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
static int
check_entry(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;

	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}
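
/* An underflow entry is the policy rule at the tail of a built-in chain:
 * it must be an unconditional STANDARD verdict of ACCEPT or DROP so that
 * a packet falling off the end of the chain always gets a final verdict.
 * check_underflow() below enforces exactly that.
 */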
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(e->counters.pcnt);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
}
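
/* Rule counters are kept per cpu; get_counters() folds them into a single
 * array for userspace, sampling each cpu under its xt_recseq sequence
 * counter so a concurrent writer is detected and the read retried.
 */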
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
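
/* The CONFIG_COMPAT code below lets 32-bit iptables binaries manage
 * tables on a 64-bit kernel.  struct ipt_entry and the match/target
 * headers have different sizes and alignment in the two ABIs, so each
 * entry is converted and every stored offset (target_offset,
 * next_offset, hook entry points and verdict jumps) is shifted by the
 * accumulated size delta recorded via xt_compat_add_offset().
 */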
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
int v = *(compat_int_t *)src;
v += xt_compat_calc_jump(AF_INET, v);
memcpy(dst, &v, sizeof(v));
static int compat_standard_to_user(void __user *dst, const void *src)
compat_int_t cv = *(int *)src;
cv -= xt_compat_calc_jump(AF_INET, cv);
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
static int compat_calc_entry(const struct ipt_entry *e,
const struct xt_table_info *info,
const void *base, struct xt_table_info *newinfo)
const struct xt_entry_match *ematch;
const struct xt_entry_target *t;
unsigned int entry_offset;
off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
entry_offset = (void *)e - base;
xt_ematch_foreach(ematch, e)
off += xt_compat_match_offset(ematch->u.kernel.match);
t = ipt_get_target_c(e);
off += xt_compat_target_offset(t->u.kernel.target);
newinfo->size -= off;
ret = xt_compat_add_offset(AF_INET, entry_offset, off);
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
if (info->hook_entry[i] &&
(e < (struct ipt_entry *)(base + info->hook_entry[i])))
newinfo->hook_entry[i] -= off;
if (info->underflow[i] &&
(e < (struct ipt_entry *)(base + info->underflow[i])))
newinfo->underflow[i] -= off;
static int compat_table_info(const struct xt_table_info *info,
struct xt_table_info *newinfo)
struct ipt_entry *iter;
const void *loc_cpu_entry;
if (!newinfo || !info)
/* we dont care about newinfo->entries */
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries;
xt_compat_init_offsets(AF_INET, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
static int get_info(struct net *net, void __user *user,
const int *len, int compat)
char name[XT_TABLE_MAXNAMELEN];
if (*len != sizeof(struct ipt_getinfo)) {
duprintf("length %u != %zu\n", *len,
sizeof(struct ipt_getinfo));
if (copy_from_user(name, user, sizeof(name)) != 0)
name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
xt_compat_lock(AF_INET);
t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
"iptable_%s", name);
if (!IS_ERR_OR_NULL(t)) {
struct ipt_getinfo info;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(AF_INET);
memset(&info, 0, sizeof(info));
info.valid_hooks = t->valid_hooks;
memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
memcpy(info.underflow, private->underflow,
sizeof(info.underflow));
info.num_entries = private->number;
info.size = private->size;
strcpy(info.name, name);
if (copy_to_user(user, &info, *len) != 0)
ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
xt_compat_unlock(AF_INET);
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
struct ipt_get_entries get;
if (*len < sizeof(get)) {
duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
if (copy_from_user(&get, uptr, sizeof(get)) != 0)
if (*len != sizeof(struct ipt_get_entries) + get.size) {
duprintf("get_entries: %u != %zu\n",
*len, sizeof(get) + get.size);
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR_OR_NULL(t)) {
const struct xt_table_info *private = t->private;
duprintf("t->private->number = %u\n", private->number);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
duprintf("get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = t ? PTR_ERR(t) : -ENOENT;
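
/* __do_replace() installs a fully validated table: xt_replace_table()
 * swaps in the new xt_table_info, after which the old table's counters
 * are harvested with get_counters(), copied to userspace and its entries
 * torn down, so a replacement never loses counter updates made before
 * the swap.
 */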
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct xt_table_info *newinfo, unsigned int num_counters,
void __user *counters_ptr)
struct xt_table_info *oldinfo;
struct xt_counters *counters;
struct ipt_entry *iter;
counters = vzalloc(num_counters * sizeof(struct xt_counters));
t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
"iptable_%s", name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free_newinfo_counters_untrans;
if (valid_hooks != t->valid_hooks) {
duprintf("Valid hook crap: %08X vs %08X\n",
valid_hooks, t->valid_hooks);
oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
/* Update module usage count based on number of rules */
duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
oldinfo->number, oldinfo->initial_entries, newinfo->number);
if ((oldinfo->number > oldinfo->initial_entries) ||
(newinfo->number <= oldinfo->initial_entries))
if ((oldinfo->number > oldinfo->initial_entries) &&
(newinfo->number <= oldinfo->initial_entries))
/* Get the old counters, and synchronize with replace */
get_counters(oldinfo, counters);
/* Decrease module usage counts and free resource */
xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
cleanup_entry(iter, net);
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
sizeof(struct xt_counters) * num_counters) != 0) {
/* Silent error, can't fail, new table is already in place */
net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
free_newinfo_counters_untrans:
do_replace(struct net *net, const void __user *user, unsigned int len)
struct ipt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
struct ipt_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
if (tmp.num_counters == 0)
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
loc_cpu_entry = newinfo->entries;
if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
duprintf("Translated table\n");
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, tmp.counters);
goto free_newinfo_untrans;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
xt_free_table_info(newinfo);
do_add_counters(struct net *net, const void __user *user,
unsigned int len, int compat)
struct xt_counters_info tmp;
struct xt_counters *paddc;
unsigned int num_counters;
const struct xt_table_info *private;
struct ipt_entry *iter;
unsigned int addend;
#ifdef CONFIG_COMPAT
struct compat_xt_counters_info compat_tmp;
size = sizeof(struct compat_xt_counters_info);
size = sizeof(struct xt_counters_info);
if (copy_from_user(ptmp, user, size) != 0)
#ifdef CONFIG_COMPAT
num_counters = compat_tmp.num_counters;
name = compat_tmp.name;
num_counters = tmp.num_counters;
if (len != size + num_counters * sizeof(struct xt_counters))
paddc = vmalloc(len - size);
if (copy_from_user(paddc, user + size, len - size) != 0) {
t = xt_find_table_lock(net, AF_INET, name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
private = t->private;
if (private->number != num_counters) {
goto unlock_up_free;
addend = xt_write_recseq_begin();
xt_entry_foreach(iter, private->entries, private->size) {
struct xt_counters *tmp;
tmp = xt_get_this_cpu_counter(&iter->counters);
ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
xt_write_recseq_end(addend);
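
/* Note: like the packet path, the counter additions above run inside an
 * xt_write_recseq_begin()/end() section, which is what lets
 * get_counters() take a consistent per-cpu snapshot concurrently.
 */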
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
char name[XT_TABLE_MAXNAMELEN];
u32 hook_entry[NF_INET_NUMHOOKS];
u32 underflow[NF_INET_NUMHOOKS];
compat_uptr_t counters;	/* struct xt_counters * */
struct compat_ipt_entry entries[0];
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
unsigned int *size, struct xt_counters *counters,
struct xt_entry_target *t;
struct compat_ipt_entry __user *ce;
u_int16_t target_offset, next_offset;
compat_uint_t origsize;
const struct xt_entry_match *ematch;
ce = (struct compat_ipt_entry __user *)*dstptr;
if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
copy_to_user(&ce->counters, &counters[i],
sizeof(counters[i])) != 0)
*dstptr += sizeof(struct compat_ipt_entry);
*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
xt_ematch_foreach(ematch, e) {
ret = xt_compat_match_to_user(ematch, dstptr, size);
target_offset = e->target_offset - (origsize - *size);
t = ipt_get_target(e);
ret = xt_compat_target_to_user(t, dstptr, size);
next_offset = e->next_offset - (origsize - *size);
if (put_user(target_offset, &ce->target_offset) != 0 ||
put_user(next_offset, &ce->next_offset) != 0)
compat_find_calc_match(struct xt_entry_match *m,
const struct ipt_ip *ip,
struct xt_match *match;
match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
m->u.user.revision);
if (IS_ERR(match)) {
duprintf("compat_check_calc_match: `%s' not found\n",
return PTR_ERR(match);
m->u.kernel.match = match;
*size += xt_compat_match_offset(match);
static void compat_release_entry(struct compat_ipt_entry *e)
struct xt_entry_target *t;
struct xt_entry_match *ematch;
/* Cleanup all matches */
xt_ematch_foreach(ematch, e)
module_put(ematch->u.kernel.match->me);
t = compat_ipt_get_target(e);
module_put(t->u.kernel.target->me);
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
if (e->next_offset < sizeof(struct compat_ipt_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ipt_entry *)e);
off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
entry_offset = (void *)e - (void *)base;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, name, &e->ip, &off);
goto release_matches;
t = compat_ipt_get_target(e);
target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
ret = PTR_ERR(target);
goto release_matches;
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
ret = xt_compat_add_offset(AF_INET, entry_offset, off);
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
module_put(t->u.kernel.target->me);
xt_ematch_foreach(ematch, e) {
module_put(ematch->u.kernel.match->me);
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
unsigned int *size, const char *name,
struct xt_table_info *newinfo, unsigned char *base)
struct xt_entry_target *t;
struct xt_target *target;
struct ipt_entry *de;
unsigned int origsize;
struct xt_entry_match *ematch;
de = (struct ipt_entry *)*dstptr;
memcpy(de, e, sizeof(struct ipt_entry));
memcpy(&de->counters, &e->counters, sizeof(e->counters));
*dstptr += sizeof(struct ipt_entry);
*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
xt_ematch_foreach(ematch, e) {
ret = xt_compat_match_from_user(ematch, dstptr, size);
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ipt_get_target(e);
target = t->u.kernel.target;
xt_compat_target_from_user(t, dstptr, size);
de->next_offset = e->next_offset - (origsize - *size);
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)de - base < newinfo->hook_entry[h])
newinfo->hook_entry[h] -= origsize - *size;
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
struct xt_entry_match *ematch;
struct xt_mtchk_param mtpar;
e->counters.pcnt = xt_percpu_counter_alloc();
if (IS_ERR_VALUE(e->counters.pcnt))
mtpar.entryinfo = &e->ip;
mtpar.hook_mask = e->comefrom;
mtpar.family = NFPROTO_IPV4;
xt_ematch_foreach(ematch, e) {
ret = check_match(ematch, &mtpar);
goto cleanup_matches;
ret = check_target(e, net, name);
goto cleanup_matches;
xt_ematch_foreach(ematch, e) {
cleanup_match(ematch, net);
xt_percpu_counter_free(e->counters.pcnt);
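
/* translate_compat_table() works in two passes: first each compat entry
 * is size-checked and its offset delta recorded while the AF_INET compat
 * lock is held, then the entries are expanded into a freshly allocated
 * native table and run through the usual checks (mark_source_chains()
 * and compat_check_entry()).
 */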
translate_compat_table(struct net *net,
unsigned int valid_hooks,
struct xt_table_info **pinfo,
unsigned int total_size,
unsigned int number,
unsigned int *hook_entries,
unsigned int *underflows)
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_ipt_entry *iter0;
struct ipt_entry *iter1;
info->number = number;
/* Init all hooks to impossible value. */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
info->hook_entry[i] = 0xFFFFFFFF;
info->underflow[i] = 0xFFFFFFFF;
duprintf("translate_compat_table: size %u\n", info->size);
xt_compat_lock(AF_INET);
xt_compat_init_offsets(AF_INET, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0 + total_size,
duprintf("translate_compat_table: %u not %u entries\n",
/* Check hooks all assigned */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
/* Only hooks which are valid */
if (!(valid_hooks & (1 << i)))
if (info->hook_entry[i] == 0xFFFFFFFF) {
duprintf("Invalid hook entry %u %u\n",
i, hook_entries[i]);
if (info->underflow[i] == 0xFFFFFFFF) {
duprintf("Invalid underflow %u %u\n",
newinfo = xt_alloc_table_info(size);
newinfo->number = number;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
newinfo->hook_entry[i] = info->hook_entry[i];
newinfo->underflow[i] = info->underflow[i];
entry1 = newinfo->entries;
xt_entry_foreach(iter0, entry0, total_size) {
ret = compat_copy_entry_from_user(iter0, &pos, &size,
name, newinfo, entry1);
xt_compat_flush_offsets(AF_INET);
xt_compat_unlock(AF_INET);
if (!mark_source_chains(newinfo, valid_hooks, entry1))
xt_entry_foreach(iter1, entry1, newinfo->size) {
ret = compat_check_entry(iter1, net, name);
if (strcmp(ipt_get_target(iter1)->u.user.name,
XT_ERROR_TARGET) == 0)
++newinfo->stacksize;
* The first i matches need cleanup_entry (calls ->destroy)
* because they had called ->check already. The other j-i
* entries need only release.
xt_entry_foreach(iter0, entry0, newinfo->size) {
compat_release_entry(iter0);
xt_entry_foreach(iter1, entry1, newinfo->size) {
cleanup_entry(iter1, net);
xt_free_table_info(newinfo);
xt_free_table_info(info);
xt_free_table_info(newinfo);
xt_entry_foreach(iter0, entry0, total_size) {
compat_release_entry(iter0);
xt_compat_flush_offsets(AF_INET);
xt_compat_unlock(AF_INET);
compat_do_replace(struct net *net, void __user *user, unsigned int len)
struct compat_ipt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
struct ipt_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
/* overflow check */
if (tmp.size >= INT_MAX / num_possible_cpus())
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
if (tmp.num_counters == 0)
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
loc_cpu_entry = newinfo->entries;
if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
&newinfo, &loc_cpu_entry, tmp.size,
tmp.num_entries, tmp.hook_entry,
duprintf("compat_do_replace: Translated table\n");
ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
tmp.num_counters, compat_ptr(tmp.counters));
goto free_newinfo_untrans;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
cleanup_entry(iter, net);
xt_free_table_info(newinfo);
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
case IPT_SO_SET_REPLACE:
ret = compat_do_replace(sock_net(sk), user, len);
case IPT_SO_SET_ADD_COUNTERS:
ret = do_add_counters(sock_net(sk), user, len, 1);
duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
struct compat_ipt_get_entries {
char name[XT_TABLE_MAXNAMELEN];
struct compat_ipt_entry entrytable[0];
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
struct ipt_entry *iter;
counters = alloc_counters(table);
if (IS_ERR(counters))
return PTR_ERR(counters);
xt_entry_foreach(iter, private->entries, total_size) {
ret = compat_copy_entry_to_user(iter, &pos,
&size, counters, i++);
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
struct compat_ipt_get_entries get;
if (*len < sizeof(get)) {
duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
if (copy_from_user(&get, uptr, sizeof(get)) != 0)
if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
duprintf("compat_get_entries: %u != %zu\n",
*len, sizeof(get) + get.size);
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR_OR_NULL(t)) {
const struct xt_table_info *private = t->private;
struct xt_table_info info;
duprintf("t->private->number = %u\n", private->number);
ret = compat_table_info(private, &info);
if (!ret && get.size == info.size) {
ret = compat_copy_entries_to_user(private->size,
t, uptr->entrytable);
duprintf("compat_get_entries: I've got %u not %u!\n",
private->size, get.size);
xt_compat_flush_offsets(AF_INET);
ret = t ? PTR_ERR(t) : -ENOENT;
xt_compat_unlock(AF_INET);
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
case IPT_SO_GET_INFO:
ret = get_info(sock_net(sk), user, len, 1);
case IPT_SO_GET_ENTRIES:
ret = compat_get_entries(sock_net(sk), user, len);
ret = do_ipt_get_ctl(sk, cmd, user, len);
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
case IPT_SO_SET_REPLACE:
ret = do_replace(sock_net(sk), user, len);
case IPT_SO_SET_ADD_COUNTERS:
ret = do_add_counters(sock_net(sk), user, len, 0);
duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
case IPT_SO_GET_INFO:
ret = get_info(sock_net(sk), user, len, 0);
case IPT_SO_GET_ENTRIES:
ret = get_entries(sock_net(sk), user, len);
case IPT_SO_GET_REVISION_MATCH:
case IPT_SO_GET_REVISION_TARGET: {
struct xt_get_revision rev;
if (*len != sizeof(rev)) {
if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
rev.name[sizeof(rev.name)-1] = 0;
if (cmd == IPT_SO_GET_REVISION_TARGET)
try_then_request_module(xt_find_revision(AF_INET, rev.name,
"ipt_%s", rev.name);
duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
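
/* Built-in "icmp" protocol match: a rule type of 0xFF acts as a wildcard
 * matching every ICMP type, otherwise both the type and the code range
 * must match; IPT_ICMP_INV inverts the result of that comparison.
 */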
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}

static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
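
/* Note: the standard target below deliberately has no ->target()
 * callback; ipt_do_table() detects that and interprets the verdict field
 * inline (accept/drop/return or a jump offset).  The ERROR target is
 * what iptables uses to mark user-defined chain heads (carrying the
 * chain name); if its ->target() ever runs on a packet it just logs and
 * drops via ipt_error().
 */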
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);