/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
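/*
 * Illustrative note (not part of the original file): SMP_ALIGN() rounds
 * its argument up to the next cacheline boundary.  Assuming
 * SMP_CACHE_BYTES == 64: SMP_ALIGN(1) == 64, SMP_ALIGN(64) == 64 and
 * SMP_ALIGN(65) == 128.
 */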
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit userland */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	/* Unwind the targets registered before the failure. */
	xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
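/*
 * Usage sketch (illustrative, not part of this file): an extension module
 * typically registers a whole array of targets from its init hook and
 * relies on xt_register_targets() unwinding on failure.  All "demo_"
 * names below are hypothetical.
 */
#if 0
static struct xt_target demo_tg_reg[] __read_mostly = {
	{
		.name     = "DEMO",
		.revision = 0,
		.family   = NFPROTO_IPV4,
		.target   = demo_tg,	/* hypothetical target hook */
		.me       = THIS_MODULE,
	},
	{
		.name     = "DEMO",
		.revision = 0,
		.family   = NFPROTO_IPV6,
		.target   = demo_tg,
		.me       = THIS_MODULE,
	},
};

static int __init demo_tg_init(void)
{
	/* registers both entries, or none if one of them fails */
	return xt_register_targets(demo_tg_reg, ARRAY_SIZE(demo_tg_reg));
}

static void __exit demo_tg_exit(void)
{
	xt_unregister_targets(demo_tg_reg, ARRAY_SIZE(demo_tg_reg));
}
#endif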
int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	/* Unwind the matches registered before the failure. */
	xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */
/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
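/*
 * Illustrative note (not in the original file): the "%st_%s" format used
 * by the two request_* helpers above builds a module alias from
 * xt_prefix[], so a missing extension triggers a modprobe request for
 * e.g. "ipt_REJECT" (NFPROTO_IPV4), "ip6t_hl" (NFPROTO_IPV6) or
 * "xt_conntrack" (NFPROTO_UNSPEC).
 */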
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
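/*
 * Usage sketch (illustrative only): the revision getsockopt handlers of
 * the per-family table code feed a name/revision pair from userspace
 * straight into xt_find_revision().  "demo_" names are hypothetical.
 */
#if 0
static int demo_get_revision(const char *name, u8 revision, int target)
{
	int ret;

	if (xt_find_revision(NFPROTO_IPV4, name, revision, target, &ret))
		return ret;	/* best revision, or -EPROTONOSUPPORT */
	return ret;		/* -ENOENT: caller may try request_module() */
}
#endif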
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	                                   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
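/*
 * Usage sketch (illustrative only): the compat translation paths drive
 * this bookkeeping under xt_compat_lock().  Offsets must be added in
 * ascending order because xt_compat_calc_jump() binary-searches
 * compat_tab[].  "demo_" names are hypothetical.
 */
#if 0
static int demo_compat_prepare(unsigned int nentries)
{
	int delta;

	xt_compat_lock(NFPROTO_IPV4);
	xt_compat_init_offsets(NFPROTO_IPV4, nentries);
	/* one xt_compat_add_offset() per entry, ascending offsets ... */
	xt_compat_add_offset(NFPROTO_IPV4, 0, 8);
	/* accumulated size delta for all entries at/below offset 100 */
	delta = xt_compat_calc_jump(NFPROTO_IPV4, 100);
	xt_compat_flush_offsets(NFPROTO_IPV4);
	xt_compat_unlock(NFPROTO_IPV4);
	return delta;
}
#endif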
int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */
/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	const struct xt_entry_target *t;
	const char *e = base;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_entry_offsets);
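/*
 * Illustrative note (not in the original file): the first check above
 * guarantees there is room for a bare xt_entry_target header before
 * next_offset; the second re-checks against the size the target header
 * actually claims, so an entry whose target declares u.target_size
 * larger than the space left up to next_offset is rejected even though
 * the header itself fitted.
 */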
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif /* CONFIG_COMPAT */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!info) {
		info = vmalloc(sz);
		if (!info)
			return NULL;
	}
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
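/*
 * Usage sketch (illustrative only): callers size the blob for their rule
 * entries, copy them in behind the header and release everything with
 * xt_free_table_info(), which also drops any jumpstacks attached later
 * by xt_jumpstack_alloc().  "demo_" names are hypothetical.
 */
#if 0
static struct xt_table_info *demo_build_blob(const void *entries,
					     unsigned int size)
{
	struct xt_table_info *info = xt_alloc_table_info(size);

	if (!info)
		return NULL;
	memcpy(info->entries, entries, size);	/* entries follow the header */
	return info;	/* eventually freed via xt_free_table_info() */
}
#endif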
/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}
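/*
 * Worked example (illustrative, not from the original source): with
 * i->stacksize == 4 and 8-byte pointers, each CPU gets
 * sizeof(void *) * 4 * 2 == 64 bytes -- slots 0..3 for the initial
 * traversal and slots 4..7 for one re-entry via -j TEE.
 */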
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
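/*
 * Usage sketch (illustrative only): __do_replace() in the per-family
 * table code is the canonical caller.  It passes the counter count seen
 * by userspace so the swap is refused with -EAGAIN if the ruleset
 * changed underneath, then harvests counters from the returned old
 * private before freeing it.  "demo_" names are hypothetical.
 */
#if 0
static int demo_swap(struct xt_table *t, struct xt_table_info *newinfo,
		     unsigned int num_counters)
{
	struct xt_table_info *old;
	int ret = 0;

	old = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!old)
		return ret;	/* -EAGAIN, -ENOMEM, ... */
	/* ... get_counters(old, ...) would run here ... */
	xt_free_table_info(old);
	return 0;
}
#endif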
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */
/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);
/**
 * xt_hook_unlink - remove hooks for a table
 * @table:	table whose valid_hooks mask was used in xt_hook_link()
 * @ops:	nf_hook_ops array as returned by xt_hook_link()
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);
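/*
 * Usage sketch (illustrative only): a table module registers the table
 * first and only then wires up the hooks, tearing them down in reverse
 * order on exit.  "demo_" names are hypothetical.
 */
#if 0
static struct nf_hook_ops *demo_ops;

static int demo_table_net_init(struct net *net)
{
	/* ... assume xt_register_table(net, &demo_table, ...) succeeded ... */
	demo_ops = xt_hook_link(&demo_table, demo_hook_fn);
	return PTR_ERR_OR_ZERO(demo_ops);
}

static void demo_table_net_exit(struct net *net)
{
	xt_hook_unlink(&demo_table, demo_ops);
	/* ... followed by xt_unregister_table() ... */
}
#endif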
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);