/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* Compiled away unless the matching DEBUG_* symbol is defined above. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr)
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff *skb,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
140 /* ... might want to do something with class and flowlabel here ... */
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
145 unsigned short _frag_off;
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
153 *fragoff = _frag_off;
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
167 /* We need match for the '-p all', too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
175 /* should be ip6 safe */
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
202 /* Performance critical - called for every packet */
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset within the table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
225 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
227 static const struct ip6t_ip6 uncond;
229 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	struct ip6t_standard_target *t = (void *)ip6t_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IP6T_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ip6t_entry *e)
{
	void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	IP6T_ENTRY_ITERATE(root,
			   private->size - private->hook_entry[hook],
			   get_chainname_rulenum,
			   e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
327 static inline __pure struct ip6t_entry *
328 ip6t_next_entry(const struct ip6t_entry *entry)
330 return (void *)entry + entry->next_offset;
333 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
335 ip6t_do_table(struct sk_buff *skb,
337 const struct net_device *in,
338 const struct net_device *out,
339 struct xt_table *table)
341 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
343 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
344 bool hotdrop = false;
345 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict = NF_DROP;
347 const char *indev, *outdev;
349 struct ip6t_entry *e, *back;
350 struct xt_table_info *private;
351 struct xt_match_param mtpar;
352 struct xt_target_param tgpar;
355 indev = in ? in->name : nulldevname;
356 outdev = out ? out->name : nulldevname;
357 /* We handle fragments by dealing with the first fragment as
358 * if it was a normal packet. All other fragments are treated
359 * normally, except that they will NEVER match rules that ask
360 * things we don't know, ie. tcp syn flag or ports). If the
361 * rule is also a fragment-specific rule, non-fragments won't
363 mtpar.hotdrop = &hotdrop;
364 mtpar.in = tgpar.in = in;
365 mtpar.out = tgpar.out = out;
366 mtpar.family = tgpar.family = NFPROTO_IPV6;
367 mtpar.hooknum = tgpar.hooknum = hook;
369 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
372 private = table->private;
373 table_base = private->entries[smp_processor_id()];
375 e = get_entry(table_base, private->hook_entry[hook]);
377 /* For return from builtin chain */
378 back = get_entry(table_base, private->underflow[hook]);
381 struct ip6t_entry_target *t;
385 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
387 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
388 e = ip6t_next_entry(e);
392 ADD_COUNTER(e->counters,
393 ntohs(ipv6_hdr(skb)->payload_len) +
394 sizeof(struct ipv6hdr), 1);
396 t = ip6t_get_target(e);
397 IP_NF_ASSERT(t->u.kernel.target);
399 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
400 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
401 /* The packet is traced: log it */
402 if (unlikely(skb->nf_trace))
403 trace_packet(skb, hook, in, out,
404 table->name, private, e);
406 /* Standard target? */
407 if (!t->u.kernel.target->target) {
410 v = ((struct ip6t_standard_target *)t)->verdict;
412 /* Pop from stack? */
413 if (v != IP6T_RETURN) {
414 verdict = (unsigned)(-v) - 1;
418 back = get_entry(table_base, back->comefrom);
421 if (table_base + v != ip6t_next_entry(e)
422 && !(e->ipv6.flags & IP6T_F_GOTO)) {
423 /* Save old back ptr in next entry */
424 struct ip6t_entry *next = ip6t_next_entry(e);
425 next->comefrom = (void *)back - table_base;
426 /* set back pointer to next entry */
430 e = get_entry(table_base, v);
434 /* Targets which reenter must return
436 tgpar.target = t->u.kernel.target;
437 tgpar.targinfo = t->data;
439 #ifdef CONFIG_NETFILTER_DEBUG
440 tb_comefrom = 0xeeeeeeec;
442 verdict = t->u.kernel.target->target(skb, &tgpar);
444 #ifdef CONFIG_NETFILTER_DEBUG
445 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
450 tb_comefrom = 0x57acc001;
452 if (verdict == IP6T_CONTINUE)
453 e = ip6t_next_entry(e);
459 #ifdef CONFIG_NETFILTER_DEBUG
460 tb_comefrom = NETFILTER_LINK_POISON;
462 xt_info_rdunlock_bh();
464 #ifdef DEBUG_ALLOW_ALL
475 /* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */
478 mark_source_chains(struct xt_table_info *newinfo,
479 unsigned int valid_hooks, void *entry0)
483 /* No recursion; use packet counter to save back ptrs (reset
484 to 0 as we leave), and comefrom to save source hook bitmask */
485 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
486 unsigned int pos = newinfo->hook_entry[hook];
487 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
489 if (!(valid_hooks & (1 << hook)))
492 /* Set initial back pointer. */
493 e->counters.pcnt = pos;
496 struct ip6t_standard_target *t
497 = (void *)ip6t_get_target(e);
498 int visited = e->comefrom & (1 << hook);
500 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
501 printk("iptables: loop hook %u pos %u %08X.\n",
502 hook, pos, e->comefrom);
505 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
507 /* Unconditional return/END. */
508 if ((e->target_offset == sizeof(struct ip6t_entry)
509 && (strcmp(t->target.u.user.name,
510 IP6T_STANDARD_TARGET) == 0)
512 && unconditional(&e->ipv6)) || visited) {
513 unsigned int oldpos, size;
515 if ((strcmp(t->target.u.user.name,
516 IP6T_STANDARD_TARGET) == 0) &&
517 t->verdict < -NF_MAX_VERDICT - 1) {
518 duprintf("mark_source_chains: bad "
519 "negative verdict (%i)\n",
524 /* Return: backtrack through the last
527 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
528 #ifdef DEBUG_IP_FIREWALL_USER
530 & (1 << NF_INET_NUMHOOKS)) {
531 duprintf("Back unset "
538 pos = e->counters.pcnt;
539 e->counters.pcnt = 0;
541 /* We're at the start. */
545 e = (struct ip6t_entry *)
547 } while (oldpos == pos + e->next_offset);
550 size = e->next_offset;
551 e = (struct ip6t_entry *)
552 (entry0 + pos + size);
553 e->counters.pcnt = pos;
556 int newpos = t->verdict;
558 if (strcmp(t->target.u.user.name,
559 IP6T_STANDARD_TARGET) == 0
561 if (newpos > newinfo->size -
562 sizeof(struct ip6t_entry)) {
563 duprintf("mark_source_chains: "
564 "bad verdict (%i)\n",
568 /* This a jump; chase it. */
569 duprintf("Jump rule %u -> %u\n",
572 /* ... this is a fallthru */
573 newpos = pos + e->next_offset;
575 e = (struct ip6t_entry *)
577 e->counters.pcnt = pos;
582 duprintf("Finished chain %u\n", hook);
588 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
590 struct xt_mtdtor_param par;
592 if (i && (*i)-- == 0)
595 par.match = m->u.kernel.match;
596 par.matchinfo = m->data;
597 par.family = NFPROTO_IPV6;
598 if (par.match->destroy != NULL)
599 par.match->destroy(&par);
600 module_put(par.match->me);
605 check_entry(struct ip6t_entry *e, const char *name)
607 struct ip6t_entry_target *t;
609 if (!ip6_checkentry(&e->ipv6)) {
610 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
614 if (e->target_offset + sizeof(struct ip6t_entry_target) >
618 t = ip6t_get_target(e);
619 if (e->target_offset + t->u.target_size > e->next_offset)
625 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
628 const struct ip6t_ip6 *ipv6 = par->entryinfo;
631 par->match = m->u.kernel.match;
632 par->matchinfo = m->data;
634 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
635 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
637 duprintf("ip_tables: check failed for `%s'.\n",
646 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
649 struct xt_match *match;
652 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
654 "ip6t_%s", m->u.user.name);
655 if (IS_ERR(match) || !match) {
656 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
657 return match ? PTR_ERR(match) : -ENOENT;
659 m->u.kernel.match = match;
661 ret = check_match(m, par, i);
667 module_put(m->u.kernel.match->me);
671 static int check_target(struct ip6t_entry *e, const char *name)
673 struct ip6t_entry_target *t = ip6t_get_target(e);
674 struct xt_tgchk_param par = {
677 .target = t->u.kernel.target,
679 .hook_mask = e->comefrom,
680 .family = NFPROTO_IPV6,
684 t = ip6t_get_target(e);
685 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
686 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
688 duprintf("ip_tables: check failed for `%s'.\n",
689 t->u.kernel.target->name);
696 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
699 struct ip6t_entry_target *t;
700 struct xt_target *target;
703 struct xt_mtchk_param mtpar;
705 ret = check_entry(e, name);
711 mtpar.entryinfo = &e->ipv6;
712 mtpar.hook_mask = e->comefrom;
713 mtpar.family = NFPROTO_IPV6;
714 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
716 goto cleanup_matches;
718 t = ip6t_get_target(e);
719 target = try_then_request_module(xt_find_target(AF_INET6,
722 "ip6t_%s", t->u.user.name);
723 if (IS_ERR(target) || !target) {
724 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
725 ret = target ? PTR_ERR(target) : -ENOENT;
726 goto cleanup_matches;
728 t->u.kernel.target = target;
730 ret = check_target(e, name);
737 module_put(t->u.kernel.target->me);
739 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
743 static bool check_underflow(struct ip6t_entry *e)
745 const struct ip6t_entry_target *t;
746 unsigned int verdict;
748 if (!unconditional(&e->ipv6))
750 t = ip6t_get_target(e);
751 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
753 verdict = ((struct ip6t_standard_target *)t)->verdict;
754 verdict = -verdict - 1;
755 return verdict == NF_DROP || verdict == NF_ACCEPT;
759 check_entry_size_and_hooks(struct ip6t_entry *e,
760 struct xt_table_info *newinfo,
762 unsigned char *limit,
763 const unsigned int *hook_entries,
764 const unsigned int *underflows,
765 unsigned int valid_hooks,
770 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
771 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
772 duprintf("Bad offset %p\n", e);
777 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
778 duprintf("checking: element %p size %u\n",
783 /* Check hooks & underflows */
784 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
785 if (!(valid_hooks & (1 << h)))
787 if ((unsigned char *)e - base == hook_entries[h])
788 newinfo->hook_entry[h] = hook_entries[h];
789 if ((unsigned char *)e - base == underflows[h]) {
790 if (!check_underflow(e)) {
791 pr_err("Underflows must be unconditional and "
792 "use the STANDARD target with "
796 newinfo->underflow[h] = underflows[h];
800 /* Clear counters and comefrom */
801 e->counters = ((struct xt_counters) { 0, 0 });
809 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
811 struct xt_tgdtor_param par;
812 struct ip6t_entry_target *t;
814 if (i && (*i)-- == 0)
817 /* Cleanup all matches */
818 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
819 t = ip6t_get_target(e);
821 par.target = t->u.kernel.target;
822 par.targinfo = t->data;
823 par.family = NFPROTO_IPV6;
824 if (par.target->destroy != NULL)
825 par.target->destroy(&par);
826 module_put(par.target->me);
830 /* Checks and translates the user-supplied table segment (held in
833 translate_table(const char *name,
834 unsigned int valid_hooks,
835 struct xt_table_info *newinfo,
839 const unsigned int *hook_entries,
840 const unsigned int *underflows)
845 newinfo->size = size;
846 newinfo->number = number;
848 /* Init all hooks to impossible value. */
849 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
850 newinfo->hook_entry[i] = 0xFFFFFFFF;
851 newinfo->underflow[i] = 0xFFFFFFFF;
854 duprintf("translate_table: size %u\n", newinfo->size);
856 /* Walk through entries, checking offsets. */
857 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
858 check_entry_size_and_hooks,
862 hook_entries, underflows, valid_hooks, &i);
867 duprintf("translate_table: %u not %u entries\n",
872 /* Check hooks all assigned */
873 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
874 /* Only hooks which are valid */
875 if (!(valid_hooks & (1 << i)))
877 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
878 duprintf("Invalid hook entry %u %u\n",
882 if (newinfo->underflow[i] == 0xFFFFFFFF) {
883 duprintf("Invalid underflow %u %u\n",
889 if (!mark_source_chains(newinfo, valid_hooks, entry0))
892 /* Finally, each sanity check must pass */
894 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
895 find_check_entry, name, size, &i);
898 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
903 /* And one copy for every other CPU */
904 for_each_possible_cpu(i) {
905 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
906 memcpy(newinfo->entries[i], entry0, newinfo->size);
914 add_entry_to_counter(const struct ip6t_entry *e,
915 struct xt_counters total[],
918 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
925 set_entry_to_counter(const struct ip6t_entry *e,
926 struct ip6t_counters total[],
929 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
936 get_counters(const struct xt_table_info *t,
937 struct xt_counters counters[])
943 /* Instead of clearing (by a previous call to memset())
944 * the counters and using adds, we set the counters
945 * with data used by 'current' CPU
947 * Bottom half has to be disabled to prevent deadlock
948 * if new softirq were to run and call ipt_do_table
951 curcpu = smp_processor_id();
954 IP6T_ENTRY_ITERATE(t->entries[curcpu],
956 set_entry_to_counter,
960 for_each_possible_cpu(cpu) {
965 IP6T_ENTRY_ITERATE(t->entries[cpu],
967 add_entry_to_counter,
970 xt_info_wrunlock(cpu);
975 static struct xt_counters *alloc_counters(struct xt_table *table)
977 unsigned int countersize;
978 struct xt_counters *counters;
979 struct xt_table_info *private = table->private;
981 /* We need atomic snapshot of counters: rest doesn't change
982 (other than comefrom, which userspace doesn't care
984 countersize = sizeof(struct xt_counters) * private->number;
985 counters = vmalloc_node(countersize, numa_node_id());
987 if (counters == NULL)
988 return ERR_PTR(-ENOMEM);
990 get_counters(private, counters);
996 copy_entries_to_user(unsigned int total_size,
997 struct xt_table *table,
998 void __user *userptr)
1000 unsigned int off, num;
1001 struct ip6t_entry *e;
1002 struct xt_counters *counters;
1003 const struct xt_table_info *private = table->private;
1005 const void *loc_cpu_entry;
1007 counters = alloc_counters(table);
1008 if (IS_ERR(counters))
1009 return PTR_ERR(counters);
1011 /* choose the copy that is on our node/cpu, ...
1012 * This choice is lazy (because current thread is
1013 * allowed to migrate to another cpu)
1015 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1016 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1021 /* FIXME: use iterator macros --RR */
1022 /* ... then go back and fix counters and names */
1023 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1025 const struct ip6t_entry_match *m;
1026 const struct ip6t_entry_target *t;
1028 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1029 if (copy_to_user(userptr + off
1030 + offsetof(struct ip6t_entry, counters),
1032 sizeof(counters[num])) != 0) {
1037 for (i = sizeof(struct ip6t_entry);
1038 i < e->target_offset;
1039 i += m->u.match_size) {
1042 if (copy_to_user(userptr + off + i
1043 + offsetof(struct ip6t_entry_match,
1045 m->u.kernel.match->name,
1046 strlen(m->u.kernel.match->name)+1)
1053 t = ip6t_get_target(e);
1054 if (copy_to_user(userptr + off + e->target_offset
1055 + offsetof(struct ip6t_entry_target,
1057 t->u.kernel.target->name,
1058 strlen(t->u.kernel.target->name)+1) != 0) {
1069 #ifdef CONFIG_COMPAT
1070 static void compat_standard_from_user(void *dst, void *src)
1072 int v = *(compat_int_t *)src;
1075 v += xt_compat_calc_jump(AF_INET6, v);
1076 memcpy(dst, &v, sizeof(v));
1079 static int compat_standard_to_user(void __user *dst, void *src)
1081 compat_int_t cv = *(int *)src;
1084 cv -= xt_compat_calc_jump(AF_INET6, cv);
1085 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1089 compat_calc_match(struct ip6t_entry_match *m, int *size)
1091 *size += xt_compat_match_offset(m->u.kernel.match);
1095 static int compat_calc_entry(struct ip6t_entry *e,
1096 const struct xt_table_info *info,
1097 void *base, struct xt_table_info *newinfo)
1099 struct ip6t_entry_target *t;
1100 unsigned int entry_offset;
1103 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1104 entry_offset = (void *)e - base;
1105 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1106 t = ip6t_get_target(e);
1107 off += xt_compat_target_offset(t->u.kernel.target);
1108 newinfo->size -= off;
1109 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1113 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1114 if (info->hook_entry[i] &&
1115 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1116 newinfo->hook_entry[i] -= off;
1117 if (info->underflow[i] &&
1118 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1119 newinfo->underflow[i] -= off;
1124 static int compat_table_info(const struct xt_table_info *info,
1125 struct xt_table_info *newinfo)
1127 void *loc_cpu_entry;
1129 if (!newinfo || !info)
1132 /* we dont care about newinfo->entries[] */
1133 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1134 newinfo->initial_entries = 0;
1135 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1136 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1137 compat_calc_entry, info, loc_cpu_entry,
1142 static int get_info(struct net *net, void __user *user, int *len, int compat)
1144 char name[IP6T_TABLE_MAXNAMELEN];
1148 if (*len != sizeof(struct ip6t_getinfo)) {
1149 duprintf("length %u != %zu\n", *len,
1150 sizeof(struct ip6t_getinfo));
1154 if (copy_from_user(name, user, sizeof(name)) != 0)
1157 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1158 #ifdef CONFIG_COMPAT
1160 xt_compat_lock(AF_INET6);
1162 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1163 "ip6table_%s", name);
1164 if (t && !IS_ERR(t)) {
1165 struct ip6t_getinfo info;
1166 const struct xt_table_info *private = t->private;
1167 #ifdef CONFIG_COMPAT
1168 struct xt_table_info tmp;
1171 ret = compat_table_info(private, &tmp);
1172 xt_compat_flush_offsets(AF_INET6);
1176 info.valid_hooks = t->valid_hooks;
1177 memcpy(info.hook_entry, private->hook_entry,
1178 sizeof(info.hook_entry));
1179 memcpy(info.underflow, private->underflow,
1180 sizeof(info.underflow));
1181 info.num_entries = private->number;
1182 info.size = private->size;
1183 strcpy(info.name, name);
1185 if (copy_to_user(user, &info, *len) != 0)
1193 ret = t ? PTR_ERR(t) : -ENOENT;
1194 #ifdef CONFIG_COMPAT
1196 xt_compat_unlock(AF_INET6);
1202 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1205 struct ip6t_get_entries get;
1208 if (*len < sizeof(get)) {
1209 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1212 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1214 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1215 duprintf("get_entries: %u != %zu\n",
1216 *len, sizeof(get) + get.size);
1220 t = xt_find_table_lock(net, AF_INET6, get.name);
1221 if (t && !IS_ERR(t)) {
1222 struct xt_table_info *private = t->private;
1223 duprintf("t->private->number = %u\n", private->number);
1224 if (get.size == private->size)
1225 ret = copy_entries_to_user(private->size,
1226 t, uptr->entrytable);
1228 duprintf("get_entries: I've got %u not %u!\n",
1229 private->size, get.size);
1235 ret = t ? PTR_ERR(t) : -ENOENT;
1241 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1242 struct xt_table_info *newinfo, unsigned int num_counters,
1243 void __user *counters_ptr)
1247 struct xt_table_info *oldinfo;
1248 struct xt_counters *counters;
1249 const void *loc_cpu_old_entry;
1252 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1259 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1260 "ip6table_%s", name);
1261 if (!t || IS_ERR(t)) {
1262 ret = t ? PTR_ERR(t) : -ENOENT;
1263 goto free_newinfo_counters_untrans;
1267 if (valid_hooks != t->valid_hooks) {
1268 duprintf("Valid hook crap: %08X vs %08X\n",
1269 valid_hooks, t->valid_hooks);
1274 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1278 /* Update module usage count based on number of rules */
1279 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1280 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1281 if ((oldinfo->number > oldinfo->initial_entries) ||
1282 (newinfo->number <= oldinfo->initial_entries))
1284 if ((oldinfo->number > oldinfo->initial_entries) &&
1285 (newinfo->number <= oldinfo->initial_entries))
1288 /* Get the old counters, and synchronize with replace */
1289 get_counters(oldinfo, counters);
1291 /* Decrease module usage counts and free resource */
1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1293 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1295 xt_free_table_info(oldinfo);
1296 if (copy_to_user(counters_ptr, counters,
1297 sizeof(struct xt_counters) * num_counters) != 0)
1306 free_newinfo_counters_untrans:
1313 do_replace(struct net *net, void __user *user, unsigned int len)
1316 struct ip6t_replace tmp;
1317 struct xt_table_info *newinfo;
1318 void *loc_cpu_entry;
1320 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1323 /* overflow check */
1324 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1326 tmp.name[sizeof(tmp.name)-1] = 0;
1328 newinfo = xt_alloc_table_info(tmp.size);
1332 /* choose the copy that is on our node/cpu */
1333 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1334 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1340 ret = translate_table(tmp.name, tmp.valid_hooks,
1341 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1342 tmp.hook_entry, tmp.underflow);
1346 duprintf("ip_tables: Translated table\n");
1348 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1349 tmp.num_counters, tmp.counters);
1351 goto free_newinfo_untrans;
1354 free_newinfo_untrans:
1355 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1357 xt_free_table_info(newinfo);
1361 /* We're lazy, and add to the first CPU; overflow works its fey magic
1362 * and everything is OK. */
1364 add_counter_to_entry(struct ip6t_entry *e,
1365 const struct xt_counters addme[],
1368 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1375 do_add_counters(struct net *net, void __user *user, unsigned int len,
1378 unsigned int i, curcpu;
1379 struct xt_counters_info tmp;
1380 struct xt_counters *paddc;
1381 unsigned int num_counters;
1386 const struct xt_table_info *private;
1388 const void *loc_cpu_entry;
1389 #ifdef CONFIG_COMPAT
1390 struct compat_xt_counters_info compat_tmp;
1394 size = sizeof(struct compat_xt_counters_info);
1399 size = sizeof(struct xt_counters_info);
1402 if (copy_from_user(ptmp, user, size) != 0)
1405 #ifdef CONFIG_COMPAT
1407 num_counters = compat_tmp.num_counters;
1408 name = compat_tmp.name;
1412 num_counters = tmp.num_counters;
1416 if (len != size + num_counters * sizeof(struct xt_counters))
1419 paddc = vmalloc_node(len - size, numa_node_id());
1423 if (copy_from_user(paddc, user + size, len - size) != 0) {
1428 t = xt_find_table_lock(net, AF_INET6, name);
1429 if (!t || IS_ERR(t)) {
1430 ret = t ? PTR_ERR(t) : -ENOENT;
1436 private = t->private;
1437 if (private->number != num_counters) {
1439 goto unlock_up_free;
1443 /* Choose the copy that is on our node */
1444 curcpu = smp_processor_id();
1445 xt_info_wrlock(curcpu);
1446 loc_cpu_entry = private->entries[curcpu];
1447 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1449 add_counter_to_entry,
1452 xt_info_wrunlock(curcpu);
1464 #ifdef CONFIG_COMPAT
1465 struct compat_ip6t_replace {
1466 char name[IP6T_TABLE_MAXNAMELEN];
1470 u32 hook_entry[NF_INET_NUMHOOKS];
1471 u32 underflow[NF_INET_NUMHOOKS];
1473 compat_uptr_t counters; /* struct ip6t_counters * */
1474 struct compat_ip6t_entry entries[0];
/*
 * Copy one kernel entry out to a 32-bit user buffer as a
 * compat_ip6t_entry: shrink match/target data with the xt compat
 * helpers, attach this entry's counter snapshot, and rewrite
 * target_offset/next_offset for the smaller compat layout.
 * *dstptr and *size track the user-space destination cursor.
 */
1478 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1479 unsigned int *size, struct xt_counters *counters,
1482 struct ip6t_entry_target *t;
1483 struct compat_ip6t_entry __user *ce;
1484 u_int16_t target_offset, next_offset;
1485 compat_uint_t origsize;
1490 ce = (struct compat_ip6t_entry __user *)*dstptr;
/* Copy the native header first; offsets are patched up below. */
1491 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1494 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1497 *dstptr += sizeof(struct compat_ip6t_entry);
1498 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1500 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* (origsize - *size) is the total shrinkage so far; adjust offsets. */
1501 target_offset = e->target_offset - (origsize - *size);
1504 t = ip6t_get_target(e);
1505 ret = xt_compat_target_to_user(t, dstptr, size);
1509 next_offset = e->next_offset - (origsize - *size);
1510 if (put_user(target_offset, &ce->target_offset))
1512 if (put_user(next_offset, &ce->next_offset))
/*
 * Look up the xt_match named in a compat entry (auto-loading the
 * "ip6t_<name>" module if needed), record it in m->u.kernel.match and
 * add its native-vs-compat size delta to *size.
 * Returns 0 on success, -ENOENT/PTR_ERR on lookup failure.
 */
1522 compat_find_calc_match(struct ip6t_entry_match *m,
1524 const struct ip6t_ip6 *ipv6,
1525 unsigned int hookmask,
1526 int *size, unsigned int *i)
1528 struct xt_match *match;
1530 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1531 m->u.user.revision),
1532 "ip6t_%s", m->u.user.name);
1533 if (IS_ERR(match) || !match) {
1534 duprintf("compat_check_calc_match: `%s' not found\n",
1536 return match ? PTR_ERR(match) : -ENOENT;
1538 m->u.kernel.match = match;
/* Account for how much larger the native match layout is. */
1539 *size += xt_compat_match_offset(match);
/*
 * Drop the module reference held by one match.  When i is non-NULL it
 * is a countdown limiting how many matches to release (used when
 * unwinding after a partial setup) — the (*i)-- == 0 test presumably
 * stops iteration early; the elided line is assumed to return here.
 */
1546 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1548 if (i && (*i)-- == 0)
1551 module_put(m->u.kernel.match->me);
/*
 * Release all module references held by one compat entry: every match,
 * then the target.  i (if non-NULL) bounds how many entries to clean up
 * during error unwinding.
 */
1556 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1558 struct ip6t_entry_target *t;
1560 if (i && (*i)-- == 0)
1563 /* Cleanup all matches */
1564 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1565 t = compat_ip6t_get_target(e);
1566 module_put(t->u.kernel.target->me);
/*
 * Validate one compat entry during table translation: check alignment
 * and bounds, sanity-check next_offset, resolve all matches and the
 * target (taking module references), record the compat->native size
 * delta for this entry's offset, and note hook entry/underflow
 * positions in newinfo.  On failure the already-acquired match/target
 * references are dropped via the release paths at the bottom.
 */
1571 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1572 struct xt_table_info *newinfo,
1574 unsigned char *base,
1575 unsigned char *limit,
1576 unsigned int *hook_entries,
1577 unsigned int *underflows,
1581 struct ip6t_entry_target *t;
1582 struct xt_target *target;
1583 unsigned int entry_offset;
1587 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be properly aligned and leave room before the limit. */
1588 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1589 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1590 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover the entry header plus a target. */
1594 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1595 sizeof(struct compat_xt_entry_target)) {
1596 duprintf("checking: element %p size %u\n",
1601 /* For purposes of check_entry casting the compat entry is fine */
1602 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much bigger the native entry will be. */
1606 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1607 entry_offset = (void *)e - (void *)base;
1609 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1610 &e->ipv6, e->comefrom, &off, &j);
1612 goto release_matches;
1614 t = compat_ip6t_get_target(e);
1615 target = try_then_request_module(xt_find_target(AF_INET6,
1617 t->u.user.revision),
1618 "ip6t_%s", t->u.user.name);
1619 if (IS_ERR(target) || !target) {
1620 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1622 ret = target ? PTR_ERR(target) : -ENOENT;
1623 goto release_matches;
1625 t->u.kernel.target = target;
1627 off += xt_compat_target_offset(target);
/* Remember the size delta so offsets can be remapped later. */
1629 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1633 /* Check hooks & underflows */
1634 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1635 if ((unsigned char *)e - base == hook_entries[h])
1636 newinfo->hook_entry[h] = hook_entries[h];
1637 if ((unsigned char *)e - base == underflows[h])
1638 newinfo->underflow[h] = underflows[h];
1641 /* Clear counters and comefrom */
1642 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then the j matches acquired. */
1649 module_put(t->u.kernel.target->me);
1651 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Expand one compat entry into native ip6t_entry layout at *dstptr:
 * copy the header and counters, grow each match and the target via the
 * xt compat helpers, patch target_offset/next_offset for the larger
 * layout, and shift any hook entry/underflow offsets in newinfo that
 * lie past this entry.
 */
1656 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1657 unsigned int *size, const char *name,
1658 struct xt_table_info *newinfo, unsigned char *base)
1660 struct ip6t_entry_target *t;
1661 struct xt_target *target;
1662 struct ip6t_entry *de;
1663 unsigned int origsize;
1668 de = (struct ip6t_entry *)*dstptr;
1669 memcpy(de, e, sizeof(struct ip6t_entry));
1670 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1672 *dstptr += sizeof(struct ip6t_entry);
/* Note *size grows here: native entries are larger than compat. */
1673 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1675 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1679 de->target_offset = e->target_offset - (origsize - *size);
1680 t = compat_ip6t_get_target(e);
1681 target = t->u.kernel.target;
1682 xt_compat_target_from_user(t, dstptr, size);
1684 de->next_offset = e->next_offset - (origsize - *size);
/* Hooks located after this entry move by the accumulated growth. */
1685 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1686 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1687 newinfo->hook_entry[h] -= origsize - *size;
1688 if ((unsigned char *)de - base < newinfo->underflow[h])
1689 newinfo->underflow[h] -= origsize - *size;
/*
 * Second-pass check of a translated (now native-layout) entry: run
 * each match's checkentry hook via check_match, then validate the
 * target.  On failure, unwind the j matches already checked.
 */
1694 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1699 struct xt_mtchk_param mtpar;
1703 mtpar.entryinfo = &e->ipv6;
1704 mtpar.hook_mask = e->comefrom;
1705 mtpar.family = NFPROTO_IPV6;
1706 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1708 goto cleanup_matches;
1710 ret = check_target(e, name);
1712 goto cleanup_matches;
/* Error path: undo the matches that passed check_match. */
1718 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Convert a 32-bit userland table blob into a native xt_table_info.
 * Pass 1 (under the xt compat lock): walk the compat entries checking
 * sizes/hooks and computing per-entry size deltas.  Then allocate the
 * native table, pass 2: expand each entry into native layout, verify
 * hook reachability (mark_source_chains) and run per-entry checks.
 * On success *pinfo/*pentry0 are replaced with the native table and
 * the compat copy is freed; all error paths drop module references
 * and compat offset state.
 */
1723 translate_compat_table(const char *name,
1724 unsigned int valid_hooks,
1725 struct xt_table_info **pinfo,
1727 unsigned int total_size,
1728 unsigned int number,
1729 unsigned int *hook_entries,
1730 unsigned int *underflows)
1733 struct xt_table_info *newinfo, *info;
1734 void *pos, *entry0, *entry1;
1741 info->number = number;
1743 /* Init all hooks to impossible value. */
1744 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1745 info->hook_entry[i] = 0xFFFFFFFF;
1746 info->underflow[i] = 0xFFFFFFFF;
1749 duprintf("translate_compat_table: size %u\n", info->size);
1751 xt_compat_lock(AF_INET6);
1752 /* Walk through entries, checking offsets. */
1753 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1754 check_compat_entry_size_and_hooks,
1755 info, &size, entry0,
1756 entry0 + total_size,
1757 hook_entries, underflows, &j, name);
1763 duprintf("translate_compat_table: %u not %u entries\n",
1768 /* Check hooks all assigned */
1769 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1770 /* Only hooks which are valid */
1771 if (!(valid_hooks & (1 << i)))
1773 if (info->hook_entry[i] == 0xFFFFFFFF) {
1774 duprintf("Invalid hook entry %u %u\n",
1775 i, hook_entries[i]);
1778 if (info->underflow[i] == 0xFFFFFFFF) {
1779 duprintf("Invalid underflow %u %u\n",
/* size now reflects the native (expanded) table size. */
1786 newinfo = xt_alloc_table_info(size);
1790 newinfo->number = number;
1791 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1792 newinfo->hook_entry[i] = info->hook_entry[i];
1793 newinfo->underflow[i] = info->underflow[i];
1795 entry1 = newinfo->entries[raw_smp_processor_id()];
1798 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1799 compat_copy_entry_from_user,
1800 &pos, &size, name, newinfo, entry1);
/* Compat offset bookkeeping is only needed during translation. */
1801 xt_compat_flush_offsets(AF_INET6);
1802 xt_compat_unlock(AF_INET6);
1807 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1811 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Mixed unwind: release untouched compat entries, clean checked ones. */
1815 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1816 compat_release_entry, &j);
1817 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1818 xt_free_table_info(newinfo);
1822 /* And one copy for every other CPU */
1823 for_each_possible_cpu(i)
1824 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1825 memcpy(newinfo->entries[i], entry1, newinfo->size);
1829 xt_free_table_info(info);
1833 xt_free_table_info(newinfo);
1835 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1838 xt_compat_flush_offsets(AF_INET6);
1839 xt_compat_unlock(AF_INET6);
/*
 * SO_SET_REPLACE handler for 32-bit callers: copy in the compat
 * replace header and blob, translate it to native layout, then hand
 * off to the common __do_replace().  Overflow checks guard the
 * size * num_possible_cpus and counter allocations.
 */
1844 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1847 struct compat_ip6t_replace tmp;
1848 struct xt_table_info *newinfo;
1849 void *loc_cpu_entry;
1851 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1854 /* overflow check */
1855 if (tmp.size >= INT_MAX / num_possible_cpus())
1857 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
/* Force NUL termination on the user-supplied table name. */
1859 tmp.name[sizeof(tmp.name)-1] = 0;
1861 newinfo = xt_alloc_table_info(tmp.size);
1865 /* choose the copy that is on our node/cpu */
1866 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1867 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1873 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1874 &newinfo, &loc_cpu_entry, tmp.size,
1875 tmp.num_entries, tmp.hook_entry,
1880 duprintf("compat_do_replace: Translated table\n");
1882 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1883 tmp.num_counters, compat_ptr(tmp.counters));
1885 goto free_newinfo_untrans;
1888 free_newinfo_untrans:
/* __do_replace failed: drop refs taken during translation, then free. */
1889 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1891 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, routes
 * SET_REPLACE to the compat path and ADD_COUNTERS to do_add_counters
 * with compat=1.
 */
1896 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1901 if (!capable(CAP_NET_ADMIN))
1905 case IP6T_SO_SET_REPLACE:
1906 ret = compat_do_replace(sock_net(sk), user, len);
1909 case IP6T_SO_SET_ADD_COUNTERS:
1910 ret = do_add_counters(sock_net(sk), user, len, 1);
1914 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userland's get-entries request/reply header. */
1921 struct compat_ip6t_get_entries {
1922 char name[IP6T_TABLE_MAXNAMELEN];
1924 struct compat_ip6t_entry entrytable[0];
/*
 * Dump a whole table to a 32-bit user buffer: snapshot the counters,
 * then iterate the entries converting each to compat layout via
 * compat_copy_entry_to_user().
 */
1928 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1929 void __user *userptr)
1931 struct xt_counters *counters;
1932 const struct xt_table_info *private = table->private;
1936 const void *loc_cpu_entry;
1939 counters = alloc_counters(table);
1940 if (IS_ERR(counters))
1941 return PTR_ERR(counters);
1943 /* choose the copy that is on our node/cpu, ...
1944 * This choice is lazy (because current thread is
1945 * allowed to migrate to another cpu)
1947 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1950 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1951 compat_copy_entry_to_user,
1952 &pos, &size, counters, &i);
/*
 * SO_GET_ENTRIES handler for 32-bit callers: validate the request
 * length against the compat-sized table (compat_table_info computes
 * the shrunken size) and dump the entries.  The whole lookup runs
 * under the xt compat lock because compat offset state is consulted.
 */
1959 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1963 struct compat_ip6t_get_entries get;
1966 if (*len < sizeof(get)) {
1967 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1971 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1974 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1975 duprintf("compat_get_entries: %u != %zu\n",
1976 *len, sizeof(get) + get.size);
1980 xt_compat_lock(AF_INET6);
1981 t = xt_find_table_lock(net, AF_INET6, get.name);
1982 if (t && !IS_ERR(t)) {
1983 const struct xt_table_info *private = t->private;
1984 struct xt_table_info info;
1985 duprintf("t->private->number = %u\n", private->number);
/* Only dump if the caller's size matches the compat-layout size. */
1986 ret = compat_table_info(private, &info);
1987 if (!ret && get.size == info.size) {
1988 ret = compat_copy_entries_to_user(private->size,
1989 t, uptr->entrytable);
1991 duprintf("compat_get_entries: I've got %u not %u!\n",
1992 private->size, get.size);
1995 xt_compat_flush_offsets(AF_INET6);
1999 ret = t ? PTR_ERR(t) : -ENOENT;
2001 xt_compat_unlock(AF_INET6);
2005 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: GET_INFO and GET_ENTRIES take compat
 * paths; anything else falls through to the native do_ip6t_get_ctl().
 */
2008 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2012 if (!capable(CAP_NET_ADMIN))
2016 case IP6T_SO_GET_INFO:
2017 ret = get_info(sock_net(sk), user, len, 1);
2019 case IP6T_SO_GET_ENTRIES:
2020 ret = compat_get_entries(sock_net(sk), user, len);
2023 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: requires CAP_NET_ADMIN; handles table
 * replacement and counter addition (compat=0).
 */
2030 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2034 if (!capable(CAP_NET_ADMIN))
2038 case IP6T_SO_SET_REPLACE:
2039 ret = do_replace(sock_net(sk), user, len);
2042 case IP6T_SO_SET_ADD_COUNTERS:
2043 ret = do_add_counters(sock_net(sk), user, len, 0);
2047 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: table info, entry dump, and
 * match/target revision queries (the latter auto-load the
 * "ip6t_<name>" module before retrying the revision lookup).
 */
2055 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2059 if (!capable(CAP_NET_ADMIN))
2063 case IP6T_SO_GET_INFO:
2064 ret = get_info(sock_net(sk), user, len, 0);
2067 case IP6T_SO_GET_ENTRIES:
2068 ret = get_entries(sock_net(sk), user, len);
2071 case IP6T_SO_GET_REVISION_MATCH:
2072 case IP6T_SO_GET_REVISION_TARGET: {
2073 struct ip6t_get_revision rev;
2076 if (*len != sizeof(rev)) {
2080 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Force NUL termination on the user-supplied name. */
2084 rev.name[sizeof(rev.name)-1] = 0;
2086 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2091 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2094 "ip6t_%s", rev.name);
2099 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a new ip6tables table for a namespace: allocate the
 * per-cpu table info, seed one CPU's copy from the replace template,
 * translate/validate it, then hand it to xt_register_table().
 * Returns the registered xt_table or ERR_PTR on failure.
 */
2106 struct xt_table *ip6t_register_table(struct net *net,
2107 const struct xt_table *table,
2108 const struct ip6t_replace *repl)
2111 struct xt_table_info *newinfo;
/* Empty bootstrap info swapped out by xt_register_table(). */
2112 struct xt_table_info bootstrap
2113 = { 0, 0, 0, { 0 }, { 0 }, { } };
2114 void *loc_cpu_entry;
2115 struct xt_table *new_table;
2117 newinfo = xt_alloc_table_info(repl->size);
2123 /* choose the copy on our node/cpu, but dont care about preemption */
2124 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2125 memcpy(loc_cpu_entry, repl->entries, repl->size);
2127 ret = translate_table(table->name, table->valid_hooks,
2128 newinfo, loc_cpu_entry, repl->size,
2135 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2136 if (IS_ERR(new_table)) {
2137 ret = PTR_ERR(new_table);
2143 xt_free_table_info(newinfo);
2145 return ERR_PTR(ret);
/*
 * Tear down a registered table: unhook it from x_tables, run
 * cleanup_entry over every rule (dropping match/target module refs),
 * release the table-owner module ref taken for non-initial entries,
 * and free the per-cpu table info.
 */
2148 void ip6t_unregister_table(struct xt_table *table)
2150 struct xt_table_info *private;
2151 void *loc_cpu_entry;
/* Saved before xt_unregister_table() invalidates the table. */
2152 struct module *table_owner = table->me;
2154 private = xt_unregister_table(table);
2156 /* Decrease module usage counts and free resources */
2157 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2158 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2159 if (private->number > private->initial_entries)
2160 module_put(table_owner);
2161 xt_free_table_info(private);
2164 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2166 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2167 u_int8_t type, u_int8_t code,
/* Result is XORed with the rule's invert flag (elided continuation). */
2170 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Match callback for the built-in icmp6 match: read the ICMPv6 header
 * from the (non-fragment) packet and test type/code against the rule.
 * A truncated header forces a drop via *par->hotdrop.
 */
2175 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2177 const struct icmp6hdr *ic;
2178 struct icmp6hdr _icmph;
2179 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2181 /* Must not be a fragment. */
2182 if (par->fragoff != 0)
2185 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2187 /* We've been asked to examine this packet, and we
2188 * can't. Hence, no choice but to drop.
2190 duprintf("Dropping evil ICMP tinygram.\n");
2191 *par->hotdrop = true;
2195 return icmp6_type_code_match(icmpinfo->type,
2198 ic->icmp6_type, ic->icmp6_code,
2199 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2202 /* Called when user tries to insert an entry of this type. */
2203 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2205 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2207 /* Must specify no unknown invflags */
2208 return !(icmpinfo->invflags & ~IP6T_ICMP_INV)
2211 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is the int verdict itself. */
2212 static struct xt_target ip6t_standard_target __read_mostly = {
2213 .name = IP6T_STANDARD_TARGET,
2214 .targetsize = sizeof(int),
2215 .family = NFPROTO_IPV6,
2216 #ifdef CONFIG_COMPAT
/* 32/64-bit translation for the int verdict payload. */
2217 .compatsize = sizeof(compat_int_t),
2218 .compat_from_user = compat_standard_from_user,
2219 .compat_to_user = compat_standard_to_user,
/* ERROR target: payload is the error name string, handled by ip6t_error. */
2223 static struct xt_target ip6t_error_target __read_mostly = {
2224 .name = IP6T_ERROR_TARGET,
2225 .target = ip6t_error,
2226 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2227 .family = NFPROTO_IPV6,
/*
 * Socket-option registration: wires the native and (under
 * CONFIG_COMPAT) 32-bit set/get handlers to the IP6T_* sockopt range.
 */
2230 static struct nf_sockopt_ops ip6t_sockopts = {
2232 .set_optmin = IP6T_BASE_CTL,
2233 .set_optmax = IP6T_SO_SET_MAX+1,
2234 .set = do_ip6t_set_ctl,
2235 #ifdef CONFIG_COMPAT
2236 .compat_set = compat_do_ip6t_set_ctl,
2238 .get_optmin = IP6T_BASE_CTL,
2239 .get_optmax = IP6T_SO_GET_MAX+1,
2240 .get = do_ip6t_get_ctl,
2241 #ifdef CONFIG_COMPAT
2242 .compat_get = compat_do_ip6t_get_ctl,
2244 .owner = THIS_MODULE,
/* Built-in icmp6 match registration (proto-restricted to ICMPv6). */
2247 static struct xt_match icmp6_matchstruct __read_mostly = {
2249 .match = icmp6_match,
2250 .matchsize = sizeof(struct ip6t_icmp),
2251 .checkentry = icmp6_checkentry,
2252 .proto = IPPROTO_ICMPV6,
2253 .family = NFPROTO_IPV6,
/* Per-namespace init: set up NFPROTO_IPV6 state in x_tables. */
2256 static int __net_init ip6_tables_net_init(struct net *net)
2258 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-namespace teardown: release NFPROTO_IPV6 state in x_tables. */
2261 static void __net_exit ip6_tables_net_exit(struct net *net)
2263 xt_proto_fini(net, NFPROTO_IPV6);
/* Registers the per-namespace init/exit pair above. */
2266 static struct pernet_operations ip6_tables_net_ops = {
2267 .init = ip6_tables_net_init,
2268 .exit = ip6_tables_net_exit,
/*
 * Module init: register the pernet subsystem, the two built-in
 * targets, the icmp6 match, and the sockopt handlers — unwinding in
 * reverse order (error labels at the bottom) if any step fails.
 */
2271 static int __init ip6_tables_init(void)
2275 ret = register_pernet_subsys(&ip6_tables_net_ops);
2279 /* No one else will be downing sem now, so we won't sleep */
2280 ret = xt_register_target(&ip6t_standard_target);
2283 ret = xt_register_target(&ip6t_error_target);
2286 ret = xt_register_match(&icmp6_matchstruct);
2290 /* Register setsockopt */
2291 ret = nf_register_sockopt(&ip6t_sockopts);
2295 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, in reverse registration order. */
2299 xt_unregister_match(&icmp6_matchstruct);
2301 xt_unregister_target(&ip6t_error_target);
2303 xt_unregister_target(&ip6t_standard_target);
2305 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: mirror of ip6_tables_init(), reverse order. */
2310 static void __exit ip6_tables_fini(void)
2312 nf_unregister_sockopt(&ip6t_sockopts);
2314 xt_unregister_match(&icmp6_matchstruct);
2315 xt_unregister_target(&ip6t_error_target);
2316 xt_unregister_target(&ip6t_standard_target);
2318 unregister_pernet_subsys(&ip6_tables_net_ops);
2322 * find the offset to specified header or the protocol number of last header
2323 * if target < 0. "last header" is transport protocol header, ESP, or
2326 * If target header is found, its offset is set in *offset and return protocol
2327 * number. Otherwise, return -1.
2329 * If the first fragment doesn't contain the final protocol header or
2330 * NEXTHDR_NONE it is considered invalid.
2332 * Note that a non-first fragment is a special case in which "the protocol number
2333 * of last header" is "next header" field in Fragment header. In this case,
2334 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* See the block comment above for the full contract of this helper. */
2338 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2339 int target, unsigned short *fragoff)
2341 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2342 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2343 unsigned int len = skb->len - start;
/* Walk the extension-header chain until the target protocol is seen. */
2348 while (nexthdr != target) {
2349 struct ipv6_opt_hdr _hdr, *hp;
2350 unsigned int hdrlen;
/* Hit a non-extension header (or NONE) before the target: give up. */
2352 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2358 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2361 if (nexthdr == NEXTHDR_FRAGMENT) {
2362 unsigned short _frag_off;
2364 fp = skb_header_pointer(skb,
2365 start+offsetof(struct frag_hdr,
/* Mask off the M flag and reserved bits of the frag-off field. */
2372 _frag_off = ntohs(*fp) & ~0x7;
2375 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2376 hp->nexthdr == NEXTHDR_NONE)) {
2378 *fragoff = _frag_off;
/* AUTH header length is in 32-bit words, unlike other ext headers. */
2384 } else if (nexthdr == NEXTHDR_AUTH)
2385 hdrlen = (hp->hdrlen + 2) << 2;
2387 hdrlen = ipv6_optlen(hp);
2389 nexthdr = hp->nexthdr;
/* Public symbols used by per-table modules (ip6table_filter etc.). */
2398 EXPORT_SYMBOL(ip6t_register_table);
2399 EXPORT_SYMBOL(ip6t_unregister_table);
2400 EXPORT_SYMBOL(ip6t_do_table);
2401 EXPORT_SYMBOL(ip6t_ext_hdr);
2402 EXPORT_SYMBOL(ipv6_find_hdr);
2404 module_init(ip6_tables_init);
2405 module_exit(ip6_tables_fini);