#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
 * struct xt_action_param - parameters for matches/targets
 * @match: the match extension
 * @target: the target extension
 * @matchinfo: per-match data
 * @targinfo: per-target data
 * @net: network namespace through which the action was invoked
 * @in: input netdevice
 * @out: output netdevice
 * @fragoff: packet is a fragment, this is the data offset
 * @thoff: position of transport header relative to skb->data
 * @hook: hook number the packet came from
 * @family: Actual NFPROTO_* through which the function is invoked
 *	(helpful when match->family == NFPROTO_UNSPEC)
 * Fields written to by extensions:
 * @hotdrop: drop packet if we had inspection problems
 */
struct xt_action_param {
	const struct xt_match *match;
	const struct xt_target *target;
	const void *matchinfo, *targinfo;
	const struct net_device *in, *out;
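/*
 * Illustrative sketch (not part of this header): how a match extension's
 * ->match() hook typically consumes these parameters.  The foo_mt and
 * struct foo_mtinfo names are hypothetical.
 *
 *	static bool foo_mt(const struct sk_buff *skb, struct xt_action_param *par)
 *	{
 *		const struct foo_mtinfo *info = par->matchinfo;
 *		const struct tcphdr *th;
 *		struct tcphdr _tcph;
 *
 *		if (par->fragoff != 0)		// only look at the first fragment
 *			return false;
 *
 *		th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
 *		if (th == NULL) {
 *			par->hotdrop = true;	// truncated header: force a drop
 *			return false;
 *		}
 *		return ntohs(th->dest) == info->port;
 *	}
 */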
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 * @net: network namespace through which the check was invoked
 * @table: table into which the rule will be inserted
 * @entryinfo: the family-specific rule data
 *	(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match: struct xt_match through which this function was invoked
 * @matchinfo: per-match data
 * @hook_mask: via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
	const void *entryinfo;
	const struct xt_match *match;
	unsigned int hook_mask;
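/*
 * Illustrative sketch: a ->checkentry() handler validating a rule at insert
 * time through struct xt_mtchk_param.  foo_mt_check, struct foo_mtinfo and
 * FOO_VALID_FLAGS are hypothetical names.
 *
 *	static int foo_mt_check(const struct xt_mtchk_param *par)
 *	{
 *		const struct foo_mtinfo *info = par->matchinfo;
 *
 *		// e.g. only allow the match on input-side hooks
 *		if (par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
 *				       (1 << NF_INET_LOCAL_IN)))
 *			return -EINVAL;
 *		if (info->flags & ~FOO_VALID_FLAGS)
 *			return -EINVAL;
 *		return 0;	// accepted; a negative errno rejects the rule
 *	}
 */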
/**
 * struct xt_mtdtor_param - match destructor parameters
 */
struct xt_mtdtor_param {
	const struct xt_match *match;
/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 * @entryinfo: the family-specific rule data
 *	(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 * Other fields as above.
 */
struct xt_tgchk_param {
	const void *entryinfo;
	const struct xt_target *target;
	unsigned int hook_mask;
/* Target destructor parameters */
struct xt_tgdtor_param {
	const struct xt_target *target;
struct xt_match {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	/* Return true or false: return false and set par->hotdrop = true
	   to force an immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skb, using skb_header_pointer and
	   skb_ip_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      struct xt_action_param *);
	/* Called when a user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);

	/* Called when the userspace alignment differs from the kernel's */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	unsigned int matchsize;
	unsigned int compatsize;

	unsigned short proto;
	unsigned short family;
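/*
 * Illustrative sketch: a minimal struct xt_match a module might define to
 * wire up the hooks above.  The foo_* identifiers are hypothetical and only
 * the commonly used fields are shown.
 *
 *	static struct xt_match foo_mt_reg __read_mostly = {
 *		.name       = "foo",
 *		.family     = NFPROTO_UNSPEC,
 *		.match      = foo_mt,
 *		.checkentry = foo_mt_check,
 *		.destroy    = foo_mt_destroy,
 *		.matchsize  = sizeof(struct foo_mtinfo),
 *		.me         = THIS_MODULE,
 *	};
 */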
/* Registration hooks for targets. */
struct xt_target {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_ip_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_action_param *);
	/* Called when a user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return 0 on success or an error code otherwise (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);
	/* Called when an entry of this type is deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);

	/* Called when the userspace alignment differs from the kernel's */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	unsigned int targetsize;
	unsigned int compatsize;

	unsigned short proto;
	unsigned short family;
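/*
 * Illustrative sketch: the target-side counterpart (the foo_tg* names are
 * hypothetical).  A ->target() hook returns a netfilter verdict such as
 * XT_CONTINUE, NF_ACCEPT or NF_DROP.
 *
 *	static unsigned int foo_tg(struct sk_buff *skb,
 *				   const struct xt_action_param *par)
 *	{
 *		const struct foo_tginfo *info = par->targinfo;
 *
 *		if (info->flags & FOO_TG_DROP)
 *			return NF_DROP;
 *		// otherwise leave the packet alone and keep traversing rules
 *		return XT_CONTINUE;
 *	}
 *
 *	static struct xt_target foo_tg_reg __read_mostly = {
 *		.name       = "FOO",
 *		.family     = NFPROTO_IPV4,
 *		.target     = foo_tg,
 *		.targetsize = sizeof(struct foo_tginfo),
 *		.me         = THIS_MODULE,
 *	};
 */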
/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;
	/* Man behind the curtain... */
	struct xt_table_info *private;
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
#include <linux/netfilter_ipv4.h>
/* The table itself */
struct xt_table_info {
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];
	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;

	unsigned char entries[0] __aligned(8);
int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);
int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);
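/*
 * Illustrative sketch: registering the hypothetical extensions above from a
 * module's init/exit paths.  Registration can fail (for example on a
 * duplicate name/revision), so the error must be propagated.
 *
 *	static int __init foo_init(void)
 *	{
 *		int ret;
 *
 *		ret = xt_register_match(&foo_mt_reg);
 *		if (ret < 0)
 *			return ret;
 *		ret = xt_register_target(&foo_tg_reg);
 *		if (ret < 0)
 *			xt_unregister_match(&foo_mt_reg);
 *		return ret;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		xt_unregister_target(&foo_tg_reg);
 *		xt_unregister_match(&foo_mt_reg);
 *	}
 */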
int xt_check_entry_offsets(const void *base,
			   unsigned int target_offset,
			   unsigned int next_offset);
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
		    bool inv_proto);
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);
struct xt_table_info *xt_replace_table(struct xt_table *table,
				       unsigned int num_counters,
				       struct xt_table_info *newinfo,
				       int *error);
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err);
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name);
void xt_table_unlock(struct xt_table *t);
int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);
/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * low order bit set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
 *
 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
 */
extern struct static_key xt_tee_enabled;
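/*
 * Illustrative sketch: the key is meant to be tested with static_key_false()
 * on the packet path, so handling reentrancy costs nothing until a TEE rule
 * is actually loaded:
 *
 *	if (static_key_false(&xt_tee_enabled))
 *		...take the reentrancy-safe slow path...
 */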
/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait until the section ends.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;
	/*
	 * Low order bit of sequence is set if we already
	 * called xt_write_recseq_begin().
	 */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;
	/*
	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
	 * We don't check the addend value to avoid a test and conditional jump,
	 * since addend is most likely 1.
	 */
	__this_cpu_add(xt_recseq.sequence, addend);
	return addend;
}
/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}
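/*
 * Illustrative sketch of how the two helpers bracket a rule traversal on the
 * packet path (the pattern used by the ip(6)t_do_table-style evaluators):
 *
 *	unsigned int addend;
 *
 *	local_bh_disable();		// satisfies both preconditions above
 *	addend = xt_write_recseq_begin();
 *	// ... walk the ruleset and update per-cpu counters ...
 *	xt_write_recseq_end(addend);
 *	local_bh_enable();
 */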
/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}
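/*
 * Illustrative sketch: matching a rule's interface pattern against the
 * device a packet arrived on, as the ip(6)_tables packet matchers do.  The
 * three buffers are IFNAMSIZ bytes and suitably aligned; 'indev' and
 * 'ipinfo' are hypothetical locals.
 *
 *	unsigned long ret;
 *
 *	ret = ifname_compare_aligned(indev, ipinfo->iniface,
 *				     ipinfo->iniface_mask);
 *	if (ret != 0)
 *		return false;	// some masked byte differed
 *
 * The result is zero iff every byte selected by the mask is equal.
 */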
/* On SMP, ip(6)t_entry->counters.pcnt holds the address of the
 * real (percpu) counter.  On !SMP, it's just the packet count,
 * so nothing needs to be done there.
 *
 * xt_percpu_counter_alloc returns the address of the percpu
 * counter, or 0 on !SMP.  We force an alignment of 16 bytes
 * so that bytes/packets share a common cache line.
 *
 * Hence the caller must use IS_ERR_VALUE to check for an error; this
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
static inline u64 xt_percpu_counter_alloc(void)
{
	if (nr_cpu_ids > 1) {
		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
						    sizeof(struct xt_counters));
		if (res == NULL)
			return (u64) -ENOMEM;
		return (u64) (__force unsigned long) res;
	}
	return 0;
}
static inline void xt_percpu_counter_free(u64 pcnt)
{
	if (nr_cpu_ids > 1)
		free_percpu((void __percpu *) (unsigned long) pcnt);
}
static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
	if (nr_cpu_ids > 1)
		return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);
	return cnt;
}
static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
	if (nr_cpu_ids > 1)
		return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
	return cnt;
}
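/*
 * Illustrative sketch of the intended life cycle: allocate from the rule's
 * check path, bump on the packet path, release on destroy.  'e' stands for a
 * hypothetical ipt_entry-style rule carrying a struct xt_counters.
 *
 *	// checkentry/translation time:
 *	e->counters.pcnt = xt_percpu_counter_alloc();
 *	if (IS_ERR_VALUE(e->counters.pcnt))
 *		return -ENOMEM;
 *
 *	// packet path, inside an xt_write_recseq_begin()/end() section:
 *	counter = xt_get_this_cpu_counter(&e->counters);
 *	counter->pcnt += 1;
 *	counter->bcnt += skb->len;
 *
 *	// teardown:
 *	xt_percpu_counter_free(e->counters.pcnt);
 */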
struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
#ifdef CONFIG_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match {
	u_int16_t match_size;
	char name[XT_FUNCTION_MAXNAMELEN - 1];
	u_int16_t match_size;
	u_int16_t match_size;
	unsigned char data[0];
struct compat_xt_entry_target {
	u_int16_t target_size;
	char name[XT_FUNCTION_MAXNAMELEN - 1];
	u_int16_t target_size;
	compat_uptr_t target;
	u_int16_t target_size;
	unsigned char data[0];
/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */
struct compat_xt_counters {
	compat_u64 pcnt, bcnt;		/* Packet and byte counters */
struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
struct _compat_xt_align {
#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
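/*
 * Illustrative note: assuming struct _compat_xt_align mirrors the uapi
 * _xt_align with compat_u64 in place of __u64, COMPAT_XT_ALIGN() yields the
 * alignment a 32-bit task would use.  On x86-64 with an ia32 task, for
 * example, COMPAT_XT_ALIGN(12) == 12 while XT_ALIGN(12) == 16, which is why
 * match/target payloads have to be repacked when crossing the compat
 * boundary.
 */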
void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
int xt_compat_match_offset(const struct xt_match *match);
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size);
int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size);
int xt_compat_check_entry_offsets(const void *base,
				  unsigned int target_offset,
				  unsigned int next_offset);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */