/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:     Pavel Emelyanov <xemul@openvz.org>
 *              Started as consolidation of ipv4/ip_fragment.c,
 *              ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX       512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

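/* Fold the protocol-specific hash of a queue into an index that fits the
 * INETFRAGS_HASHSZ-sized bucket table.
 */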
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
        return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
        return time_after(jiffies,
                          f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

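/* Pick a new hash secret and relink every queue into the bucket it now
 * hashes to.  Runs under the write side of f->rnd_seqlock so that lookups
 * can detect the rehash and retry.
 */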
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
        int i;

        write_seqlock_bh(&f->rnd_seqlock);

        if (!inet_frag_may_rebuild(f))
                goto out;

        get_random_bytes(&f->rnd, sizeof(u32));

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb;
                struct inet_frag_queue *q;
                struct hlist_node *n;

                hb = &f->hash[i];
                spin_lock(&hb->chain_lock);

                hlist_for_each_entry_safe(q, n, &hb->chain, list) {
                        unsigned int hval = inet_frag_hashfn(f, q);

                        if (hval != i) {
                                struct inet_frag_bucket *hb_dest;

                                hlist_del(&q->list);

                                /* Relink to new hash chain. */
                                hb_dest = &f->hash[hval];

                                /* This is the only place where we take
                                 * another chain_lock while already holding
                                 * one.  As this will not run concurrently,
                                 * we cannot deadlock on hb_dest lock below; if it is
                                 * already locked it will be released soon, since the
                                 * other caller cannot be waiting for the hb lock
                                 * that we've taken above.
                                 */
                                spin_lock_nested(&hb_dest->chain_lock,
                                                 SINGLE_DEPTH_NESTING);
                                hlist_add_head(&q->list, &hb_dest->chain);
                                spin_unlock(&hb_dest->chain_lock);
                        }
                }
                spin_unlock(&hb->chain_lock);
        }

        f->rebuild = false;
        f->last_rebuild_jiffies = jiffies;
out:
        write_sequnlock_bh(&f->rnd_seqlock);
}

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
        return q->net->low_thresh == 0 ||
               frag_mem_limit(q->net) >= q->net->low_thresh;
}

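/* Collect the evictable queues of one bucket onto a private "expired" list
 * while holding the chain lock, then expire them outside of it.  Queues
 * whose timer has already fired are left alone.  Returns the number of
 * queues handed to the expire callback.
 */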
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
        struct inet_frag_queue *fq;
        struct hlist_node *n;
        unsigned int evicted = 0;
        HLIST_HEAD(expired);

        spin_lock(&hb->chain_lock);

        hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
                if (!inet_fragq_should_evict(fq))
                        continue;

                if (!del_timer(&fq->timer))
                        continue;

                fq->flags |= INET_FRAG_EVICTED;
                hlist_add_head(&fq->list_evictor, &expired);
                ++evicted;
        }

        spin_unlock(&hb->chain_lock);

        hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
                f->frag_expire((unsigned long) fq);

        return evicted;
}

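/* Work queue callback doing the actual eviction: scan up to
 * INETFRAGS_EVICT_BUCKETS buckets per run, stop once more than
 * INETFRAGS_EVICT_MAX queues have been evicted, and remember which bucket
 * to resume from on the next run.
 */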
static void inet_frag_worker(struct work_struct *work)
{
        unsigned int budget = INETFRAGS_EVICT_BUCKETS;
        unsigned int i, evicted = 0;
        struct inet_frags *f;

        f = container_of(work, struct inet_frags, frags_work);

        BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

        local_bh_disable();

        for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
                evicted += inet_evict_bucket(f, &f->hash[i]);
                i = (i + 1) & (INETFRAGS_HASHSZ - 1);
                if (evicted > INETFRAGS_EVICT_MAX)
                        break;
        }

        f->next_bucket = i;

        local_bh_enable();

        if (f->rebuild && inet_frag_may_rebuild(f))
                inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
        if (unlikely(!work_pending(&f->frags_work)))
                schedule_work(&f->frags_work);
}

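/* Set up the per-protocol state: the eviction work, the hash buckets, the
 * secret-rebuild seqlock and the kmem cache that queues are allocated from.
 */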
int inet_frags_init(struct inet_frags *f)
{
        int i;

        INIT_WORK(&f->frags_work, inet_frag_worker);

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb = &f->hash[i];

                spin_lock_init(&hb->chain_lock);
                INIT_HLIST_HEAD(&hb->chain);
        }

        seqlock_init(&f->rnd_seqlock);
        f->last_rebuild_jiffies = 0;
        f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
                                            NULL);
        if (!f->frags_cachep)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_init_net(struct netns_frags *nf)
{
        init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
        cancel_work_sync(&f->frags_work);
        kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

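/* Tear down the per-namespace state: force-evict every queue and keep
 * retrying until the memory counter reaches zero, even if a secret rebuild
 * races with an eviction pass.
 */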
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
        unsigned int seq;
        int i;

        nf->low_thresh = 0;
        local_bh_disable();

evict_again:
        seq = read_seqbegin(&f->rnd_seqlock);

        for (i = 0; i < INETFRAGS_HASHSZ; i++)
                inet_evict_bucket(f, &f->hash[i]);

        local_bh_enable();
        cond_resched();

        if (read_seqretry(&f->rnd_seqlock, seq) ||
            percpu_counter_sum(&nf->mem))
                goto evict_again;

        percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

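/* Hash a queue and lock the bucket it belongs to, retrying if the hash
 * secret was rebuilt between computing the hash and taking the chain lock.
 */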
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
        struct inet_frag_bucket *hb;
        unsigned int seq, hash;

restart:
        seq = read_seqbegin(&f->rnd_seqlock);

        hash = inet_frag_hashfn(f, fq);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        if (read_seqretry(&f->rnd_seqlock, seq)) {
                spin_unlock(&hb->chain_lock);
                goto restart;
        }

        return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
        struct inet_frag_bucket *hb;

        hb = get_frag_bucket_locked(fq, f);
        hlist_del(&fq->list);
        fq->flags |= INET_FRAG_COMPLETE;
        spin_unlock(&hb->chain_lock);
}

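/* Stop a queue's timer and unlink it from its hash bucket, dropping the
 * references held by the timer and by the hash table.
 */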
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
        }
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
                                  struct sk_buff *skb)
{
        /* Let the protocol release any per-skb state before freeing. */
        if (f->skb_free)
                f->skb_free(skb);
        kfree_skb(skb);
}

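/* Final teardown of a dead (INET_FRAG_COMPLETE) queue: free all queued
 * skbs, run the protocol destructor, if any, and give back the accounted
 * memory.
 */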
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
        struct sk_buff *fp;
        struct netns_frags *nf;
        unsigned int sum, sum_truesize = 0;

        WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fp = q->fragments;
        nf = q->net;
        while (fp) {
                struct sk_buff *xp = fp->next;

                sum_truesize += fp->truesize;
                frag_kfree_skb(nf, f, fp);
                fp = xp;
        }
        sum = sum_truesize + f->qsize;

        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);

        sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

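/* Insert a freshly allocated queue into its hash bucket.  If another CPU
 * created a matching queue while we were allocating, drop ours and return
 * the existing one instead.
 */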
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
                                                struct inet_frag_queue *qp_in,
                                                struct inet_frags *f,
                                                void *arg)
{
        struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
        struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
        /* With an SMP race we have to recheck the hash table, because
         * such an entry could have been created on another CPU before
         * we acquired the hash bucket lock.
         */
        hlist_for_each_entry(qp, &hb->chain, list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        spin_unlock(&hb->chain_lock);
                        qp_in->flags |= INET_FRAG_COMPLETE;
                        inet_frag_put(qp_in, f);
                        return qp;
                }
        }
#endif
        qp = qp_in;
        if (!mod_timer(&qp->timer, jiffies + nf->timeout))
                atomic_inc(&qp->refcnt);

        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &hb->chain);

        spin_unlock(&hb->chain_lock);

        return qp;
}

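/* Allocate and initialise a new queue, unless the namespace is already
 * above its high memory threshold, in which case eviction is scheduled
 * and NULL is returned.
 */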
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
                                               struct inet_frags *f,
                                               void *arg)
{
        struct inet_frag_queue *q;

        if (frag_mem_limit(nf) > nf->high_thresh) {
                inet_frag_schedule_worker(f);
                return NULL;
        }

        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;

        q->net = nf;
        f->constructor(q, arg);
        add_frag_mem_limit(nf, f->qsize);

        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
        atomic_set(&q->refcnt, 1);

        return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
                                                struct inet_frags *f,
                                                void *arg)
{
        struct inet_frag_queue *q;

        q = inet_frag_alloc(nf, f, arg);
        if (!q)
                return NULL;

        return inet_frag_intern(nf, q, f, arg);
}

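/* Look up a fragment queue by key, creating one if the chain walk stayed
 * within INETFRAGS_MAXDEPTH.  Overlong chains request a secret rebuild and
 * make the lookup fail with ERR_PTR(-ENOBUFS).
 */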
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                                       struct inet_frags *f, void *key,
                                       unsigned int hash)
{
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *q;
        int depth = 0;

        if (frag_mem_limit(nf) > nf->low_thresh)
                inet_frag_schedule_worker(f);

        hash &= (INETFRAGS_HASHSZ - 1);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        hlist_for_each_entry(q, &hb->chain, list) {
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        spin_unlock(&hb->chain_lock);
                        return q;
                }
                depth++;
        }
        spin_unlock(&hb->chain_lock);

        if (depth <= INETFRAGS_MAXDEPTH)
                return inet_frag_create(nf, f, key);

        if (inet_frag_may_rebuild(f)) {
                if (!f->rebuild)
                        f->rebuild = true;
                inet_frag_schedule_worker(f);
        }

        return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
                                   const char *prefix)
{
        static const char msg[] = "inet_frag_find: Fragment hash bucket"
                " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
                ". Dropping fragment.\n";

        if (PTR_ERR(q) == -ENOBUFS)
                net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);