/*
 *		inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>

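/* Rehash all fragment queues with a fresh random seed; runs periodically
 * from f->secret_timer to keep the hash layout unpredictable.
 */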
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

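/* Initialise a protocol's inet_frags descriptor: hash buckets, lock,
 * random seed and the periodic secret-rebuild timer.
 */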
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
				   (jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

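/* Typical registration by a reassembly protocol (illustrative sketch;
 * the "my_*" names below are placeholders, not defined in this file):
 *
 *	static struct inet_frags my_frags;
 *
 *	my_frags.hashfn		= my_hashfn;
 *	my_frags.constructor	= my_constructor;
 *	my_frags.destructor	= my_destructor;
 *	my_frags.skb_free	= NULL;
 *	my_frags.qsize		= sizeof(struct my_frag_queue);
 *	my_frags.match		= my_match;
 *	my_frags.frag_expire	= my_expire;
 *	my_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&my_frags);
 *
 * Per-namespace state is then set up with inet_frags_init_net() below.
 */

/* Initialise the per-namespace fragment state: memory accounting and
 * the LRU list used by the evictor.
 */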
void inet_frags_init_net(struct netns_frags *nf)
{
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

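/* Tear down the per-namespace state: drop the eviction threshold to zero
 * and force-evict every remaining queue.
 */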
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);

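/* Unlink a queue from the hash chain and the per-namespace LRU list. */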
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	write_unlock(&f->lock);
}

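/* Stop a queue's timer, unlink it and mark it complete, dropping the
 * references held by the timer and by the hash table.
 */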
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

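/* Free one fragment skb and update the work budget and memory accounting. */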
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

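/* Final teardown of a dead (INET_FRAG_COMPLETE) queue: free every queued
 * fragment, release the per-queue memory charge and free the queue itself.
 */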
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);
	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

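/* Reclaim memory by killing queues from the head of the LRU list until
 * usage drops below nf->low_thresh; @force skips the high_thresh check.
 * Returns the number of queues evicted.
 */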
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (atomic_read(&nf->mem) <= nf->high_thresh)
			return 0;
	}

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

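/* Insert a freshly allocated queue into the hash under the write lock.
 * If another CPU raced us and already created a matching queue, the new
 * one is dropped and the existing queue is returned instead.
 */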
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we stayed without the lock another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash table, because such an
	 * entry could have been created on another cpu while we
	 * promoted the read lock to a write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &nf->lru_list);
	write_unlock(&f->lock);

	return qp;
}

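/* Allocate and initialise a new queue: charge its size to the namespace,
 * run the protocol constructor and arm the expiry timer.
 */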
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

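/* Allocate a new queue and hash it in; returns NULL on allocation failure. */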
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

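/* Look up a fragment queue by @key in bucket @hash, creating it if it does
 * not exist. Called with f->lock read-locked; the lock is released before
 * returning. If the bucket chain is longer than INETFRAGS_MAXDEPTH the
 * lookup is abandoned and ERR_PTR(-ENOBUFS) is returned.
 *
 * Sketch of the expected calling pattern (caller-side names are
 * illustrative, not defined in this file):
 *
 *	read_lock(&my_frags.lock);
 *	q = inet_frag_find(nf, &my_frags, &key, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, "my_proto: ");
 *		return NULL;
 *	}
 */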
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;
	int depth = 0;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

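/* Rate-limited warning emitted when inet_frag_find() gave up because a
 * hash bucket chain exceeded INETFRAGS_MAXDEPTH.
 */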
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);