/*
 * inet fragments management
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Authors:        Pavel Emelyanov <xemul@openvz.org>
 *                              Started as consolidation of ipv4/ip_fragment.c,
 *                              ipv6/reassembly, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX       512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the ORed ECN values of all fragments, apply the RFC 3168 5.3
 * requirements.
 * Value: 0xff if the frame should be dropped,
 *        otherwise 0 or INET_ECN_CE, to be ORed into the final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]   = INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

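/* Map a fragment queue to its hash bucket using the protocol-specific
 * hash function, masked down to the INETFRAGS_HASHSZ-entry table.
 */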
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
        return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
        return time_after(jiffies,
               f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

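/* Pick a new hash secret and relink every queue that now belongs in a
 * different bucket.  Runs under the write side of rnd_seqlock so that
 * concurrent lookups can detect the rehash and retry.
 */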
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
        int i;

        write_seqlock_bh(&f->rnd_seqlock);

        if (!inet_frag_may_rebuild(f))
                goto out;

        get_random_bytes(&f->rnd, sizeof(u32));

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb;
                struct inet_frag_queue *q;
                struct hlist_node *n;

                hb = &f->hash[i];
                spin_lock(&hb->chain_lock);

                hlist_for_each_entry_safe(q, n, &hb->chain, list) {
                        unsigned int hval = inet_frag_hashfn(f, q);

                        if (hval != i) {
                                struct inet_frag_bucket *hb_dest;

                                hlist_del(&q->list);

                                /* Relink to new hash chain. */
                                hb_dest = &f->hash[hval];

                                /* This is the only place where we take
                                 * another chain_lock while already holding
                                 * one.  As this will not run concurrently,
                                 * we cannot deadlock on the hb_dest lock
                                 * below: if it is already locked, it will be
                                 * released soon, since the other holder
                                 * cannot be waiting for the hb lock that
                                 * we've taken above.
                                 */
                                spin_lock_nested(&hb_dest->chain_lock,
                                                 SINGLE_DEPTH_NESTING);
                                hlist_add_head(&q->list, &hb_dest->chain);
                                spin_unlock(&hb_dest->chain_lock);
                        }
                }
                spin_unlock(&hb->chain_lock);
        }

        f->rebuild = false;
        f->last_rebuild_jiffies = jiffies;
out:
        write_sequnlock_bh(&f->rnd_seqlock);
}

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
        return q->net->low_thresh == 0 ||
               frag_mem_limit(q->net) >= q->net->low_thresh;
}

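/* Evict eligible queues from one hash bucket: queues whose pending timer
 * could be stopped are collected on a private list (via list_evictor)
 * under the chain lock, then expired through f->frag_expire() with the
 * lock dropped.  Returns the number of queues evicted.
 */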
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
        struct inet_frag_queue *fq;
        struct hlist_node *n;
        unsigned int evicted = 0;
        HLIST_HEAD(expired);

        spin_lock(&hb->chain_lock);

        hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
                if (!inet_fragq_should_evict(fq))
                        continue;

                if (!del_timer(&fq->timer))
                        continue;

                hlist_add_head(&fq->list_evictor, &expired);
                ++evicted;
        }

        spin_unlock(&hb->chain_lock);

        hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
                f->frag_expire((unsigned long) fq);

        return evicted;
}

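/* Deferred eviction work: scan up to INETFRAGS_EVICT_BUCKETS buckets per
 * run (resuming at f->next_bucket), stop early once more than
 * INETFRAGS_EVICT_MAX queues have been evicted, and rebuild the hash
 * secret afterwards if one was requested.
 */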
static void inet_frag_worker(struct work_struct *work)
{
        unsigned int budget = INETFRAGS_EVICT_BUCKETS;
        unsigned int i, evicted = 0;
        struct inet_frags *f;

        f = container_of(work, struct inet_frags, frags_work);

        BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

        local_bh_disable();

        for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
                evicted += inet_evict_bucket(f, &f->hash[i]);
                i = (i + 1) & (INETFRAGS_HASHSZ - 1);
                if (evicted > INETFRAGS_EVICT_MAX)
                        break;
        }

        f->next_bucket = i;

        local_bh_enable();

        if (f->rebuild && inet_frag_may_rebuild(f))
                inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
        if (unlikely(!work_pending(&f->frags_work)))
                schedule_work(&f->frags_work);
}

int inet_frags_init(struct inet_frags *f)
{
        int i;

        INIT_WORK(&f->frags_work, inet_frag_worker);

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb = &f->hash[i];

                spin_lock_init(&hb->chain_lock);
                INIT_HLIST_HEAD(&hb->chain);
        }

        seqlock_init(&f->rnd_seqlock);
        f->last_rebuild_jiffies = 0;
        f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
                                            NULL);
        if (!f->frags_cachep)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_init_net(struct netns_frags *nf)
{
        init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
        cancel_work_sync(&f->frags_work);
        kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

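/* Tear down per-netns fragment state: drop low_thresh to zero so that
 * inet_fragq_should_evict() accepts every queue of this namespace, evict
 * all buckets until the memory counter reaches zero (retrying if a secret
 * rebuild raced with us), then destroy the counter.
 */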
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
        unsigned int seq;
        int i;

        nf->low_thresh = 0;

evict_again:
        local_bh_disable();
        seq = read_seqbegin(&f->rnd_seqlock);

        for (i = 0; i < INETFRAGS_HASHSZ; i++)
                inet_evict_bucket(f, &f->hash[i]);

        local_bh_enable();
        cond_resched();

        if (read_seqretry(&f->rnd_seqlock, seq) ||
            percpu_counter_sum(&nf->mem))
                goto evict_again;

        percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

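/* Return the hash bucket that currently holds fq, with its chain_lock
 * taken.  If a secret rebuild rehashed the table between computing the
 * hash and acquiring the lock, drop the lock and retry.
 */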
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
        struct inet_frag_bucket *hb;
        unsigned int seq, hash;

 restart:
        seq = read_seqbegin(&f->rnd_seqlock);

        hash = inet_frag_hashfn(f, fq);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        if (read_seqretry(&f->rnd_seqlock, seq)) {
                spin_unlock(&hb->chain_lock);
                goto restart;
        }

        return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
        struct inet_frag_bucket *hb;

        hb = get_frag_bucket_locked(fq, f);
        hlist_del(&fq->list);
        fq->flags |= INET_FRAG_COMPLETE;
        spin_unlock(&hb->chain_lock);
}

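/* Take a queue out of service: stop its timer and unlink it from the hash
 * table, dropping the reference held by each.  The queue itself is freed
 * later, once the last reference is put.
 */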
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
        }
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
                                  struct sk_buff *skb)
{
        if (f->skb_free)
                f->skb_free(skb);
        kfree_skb(skb);
}

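/* Final teardown of a dead queue: free every fragment skb, run the
 * protocol destructor, return the queue to its slab cache and give the
 * accounted memory back to the per-netns limit.
 */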
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
        struct sk_buff *fp;
        struct netns_frags *nf;
        unsigned int sum, sum_truesize = 0;

        WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fp = q->fragments;
        nf = q->net;
        while (fp) {
                struct sk_buff *xp = fp->next;

                sum_truesize += fp->truesize;
                frag_kfree_skb(nf, f, fp);
                fp = xp;
        }
        sum = sum_truesize + f->qsize;

        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);

        sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

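/* Insert a freshly allocated queue into the hash table.  On SMP another
 * CPU may have created an equal queue in the meantime; if so, drop ours
 * and return the existing one instead.
 */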
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
                                                struct inet_frag_queue *qp_in,
                                                struct inet_frags *f,
                                                void *arg)
{
        struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
        struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
        /* On SMP we have to recheck the hash table, because an equal
         * entry could have been created on another CPU before we
         * acquired the hash bucket lock.
         */
        hlist_for_each_entry(qp, &hb->chain, list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        spin_unlock(&hb->chain_lock);
                        qp_in->flags |= INET_FRAG_COMPLETE;
                        inet_frag_put(qp_in, f);
                        return qp;
                }
        }
#endif
        qp = qp_in;
        if (!mod_timer(&qp->timer, jiffies + nf->timeout))
                atomic_inc(&qp->refcnt);

        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &hb->chain);

        spin_unlock(&hb->chain_lock);

        return qp;
}

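/* Allocate and initialise a new queue, charging f->qsize against the
 * per-netns memory limit.  Refuses (and kicks the eviction worker) when
 * the namespace is already above high_thresh.
 */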
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
                                               struct inet_frags *f,
                                               void *arg)
{
        struct inet_frag_queue *q;

        if (frag_mem_limit(nf) > nf->high_thresh) {
                inet_frag_schedule_worker(f);
                return NULL;
        }

        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;

        q->net = nf;
        f->constructor(q, arg);
        add_frag_mem_limit(nf, f->qsize);

        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
        atomic_set(&q->refcnt, 1);

        return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
                                                struct inet_frags *f,
                                                void *arg)
{
        struct inet_frag_queue *q;

        q = inet_frag_alloc(nf, f, arg);
        if (!q)
                return NULL;

        return inet_frag_intern(nf, q, f, arg);
}

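/* Look up the queue matching @key in the bucket selected by @hash,
 * creating and interning a new one if none exists.  Returns
 * ERR_PTR(-ENOBUFS) if the chain has grown past INETFRAGS_MAXDEPTH,
 * in which case a hash secret rebuild may also be scheduled.
 */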
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                                       struct inet_frags *f, void *key,
                                       unsigned int hash)
{
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *q;
        int depth = 0;

        if (frag_mem_limit(nf) > nf->low_thresh)
                inet_frag_schedule_worker(f);

        hash &= (INETFRAGS_HASHSZ - 1);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        hlist_for_each_entry(q, &hb->chain, list) {
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        spin_unlock(&hb->chain_lock);
                        return q;
                }
                depth++;
        }
        spin_unlock(&hb->chain_lock);

        if (depth <= INETFRAGS_MAXDEPTH)
                return inet_frag_create(nf, f, key);

        if (inet_frag_may_rebuild(f)) {
                if (!f->rebuild)
                        f->rebuild = true;
                inet_frag_schedule_worker(f);
        }

        return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
                                   const char *prefix)
{
        static const char msg[] = "inet_frag_find: Fragment hash bucket"
                " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
                ". Dropping fragment.\n";

        if (PTR_ERR(q) == -ENOBUFS)
                net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);