/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE       64UL
#define HASH_MIN_SIZE           4U
#define BUCKET_LOCKS_PER_CPU   128UL

static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = rht_bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
                              gfp_t gfp)
{
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);

        if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE &&
                    gfp == GFP_KERNEL)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
                else
#endif
                tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
                                           gfp);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tbl->locks[i]);
        }
        tbl->locks_mask = size - 1;

        return 0;
}
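
/*
 * Minimal sketch of how a bucket maps to its lock, given the power-of-two
 * lock array set up above (this mirrors what rht_bucket_lock() from
 * <linux/rhashtable.h> boils down to; the helper name here is
 * illustrative only):
 */
static inline spinlock_t *example_bucket_lock(const struct bucket_table *tbl,
                                              unsigned int hash)
{
        /* locks_mask == size - 1, so masking selects one of the locks */
        return &tbl->locks[hash & tbl->locks_mask];
}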

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl)
                kvfree(tbl->locks);

        kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
        bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets,
                                               gfp_t gfp)
{
        struct bucket_table *tbl = NULL;
        size_t size;
        int i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
            gfp != GFP_KERNEL)
                tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
        if (tbl == NULL && gfp == GFP_KERNEL)
                tbl = vzalloc(size);
        if (tbl == NULL)
                return NULL;

        tbl->size = nbuckets;

        if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        INIT_LIST_HEAD(&tbl->walkers);

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

        return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        do {
                new_tbl = tbl;
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht,
                rht_dereference_rcu(old_tbl->future_tbl, ht));
        struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
        int err = -ENOENT;
        struct rhash_head *head, *next, *entry;
        spinlock_t *new_bucket_lock;
        unsigned int new_hash;

        /* Walk to the last entry of the old chain; that is the one we
         * move. -ENOENT is returned once the chain is empty.
         */
        rht_for_each(entry, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

                if (rht_is_a_nulls(next))
                        break;

                pprev = &entry->next;
        }

        if (err)
                goto out;

        new_hash = head_hashfn(ht, new_tbl, entry);

        new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

        spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
        head = rht_dereference_bucket(new_tbl->buckets[new_hash],
                                      new_tbl, new_hash);

        /* Link the entry at the head of its new bucket ... */
        RCU_INIT_POINTER(entry->next, head);

        rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
        spin_unlock(new_bucket_lock);

        /* ... then unlink it from the tail of the old chain. */
        rcu_assign_pointer(*pprev, next);

out:
        return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
                                    unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *old_bucket_lock;

        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

        spin_lock_bh(old_bucket_lock);
        while (!rhashtable_rehash_one(ht, old_hash))
                ;
        old_tbl->rehash++;
        spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
{
        /* Protect future_tbl using the first bucket lock. */
        spin_lock_bh(old_tbl->locks);

        /* Did somebody beat us to it? */
        if (rcu_access_pointer(old_tbl->future_tbl)) {
                spin_unlock_bh(old_tbl->locks);
                return -EEXIST;
        }

        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         */
        rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

        /* Ensure the new table is visible to readers. */
        smp_wmb();

        spin_unlock_bh(old_tbl->locks);

        return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl;
        struct rhashtable_walker *walker;
        unsigned int old_hash;

        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        if (!new_tbl)
                return 0;

        for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
                rhashtable_rehash_chain(ht, old_hash);

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
        spin_unlock(&ht->lock);

        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
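
/*
 * Recap of the rehash machinery above:
 *
 * 1. rhashtable_rehash_attach() publishes the new table in
 *    old_tbl->future_tbl under the first bucket lock, so insertions
 *    start going into the new table immediately.
 * 2. rhashtable_rehash_chain() takes each old bucket lock in turn and
 *    moves that chain's entries, one at a time from the tail
 *    (rhashtable_rehash_one()), into their new buckets, bumping
 *    old_tbl->rehash as each chain completes.
 * 3. rhashtable_rehash_table() then points ht->tbl at the new table,
 *    detaches walkers, and frees the old table after an RCU grace
 *    period via call_rcu().
 */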

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:         the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        int err;

        ASSERT_RHT_MUTEX(ht);

        old_tbl = rhashtable_last_table(ht, old_tbl);

        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:         the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that would
 * not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int size;
        int err;

        ASSERT_RHT_MUTEX(ht);

        size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;

        if (old_tbl->size <= size)
                return 0;

        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;

        new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}
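
/*
 * Shrink-target illustration for the computation above: with
 * nelems == 100, roundup_pow_of_two(100 * 3 / 2) == 256, so a table of
 * 512 or more buckets would shrink to 256 (never below ht->p.min_size),
 * leaving the new table under the 75% growth watermark.
 */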

static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        int err = 0;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);

        tbl = rht_dereference(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, tbl);

        if (rht_grow_above_75(ht, tbl))
                rhashtable_expand(ht);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                rhashtable_shrink(ht);

        err = rhashtable_rehash_table(ht);

        mutex_unlock(&ht->mutex);

        if (err)
                schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
                                        struct bucket_table *tbl,
                                        unsigned int hash)
{
        unsigned int elasticity = ht->elasticity;
        struct rhash_head *head;

        rht_for_each(head, tbl, hash)
                if (!--elasticity)
                        return true;

        return false;
}
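
/*
 * If a chain is longer than ht->elasticity (16 unless
 * params->insecure_elasticity is set; see rhashtable_init() below),
 * rhashtable_insert_slow() refuses the insertion with -EAGAIN so that
 * the caller forces a rehash via rhashtable_insert_rehash().
 */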

int rhashtable_insert_rehash(struct rhashtable *ht)
{
        struct bucket_table *old_tbl;
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
        unsigned int size;
        int err;

        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, old_tbl);

        size = tbl->size;

        if (rht_grow_above_75(ht, tbl))
                size *= 2;
        /* Do not schedule more than one rehash */
        else if (old_tbl != tbl)
                return -EBUSY;

        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
        if (new_tbl == NULL) {
                /* Schedule async resize/rehash to retry the allocation
                 * in non-atomic context.
                 */
                schedule_work(&ht->run_work);
                return -ENOMEM;
        }

        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
        if (err) {
                bucket_table_free(new_tbl);
                if (err == -EEXIST)
                        err = 0;
        } else
                schedule_work(&ht->run_work);

        return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                           struct rhash_head *obj,
                           struct bucket_table *tbl)
{
        struct rhash_head *head;
        unsigned int hash;
        int err;

        tbl = rhashtable_last_table(ht, tbl);
        hash = head_hashfn(ht, tbl, obj);
        spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

        err = -EEXIST;
        if (key && rhashtable_lookup_fast(ht, key, ht->p))
                goto exit;

        err = -E2BIG;
        if (unlikely(rht_grow_above_max(ht, tbl)))
                goto exit;

        err = -EAGAIN;
        if (rhashtable_check_elasticity(ht, tbl, hash) ||
            rht_grow_above_100(ht, tbl))
                goto exit;

        err = 0;

        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);

exit:
        spin_unlock(rht_bucket_lock(tbl, hash));

        return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:         Table to walk over
 * @iter:       Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;

        iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
        if (!iter->walker)
                return -ENOMEM;

        mutex_lock(&ht->mutex);
        iter->walker->tbl = rht_dereference(ht->tbl, ht);
        list_add(&iter->walker->list, &iter->walker->tbl->walkers);
        mutex_unlock(&ht->mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:       Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        mutex_lock(&iter->ht->mutex);
        if (iter->walker->tbl)
                list_del(&iter->walker->list);
        mutex_unlock(&iter->ht->mutex);
        kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:       Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
        __acquires(RCU)
{
        struct rhashtable *ht = iter->ht;

        mutex_lock(&ht->mutex);

        if (iter->walker->tbl)
                list_del(&iter->walker->list);

        rcu_read_lock();

        mutex_unlock(&ht->mutex);

        if (!iter->walker->tbl) {
                iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
                return -EAGAIN;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:       Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        struct bucket_table *tbl = iter->walker->tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;

        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
                goto next;
        }

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        return rht_obj(ht, p);
                }

                iter->skip = 0;
        }

        iter->p = NULL;

        /* Ensure we see any new tables. */
        smp_rmb();

        iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker->tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:       Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
{
        struct rhashtable *ht;
        struct bucket_table *tbl = iter->walker->tbl;

        if (!tbl)
                goto out;

        ht = iter->ht;

        spin_lock(&ht->lock);
        if (tbl->rehash < tbl->size)
                list_add(&iter->walker->list, &tbl->walkers);
        else
                iter->walker->tbl = NULL;
        spin_unlock(&ht->lock);

        iter->p = NULL;

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
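
/*
 * A minimal walker usage sketch (illustrative only; this helper is not
 * part of the rhashtable API). It assumes a sleepable context and
 * tolerates the duplicate/missed-object caveats documented at
 * rhashtable_walk_init() above.
 */
static void __maybe_unused example_walk(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        void *obj;

        if (rhashtable_walk_init(ht, &iter))
                return;

        /* Takes the RCU read lock; -EAGAIN here only means a rewind. */
        rhashtable_walk_start(&iter);

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* resize: iterator rewound */
                        break;
                }
                /* ... use obj here ... */
        }

        rhashtable_walk_stop(&iter);    /* drops the RCU read lock */
        rhashtable_walk_exit(&iter);
}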

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   (unsigned long)params->min_size);
}
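
/*
 * E.g. a nelem_hint of 100 yields roundup_pow_of_two(133) == 256
 * buckets: the hint is inflated by 4/3 so the initial table sits below
 * the 75% growth watermark, subject to the min_size floor above.
 */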

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
        return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:         hash table to be initialized
 * @params:     configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *      int                     key;
 *      void *                  my_member;
 *      struct rhash_head       node;
 * };
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .key_offset = offsetof(struct test_obj, key),
 *      .key_len = sizeof(int),
 *      .hashfn = jhash,
 *      .nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *      [...]
 *      struct rhash_head       node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *      const struct test_obj *obj = data;
 *
 *      return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .hashfn = jhash,
 *      .obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;

        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;

        if (params->nelem_hint)
                size = rounded_hashtable_size(params);

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
        memcpy(&ht->p, params, sizeof(*params));

        if (params->min_size)
                ht->p.min_size = roundup_pow_of_two(params->min_size);

        if (params->max_size)
                ht->p.max_size = rounddown_pow_of_two(params->max_size);

        if (params->insecure_max_entries)
                ht->p.insecure_max_entries =
                        rounddown_pow_of_two(params->insecure_max_entries);
        else
                ht->p.insecure_max_entries = ht->p.max_size * 2;

        ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

        /* The maximum (not average) chain length grows with the
         * size of the hash table, at a rate of (log N)/(log log N).
         * The value of 16 is selected so that even if the hash
         * table grew to 2^32 you would not expect the maximum
         * chain length to exceed it unless we are under attack
         * (or extremely unlucky).
         *
         * As this limit is only to detect attacks, we don't need
         * to set it to a lower value as you'd need the chain
         * length to vastly exceed 16 to have any real effect
         * on the system.
         */
        if (!params->insecure_elasticity)
                ht->elasticity = 16;

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        ht->key_len = ht->p.key_len;
        if (!params->hashfn) {
                ht->p.hashfn = jhash;

                if (!(ht->key_len & (sizeof(u32) - 1))) {
                        ht->key_len /= sizeof(u32);
                        ht->p.hashfn = rhashtable_jhash2;
                }
        }

        tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);

        RCU_INIT_POINTER(ht->tbl, tbl);

        INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
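
/*
 * End-to-end sketch of the fixed-length-key configuration documented
 * above (illustrative only: "example_obj", "example_params" and
 * example_use() are hypothetical; the insert/lookup/remove fast-path
 * helpers come from <linux/rhashtable.h>).
 */
struct example_obj {
        int                     key;
        struct rhash_head       node;
};

static const struct rhashtable_params example_params = {
        .head_offset    = offsetof(struct example_obj, node),
        .key_offset     = offsetof(struct example_obj, key),
        .key_len        = sizeof(int),
};

static int __maybe_unused example_use(void)
{
        struct rhashtable ht;
        struct example_obj o = { .key = 42 }, *found;
        int err;

        err = rhashtable_init(&ht, &example_params);
        if (err)
                return err;

        err = rhashtable_insert_fast(&ht, &o.node, example_params);
        if (!err) {
                rcu_read_lock();        /* lookups must be RCU protected */
                found = rhashtable_lookup_fast(&ht, &o.key, example_params);
                rcu_read_unlock();

                if (found == &o)
                        err = rhashtable_remove_fast(&ht, &o.node,
                                                     example_params);
        }

        rhashtable_destroy(&ht);
        return err;
}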

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:         the hash table to destroy
 * @free_fn:    callback to release resources of element
 * @arg:        pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
{
        const struct bucket_table *tbl;
        unsigned int i;

        cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;

                        for (pos = rht_dereference(tbl->buckets[i], ht),
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL;
                             !rht_is_a_nulls(pos);
                             pos = next,
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL)
                                free_fn(rht_obj(ht, pos), arg);
                }
        }

        bucket_table_free(tbl);
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
        return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);