lib/rhashtable.c
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
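/*
 * For context, a minimal sketch (not part of this file) of how callers pick
 * a per-bucket lock: the bucket hash is masked with locks_mask, roughly as
 * rht_bucket_lock() does in <linux/rhashtable.h>:
 *
 *	static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
 *						   unsigned int hash)
 *	{
 *		return &tbl->locks[hash & tbl->locks_mask];
 *	}
 *
 * Because the lock array size is rounded to a power of two above, several
 * buckets may share one lock once the table outgrows the number of locks.
 */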
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that would not
 * cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}
int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL) {
		/* Schedule async resize/rehash to retry the allocation in
		 * non-atomic context.
		 */
		schedule_work(&ht->run_work);
		return -ENOMEM;
	}

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
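/*
 * A minimal usage sketch of the walk interface (not part of this file),
 * reusing the hypothetical "struct test_obj" from the rhashtable_init()
 * examples below.  A resize during the walk makes rhashtable_walk_next()
 * return ERR_PTR(-EAGAIN), after which the iterator restarts from the
 * beginning, so callers must tolerate seeing entries twice:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto out;
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(table was resized, restart)
 *			break;
 *		}
 *		(... use obj ...)
 *	}
 *
 * out:
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */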
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
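/*
 * A minimal end-to-end sketch (not part of this file), building on the
 * fixed-length-key example above.  Insertion, lookup and removal go through
 * the inline fast paths defined in <linux/rhashtable.h>; the names "my_ht"
 * and "my_params" are hypothetical:
 *
 *	static struct rhashtable my_ht;
 *
 *	static const struct rhashtable_params my_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset  = offsetof(struct test_obj, key),
 *		.key_len     = sizeof(int),
 *	};
 *
 *	err   = rhashtable_init(&my_ht, &my_params);
 *	err   = rhashtable_insert_fast(&my_ht, &obj->node, my_params);
 *	found = rhashtable_lookup_fast(&my_ht, &key, my_params);
 *	err   = rhashtable_remove_fast(&my_ht, &obj->node, my_params);
 *
 * Lookups may run under rcu_read_lock() concurrently with insertions and
 * removals; the table resizes itself in the background via run_work.
 */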
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
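/*
 * A minimal free_fn sketch (not part of this file) for the hypothetical
 * "struct test_obj" used in the examples above.  Because RCU readers may
 * still hold references, releasing the object via kfree_rcu() (or after an
 * explicit grace period) is one compatible manner of cleanup:
 *
 *	static void test_obj_free_fn(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *
 *		kfree_rcu(obj, rcu);	(assumes a struct rcu_head "rcu" member)
 *	}
 *
 *	rhashtable_free_and_destroy(&my_ht, test_obj_free_fn, NULL);
 */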
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);