/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
*/
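
/*
 * Usage sketch: the module parameters defined below are writable (0644),
 * so with zswap built in they can typically be set on the kernel command
 * line or at runtime through the standard module-param sysfs layout, e.g.:
 *
 *   zswap.enabled=1 zswap.compressor=lzo zswap.zpool=zbud
 *   echo 1 > /sys/module/zswap/parameters/enabled
 *   echo 25 > /sys/module/zswap/parameters/max_pool_percent
 */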

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
module_param_named(enabled, zswap_enabled, bool, 0644);

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char zswap_compressor[CRYPTO_MAX_ALG_NAME] = ZSWAP_COMPRESSOR_DEFAULT;
static struct kparam_string zswap_compressor_kparam = {
        .string =       zswap_compressor,
        .maxlen =       sizeof(zswap_compressor),
};
static int zswap_compressor_param_set(const char *,
                                      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
        .set =          zswap_compressor_param_set,
        .get =          param_get_string,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
                &zswap_compressor_kparam, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char zswap_zpool_type[32 /* arbitrary */] = ZSWAP_ZPOOL_DEFAULT;
static struct kparam_string zswap_zpool_kparam = {
        .string =       zswap_zpool_type,
        .maxlen =       sizeof(zswap_zpool_type),
};
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
        .set =  zswap_zpool_param_set,
        .get =  param_get_string,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_kparam, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/*********************************
* data structures
**********************************/

struct zswap_pool {
        struct zpool *zpool;
        struct crypto_comp * __percpu *tfm;
        struct kref kref;
        struct list_head list;
        struct rcu_head rcu_head;
        struct notifier_block notifier;
        char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
        struct rb_node rbnode;
        pgoff_t offset;
        int refcount;
        unsigned int length;
        struct zswap_pool *pool;
        unsigned long handle;
};

struct zswap_header {
        swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
        struct rb_root rbroot;
        spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);

/* used by param callback function */
static bool zswap_init_started;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)                                \
        pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
                 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
        .evict = zswap_writeback_entry
};

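/*
 * Returns true when the compressed pool already occupies more than
 * zswap_max_pool_percent of total RAM.
 */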
static bool zswap_is_full(void)
{
        return totalram_pages * zswap_max_pool_percent / 100 <
                DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

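/* Recompute zswap_pool_total_size as the sum of all active pools' sizes. */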
static void zswap_update_total_size(void)
{
        struct zswap_pool *pool;
        u64 total = 0;

        rcu_read_lock();

        list_for_each_entry_rcu(pool, &zswap_pools, list)
                total += zpool_get_total_size(pool->zpool);

        rcu_read_unlock();

        zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
        zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
        return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
        kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
        struct zswap_entry *entry;
        entry = kmem_cache_alloc(zswap_entry_cache, gfp);
        if (!entry)
                return NULL;
        entry->refcount = 1;
        RB_CLEAR_NODE(&entry->rbnode);
        return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
        kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
        struct rb_node *node = root->rb_node;
        struct zswap_entry *entry;

        while (node) {
                entry = rb_entry(node, struct zswap_entry, rbnode);
                if (entry->offset > offset)
                        node = node->rb_left;
                else if (entry->offset < offset)
                        node = node->rb_right;
                else
                        return entry;
        }
        return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
                        struct zswap_entry **dupentry)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;
        struct zswap_entry *myentry;

        while (*link) {
                parent = *link;
                myentry = rb_entry(parent, struct zswap_entry, rbnode);
                if (myentry->offset > entry->offset)
                        link = &(*link)->rb_left;
                else if (myentry->offset < entry->offset)
                        link = &(*link)->rb_right;
                else {
                        *dupentry = myentry;
                        return -EEXIST;
                }
        }
        rb_link_node(&entry->rbnode, parent, link);
        rb_insert_color(&entry->rbnode, root);
        return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
        if (!RB_EMPTY_NODE(&entry->rbnode)) {
                rb_erase(&entry->rbnode, root);
                RB_CLEAR_NODE(&entry->rbnode);
        }
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
        zpool_free(entry->pool->zpool, entry->handle);
        zswap_pool_put(entry->pool);
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
        zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
        entry->refcount++;
}

/*
 * Caller must hold the tree lock.  Removes the entry from the tree and
 * frees it if nobody references it anymore.
 */
static void zswap_entry_put(struct zswap_tree *tree,
                        struct zswap_entry *entry)
{
        int refcount = --entry->refcount;

        BUG_ON(refcount < 0);
        if (refcount == 0) {
                zswap_rb_erase(&tree->rbroot, entry);
                zswap_free_entry(entry);
        }
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
                                pgoff_t offset)
{
        struct zswap_entry *entry = NULL;

        entry = zswap_rb_search(root, offset);
        if (entry)
                zswap_entry_get(entry);

        return entry;
}

/*********************************
* per-cpu code
**********************************/
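/*
 * Per-CPU destination buffer for compression; allocated at 2 * PAGE_SIZE
 * below, presumably so that output which compresses poorly still fits.
 */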
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
{
        u8 *dst;

        switch (action) {
        case CPU_UP_PREPARE:
                dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
                if (!dst) {
                        pr_err("can't allocate compressor buffer\n");
                        return NOTIFY_BAD;
                }
                per_cpu(zswap_dstmem, cpu) = dst;
                break;
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                dst = per_cpu(zswap_dstmem, cpu);
                kfree(dst);
                per_cpu(zswap_dstmem, cpu) = NULL;
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
                                     unsigned long action, void *pcpu)
{
        return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
}

static struct notifier_block zswap_dstmem_notifier = {
        .notifier_call =        zswap_cpu_dstmem_notifier,
};

static int __init zswap_cpu_dstmem_init(void)
{
        unsigned long cpu;

        cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
                    NOTIFY_BAD)
                        goto cleanup;
        __register_cpu_notifier(&zswap_dstmem_notifier);
        cpu_notifier_register_done();
        return 0;

cleanup:
        for_each_online_cpu(cpu)
                __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
        cpu_notifier_register_done();
        return -ENOMEM;
}

static void zswap_cpu_dstmem_destroy(void)
{
        unsigned long cpu;

        cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
        __unregister_cpu_notifier(&zswap_dstmem_notifier);
        cpu_notifier_register_done();
}

static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
                                     unsigned long action, unsigned long cpu)
{
        struct crypto_comp *tfm;

        switch (action) {
        case CPU_UP_PREPARE:
                if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
                        break;
                tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
                if (IS_ERR_OR_NULL(tfm)) {
                        pr_err("could not alloc crypto comp %s : %ld\n",
                               pool->tfm_name, PTR_ERR(tfm));
                        return NOTIFY_BAD;
                }
                *per_cpu_ptr(pool->tfm, cpu) = tfm;
                break;
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                tfm = *per_cpu_ptr(pool->tfm, cpu);
                if (!IS_ERR_OR_NULL(tfm))
                        crypto_free_comp(tfm);
                *per_cpu_ptr(pool->tfm, cpu) = NULL;
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static int zswap_cpu_comp_notifier(struct notifier_block *nb,
                                   unsigned long action, void *pcpu)
{
        unsigned long cpu = (unsigned long)pcpu;
        struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);

        return __zswap_cpu_comp_notifier(pool, action, cpu);
}

static int zswap_cpu_comp_init(struct zswap_pool *pool)
{
        unsigned long cpu;

        memset(&pool->notifier, 0, sizeof(pool->notifier));
        pool->notifier.notifier_call = zswap_cpu_comp_notifier;

        cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
                    NOTIFY_BAD)
                        goto cleanup;
        __register_cpu_notifier(&pool->notifier);
        cpu_notifier_register_done();
        return 0;

cleanup:
        for_each_online_cpu(cpu)
                __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
        cpu_notifier_register_done();
        return -ENOMEM;
}

static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
{
        unsigned long cpu;

        cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
        __unregister_cpu_notifier(&pool->notifier);
        cpu_notifier_register_done();
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
        struct zswap_pool *pool;

        pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
        WARN_ON(!pool);

        return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
        assert_spin_locked(&zswap_pools_lock);

        return __zswap_pool_current();
}

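/*
 * Like zswap_pool_current(), but usable without the pools lock: takes a
 * reference under RCU and returns NULL if none could be obtained.
 */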
static struct zswap_pool *zswap_pool_current_get(void)
{
        struct zswap_pool *pool;

        rcu_read_lock();

        pool = __zswap_pool_current();
        if (!pool || !zswap_pool_get(pool))
                pool = NULL;

        rcu_read_unlock();

        return pool;
}

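/*
 * Returns the last (oldest) pool on zswap_pools with a reference taken,
 * or NULL; this is the pool that shrinking drains first.
 */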
static struct zswap_pool *zswap_pool_last_get(void)
{
        struct zswap_pool *pool, *last = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(pool, &zswap_pools, list)
                last = pool;
        if (!WARN_ON(!last) && !zswap_pool_get(last))
                last = NULL;

        rcu_read_unlock();

        return last;
}

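/*
 * Returns an existing pool matching both zpool type and compressor, with
 * a reference taken, or NULL.  Caller must hold zswap_pools_lock.
 */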
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
        struct zswap_pool *pool;

        assert_spin_locked(&zswap_pools_lock);

        list_for_each_entry_rcu(pool, &zswap_pools, list) {
                if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name)))
                        continue;
                if (strncmp(zpool_get_type(pool->zpool), type,
                            sizeof(zswap_zpool_type)))
                        continue;
                /* if we can't get it, it's about to be destroyed */
                if (!zswap_pool_get(pool))
                        continue;
                return pool;
        }

        return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
        struct zswap_pool *pool;
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool) {
                pr_err("pool alloc failed\n");
                return NULL;
        }

        pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
        if (!pool->zpool) {
                pr_err("%s zpool not available\n", type);
                goto error;
        }
        pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

        strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
        pool->tfm = alloc_percpu(struct crypto_comp *);
        if (!pool->tfm) {
                pr_err("percpu alloc failed\n");
                goto error;
        }

        if (zswap_cpu_comp_init(pool))
                goto error;
        pr_debug("using %s compressor\n", pool->tfm_name);

        /* being the current pool takes 1 ref; this func expects the
         * caller to always add the new pool as the current pool
         */
        kref_init(&pool->kref);
        INIT_LIST_HEAD(&pool->list);

        zswap_pool_debug("created", pool);

        return pool;

error:
        free_percpu(pool->tfm);
        if (pool->zpool)
                zpool_destroy_pool(pool->zpool);
        kfree(pool);
        return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
        if (!crypto_has_comp(zswap_compressor, 0, 0)) {
                pr_err("compressor %s not available, using default %s\n",
                       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
                strncpy(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT,
                        sizeof(zswap_compressor));
        }
        if (!zpool_has_pool(zswap_zpool_type)) {
                pr_err("zpool %s not available, using default %s\n",
                       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
                strncpy(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT,
                        sizeof(zswap_zpool_type));
        }

        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
        zswap_pool_debug("destroying", pool);

        zswap_cpu_comp_destroy(pool);
        free_percpu(pool->tfm);
        zpool_destroy_pool(pool->zpool);
        kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
        return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct rcu_head *head)
{
        struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);

        /* nobody should have been able to get a kref... */
        WARN_ON(kref_get_unless_zero(&pool->kref));

        /* pool is now off zswap_pools list and has no references. */
        zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
        struct zswap_pool *pool;

        pool = container_of(kref, typeof(*pool), kref);

        spin_lock(&zswap_pools_lock);

        WARN_ON(pool == zswap_pool_current());

        list_del_rcu(&pool->list);
        call_rcu(&pool->rcu_head, __zswap_pool_release);

        spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
        kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

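/*
 * Common handler for the zpool and compressor params: validates the new
 * value, finds or creates a matching pool, swaps it in as the current
 * pool, and drops the reference on whichever pool is being replaced.
 * Exactly one of type/compressor is NULL and is taken from val.
 */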
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                             char *type, char *compressor)
{
        struct zswap_pool *pool, *put_pool = NULL;
        char str[kp->str->maxlen], *s;
        int ret;

        /*
         * kp is either zswap_zpool_kparam or zswap_compressor_kparam, defined
         * at the top of this file, so maxlen is CRYPTO_MAX_ALG_NAME (64) or
         * 32 (arbitrary).
         */
        strlcpy(str, val, kp->str->maxlen);
        s = strim(str);

        /* if this is load-time (pre-init) param setting,
         * don't create a pool; that's done during init.
         */
        if (!zswap_init_started)
                return param_set_copystring(s, kp);

        /* no change required */
        if (!strncmp(kp->str->string, s, kp->str->maxlen))
                return 0;

        if (!type) {
                type = s;
                if (!zpool_has_pool(type)) {
                        pr_err("zpool %s not available\n", type);
                        return -ENOENT;
                }
        } else if (!compressor) {
                compressor = s;
                if (!crypto_has_comp(compressor, 0, 0)) {
                        pr_err("compressor %s not available\n", compressor);
                        return -ENOENT;
                }
        }

        spin_lock(&zswap_pools_lock);

        pool = zswap_pool_find_get(type, compressor);
        if (pool) {
                zswap_pool_debug("using existing", pool);
                list_del_rcu(&pool->list);
        } else {
                spin_unlock(&zswap_pools_lock);
                pool = zswap_pool_create(type, compressor);
                spin_lock(&zswap_pools_lock);
        }

        if (pool)
                ret = param_set_copystring(s, kp);
        else
                ret = -EINVAL;

        if (!ret) {
                put_pool = zswap_pool_current();
                list_add_rcu(&pool->list, &zswap_pools);
        } else if (pool) {
                /* add the possibly pre-existing pool to the end of the pools
                 * list; if it's new (and empty) then it'll be removed and
                 * destroyed by the put after we drop the lock
                 */
                list_add_tail_rcu(&pool->list, &zswap_pools);
                put_pool = pool;
        }

        spin_unlock(&zswap_pools_lock);

        /* drop the ref from either the old current pool,
         * or the new pool we failed to add
         */
        if (put_pool)
                zswap_pool_put(put_pool);

        return ret;
}

static int zswap_compressor_param_set(const char *val,
                                      const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
                                 const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
        ZSWAP_SWAPCACHE_NEW,
        ZSWAP_SWAPCACHE_EXIST,
        ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
                                struct page **retpage)
{
        bool page_was_allocated;

        *retpage = __read_swap_cache_async(entry, GFP_KERNEL,
                        NULL, 0, &page_was_allocated);
        if (page_was_allocated)
                return ZSWAP_SWAPCACHE_NEW;
        if (!*retpage)
                return ZSWAP_SWAPCACHE_FAIL;
        return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
        struct zswap_header *zhdr;
        swp_entry_t swpentry;
        struct zswap_tree *tree;
        pgoff_t offset;
        struct zswap_entry *entry;
        struct page *page;
        struct crypto_comp *tfm;
        u8 *src, *dst;
        unsigned int dlen;
        int ret;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
        };

        /* extract swpentry from data */
        zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
        swpentry = zhdr->swpentry; /* here */
        zpool_unmap_handle(pool, handle);
        tree = zswap_trees[swp_type(swpentry)];
        offset = swp_offset(swpentry);

        /* find and ref zswap entry */
        spin_lock(&tree->lock);
        entry = zswap_entry_find_get(&tree->rbroot, offset);
        if (!entry) {
                /* entry was invalidated */
                spin_unlock(&tree->lock);
                return 0;
        }
        spin_unlock(&tree->lock);
        BUG_ON(offset != entry->offset);

        /* try to allocate swap cache page */
        switch (zswap_get_swap_cache_page(swpentry, &page)) {
        case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
                ret = -ENOMEM;
                goto fail;

        case ZSWAP_SWAPCACHE_EXIST:
                /* page is already in the swap cache, ignore for now */
                page_cache_release(page);
                ret = -EEXIST;
                goto fail;

        case ZSWAP_SWAPCACHE_NEW: /* page is locked */
                /* decompress */
                dlen = PAGE_SIZE;
                src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
                                ZPOOL_MM_RO) + sizeof(struct zswap_header);
                dst = kmap_atomic(page);
                tfm = *get_cpu_ptr(entry->pool->tfm);
                ret = crypto_comp_decompress(tfm, src, entry->length,
                                             dst, &dlen);
                put_cpu_ptr(entry->pool->tfm);
                kunmap_atomic(dst);
                zpool_unmap_handle(entry->pool->zpool, entry->handle);
                BUG_ON(ret);
                BUG_ON(dlen != PAGE_SIZE);

                /* page is up to date */
                SetPageUptodate(page);
        }

        /* move it to the tail of the inactive list after end_writeback */
        SetPageReclaim(page);

        /* start writeback */
        __swap_writepage(page, &wbc, end_swap_bio_write);
        page_cache_release(page);
        zswap_written_back_pages++;

        spin_lock(&tree->lock);
        /* drop local reference */
        zswap_entry_put(tree, entry);

        /*
         * There are two possible situations for the entry here:
         * (1) refcount is 1 (normal case): the entry is valid and on the tree
         * (2) refcount is 0: the entry was freed and removed from the tree
         *     because an invalidate happened during writeback
         * Search the tree; if the entry is still present, drop its base
         * reference so it is removed and freed.
         */
        if (entry == zswap_rb_search(&tree->rbroot, offset))
                zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

        goto end;

        /*
         * If we get here because of ZSWAP_SWAPCACHE_EXIST, a load may be
         * happening concurrently, so it is safe (and correct) not to free
         * the entry here; if the following put does free it, returning
         * non-zero is still okay.
         */
fail:
        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

end:
        return ret;
}

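/*
 * Asks the oldest pool's zpool to evict one page, which ends up calling
 * zswap_writeback_entry() via zswap_zpool_ops.
 */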
static int zswap_shrink(void)
{
        struct zswap_pool *pool;
        int ret;

        pool = zswap_pool_last_get();
        if (!pool)
                return -ENOENT;

        ret = zpool_shrink(pool->zpool, 1, NULL);

        zswap_pool_put(pool);

        return ret;
}

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                                struct page *page)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry, *dupentry;
        struct crypto_comp *tfm;
        int ret;
        unsigned int dlen = PAGE_SIZE, len;
        unsigned long handle;
        char *buf;
        u8 *src, *dst;
        struct zswap_header *zhdr;

        if (!zswap_enabled || !tree) {
                ret = -ENODEV;
                goto reject;
        }

        /* reclaim space if needed */
        if (zswap_is_full()) {
                zswap_pool_limit_hit++;
                if (zswap_shrink()) {
                        zswap_reject_reclaim_fail++;
                        ret = -ENOMEM;
                        goto reject;
                }
        }

        /* allocate entry */
        entry = zswap_entry_cache_alloc(GFP_KERNEL);
        if (!entry) {
                zswap_reject_kmemcache_fail++;
                ret = -ENOMEM;
                goto reject;
        }

        /* if entry is successfully added, it keeps the reference */
        entry->pool = zswap_pool_current_get();
        if (!entry->pool) {
                ret = -EINVAL;
                goto freepage;
        }

        /* compress */
        dst = get_cpu_var(zswap_dstmem);
        tfm = *get_cpu_ptr(entry->pool->tfm);
        src = kmap_atomic(page);
        ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
        kunmap_atomic(src);
        put_cpu_ptr(entry->pool->tfm);
        if (ret) {
                ret = -EINVAL;
                goto put_dstmem;
        }

        /* store */
        len = dlen + sizeof(struct zswap_header);
        ret = zpool_malloc(entry->pool->zpool, len,
                           __GFP_NORETRY | __GFP_NOWARN, &handle);
        if (ret == -ENOSPC) {
                zswap_reject_compress_poor++;
                goto put_dstmem;
        }
        if (ret) {
                zswap_reject_alloc_fail++;
                goto put_dstmem;
        }
        zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
        zhdr->swpentry = swp_entry(type, offset);
        buf = (u8 *)(zhdr + 1);
        memcpy(buf, dst, dlen);
        zpool_unmap_handle(entry->pool->zpool, handle);
        put_cpu_var(zswap_dstmem);

        /* populate entry */
        entry->offset = offset;
        entry->handle = handle;
        entry->length = dlen;

        /* map */
        spin_lock(&tree->lock);
        do {
                ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
                if (ret == -EEXIST) {
                        zswap_duplicate_entry++;
                        /* remove from rbtree */
                        zswap_rb_erase(&tree->rbroot, dupentry);
                        zswap_entry_put(tree, dupentry);
                }
        } while (ret == -EEXIST);
        spin_unlock(&tree->lock);

        /* update stats */
        atomic_inc(&zswap_stored_pages);
        zswap_update_total_size();

        return 0;

put_dstmem:
        put_cpu_var(zswap_dstmem);
        zswap_pool_put(entry->pool);
freepage:
        zswap_entry_cache_free(entry);
reject:
        return ret;
}

/*
 * returns 0 if the page was successfully decompressed
 * returns -1 if the entry was not found or on error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
                                struct page *page)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;
        struct crypto_comp *tfm;
        u8 *src, *dst;
        unsigned int dlen;
        int ret;

        /* find */
        spin_lock(&tree->lock);
        entry = zswap_entry_find_get(&tree->rbroot, offset);
        if (!entry) {
                /* entry was written back */
                spin_unlock(&tree->lock);
                return -1;
        }
        spin_unlock(&tree->lock);

        /* decompress */
        dlen = PAGE_SIZE;
        src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
                        ZPOOL_MM_RO) + sizeof(struct zswap_header);
        dst = kmap_atomic(page);
        tfm = *get_cpu_ptr(entry->pool->tfm);
        ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
        put_cpu_ptr(entry->pool->tfm);
        kunmap_atomic(dst);
        zpool_unmap_handle(entry->pool->zpool, entry->handle);
        BUG_ON(ret);

        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

        return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;

        /* find */
        spin_lock(&tree->lock);
        entry = zswap_rb_search(&tree->rbroot, offset);
        if (!entry) {
                /* entry was written back */
                spin_unlock(&tree->lock);
                return;
        }

        /* remove from rbtree */
        zswap_rb_erase(&tree->rbroot, entry);

        /* drop the initial reference from entry creation */
        zswap_entry_put(tree, entry);

        spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry, *n;

        if (!tree)
                return;

        /* walk the tree and free everything */
        spin_lock(&tree->lock);
        rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
                zswap_free_entry(entry);
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
        kfree(tree);
        zswap_trees[type] = NULL;
}

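/* Allocates and installs the per-swap-type tree when a swap area is enabled. */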
static void zswap_frontswap_init(unsigned type)
{
        struct zswap_tree *tree;

        tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
        if (!tree) {
                pr_err("alloc failed, zswap disabled for swap type %d\n", type);
                return;
        }

        tree->rbroot = RB_ROOT;
        spin_lock_init(&tree->lock);
        zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
        .store = zswap_frontswap_store,
        .load = zswap_frontswap_load,
        .invalidate_page = zswap_frontswap_invalidate_page,
        .invalidate_area = zswap_frontswap_invalidate_area,
        .init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
        if (!debugfs_initialized())
                return -ENODEV;

        zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
        if (!zswap_debugfs_root)
                return -ENOMEM;

        debugfs_create_u64("pool_limit_hit", S_IRUGO,
                        zswap_debugfs_root, &zswap_pool_limit_hit);
        debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
                        zswap_debugfs_root, &zswap_reject_reclaim_fail);
        debugfs_create_u64("reject_alloc_fail", S_IRUGO,
                        zswap_debugfs_root, &zswap_reject_alloc_fail);
        debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
                        zswap_debugfs_root, &zswap_reject_kmemcache_fail);
        debugfs_create_u64("reject_compress_poor", S_IRUGO,
                        zswap_debugfs_root, &zswap_reject_compress_poor);
        debugfs_create_u64("written_back_pages", S_IRUGO,
                        zswap_debugfs_root, &zswap_written_back_pages);
        debugfs_create_u64("duplicate_entry", S_IRUGO,
                        zswap_debugfs_root, &zswap_duplicate_entry);
        debugfs_create_u64("pool_total_size", S_IRUGO,
                        zswap_debugfs_root, &zswap_pool_total_size);
        debugfs_create_atomic_t("stored_pages", S_IRUGO,
                        zswap_debugfs_root, &zswap_stored_pages);

        return 0;
}

static void __exit zswap_debugfs_exit(void)
{
        debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
        return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
        struct zswap_pool *pool;

        zswap_init_started = true;

        if (zswap_entry_cache_create()) {
                pr_err("entry cache creation failed\n");
                goto cache_fail;
        }

        if (zswap_cpu_dstmem_init()) {
                pr_err("dstmem alloc failed\n");
                goto dstmem_fail;
        }

        pool = __zswap_pool_create_fallback();
        if (!pool) {
                pr_err("pool creation failed\n");
                goto pool_fail;
        }
        pr_info("loaded using pool %s/%s\n", pool->tfm_name,
                zpool_get_type(pool->zpool));

        list_add(&pool->list, &zswap_pools);

        frontswap_register_ops(&zswap_frontswap_ops);
        if (zswap_debugfs_init())
                pr_warn("debugfs initialization failed\n");
        return 0;

pool_fail:
        zswap_cpu_dstmem_destroy();
dstmem_fail:
        zswap_entry_cache_destroy();
cache_fail:
        return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");