/*
 * Compressed RAM based swap device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "ramzswap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/vmalloc.h>

#include "ramzswap_drv.h"

static int ramzswap_major;
static struct ramzswap *devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

static int rzs_test_flag(struct ramzswap *rzs, u32 index,
                        enum rzs_pageflags flag)
{
        return rzs->table[index].flags & BIT(flag);
}

static void rzs_set_flag(struct ramzswap *rzs, u32 index,
                        enum rzs_pageflags flag)
{
        rzs->table[index].flags |= BIT(flag);
}

static void rzs_clear_flag(struct ramzswap *rzs, u32 index,
                        enum rzs_pageflags flag)
{
        rzs->table[index].flags &= ~BIT(flag);
}

static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void ramzswap_set_disksize(struct ramzswap *rzs, size_t totalram_bytes)
{
        if (!rzs->disksize) {
                pr_info(
                "disk size not provided. You can use disksize_kb module "
                "param to specify size.\nUsing default: (%u%% of RAM).\n",
                default_disksize_perc_ram
                );
                rzs->disksize = default_disksize_perc_ram *
                                        (totalram_bytes / 100);
        }

        if (rzs->disksize > 2 * (totalram_bytes)) {
                pr_info(
                "There is little point creating a ramzswap of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that ramzswap uses about 0.1%% of the size of "
                "the swap device when not in use so a huge ramzswap is "
                "wasteful.\n"
                "\tMemory Size: %zu kB\n"
                "\tSize you selected: %zu kB\n"
                "Continuing anyway ...\n",
                totalram_bytes >> 10, rzs->disksize >> 10
                );
        }

        rzs->disksize &= PAGE_MASK;
}

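/*
 * Worked example (assuming default_disksize_perc_ram == 25, its value
 * in ramzswap_drv.h): with 1 GiB of RAM and no disksize given,
 * disksize = 25 * (1073741824 / 100) bytes, roughly 256 MiB, which the
 * PAGE_MASK step above then rounds down to a PAGE_SIZE multiple.
 */
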
static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
                        struct ramzswap_ioctl_stats *s)
{
        s->disksize = rzs->disksize;

#if defined(CONFIG_RAMZSWAP_STATS)
        {
        struct ramzswap_stats *rs = &rzs->stats;
        size_t succ_writes, mem_used;
        unsigned int good_compress_perc = 0, no_compress_perc = 0;

        mem_used = xv_get_total_size_bytes(rzs->mem_pool)
                        + (rs->pages_expand << PAGE_SHIFT);
        succ_writes = rzs_stat64_read(rzs, &rs->num_writes) -
                        rzs_stat64_read(rzs, &rs->failed_writes);

        if (succ_writes && rs->pages_stored) {
                good_compress_perc = rs->good_compress * 100
                                        / rs->pages_stored;
                no_compress_perc = rs->pages_expand * 100
                                        / rs->pages_stored;
        }

        s->num_reads = rzs_stat64_read(rzs, &rs->num_reads);
        s->num_writes = rzs_stat64_read(rzs, &rs->num_writes);
        s->failed_reads = rzs_stat64_read(rzs, &rs->failed_reads);
        s->failed_writes = rzs_stat64_read(rzs, &rs->failed_writes);
        s->invalid_io = rzs_stat64_read(rzs, &rs->invalid_io);
        s->notify_free = rzs_stat64_read(rzs, &rs->notify_free);
        s->pages_zero = rs->pages_zero;

        s->good_compress_pct = good_compress_perc;
        s->pages_expand_pct = no_compress_perc;

        s->pages_stored = rs->pages_stored;
        s->pages_used = mem_used >> PAGE_SHIFT;
        s->orig_data_size = rs->pages_stored << PAGE_SHIFT;
        s->compr_data_size = rs->compr_size;
        s->mem_used_total = mem_used;
        }
#endif /* CONFIG_RAMZSWAP_STATS */
}

static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
{
        u32 clen;
        void *obj;

        struct page *page = rzs->table[index].page;
        u32 offset = rzs->table[index].offset;

        if (unlikely(!page)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (rzs_test_flag(rzs, index, RZS_ZERO)) {
                        rzs_clear_flag(rzs, index, RZS_ZERO);
                        rzs_stat_dec(&rzs->stats.pages_zero);
                }
                return;
        }

        if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
                clen = PAGE_SIZE;
                __free_page(page);
                rzs_clear_flag(rzs, index, RZS_UNCOMPRESSED);
                rzs_stat_dec(&rzs->stats.pages_expand);
                goto out;
        }

        obj = kmap_atomic(page, KM_USER0) + offset;
        clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
        kunmap_atomic(obj, KM_USER0);

        xv_free(rzs->mem_pool, page, offset);
        if (clen <= PAGE_SIZE / 2)
                rzs_stat_dec(&rzs->stats.good_compress);

out:
        rzs->stats.compr_size -= clen;
        rzs_stat_dec(&rzs->stats.pages_stored);

        rzs->table[index].page = NULL;
        rzs->table[index].offset = 0;
}

static void handle_zero_page(struct page *page)
{
        void *user_mem;

        user_mem = kmap_atomic(page, KM_USER0);
        memset(user_mem, 0, PAGE_SIZE);
        kunmap_atomic(user_mem, KM_USER0);

        flush_dcache_page(page);
}

static void handle_uncompressed_page(struct ramzswap *rzs,
                        struct page *page, u32 index)
{
        unsigned char *user_mem, *cmem;

        user_mem = kmap_atomic(page, KM_USER0);
        cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
                        rzs->table[index].offset;

        memcpy(user_mem, cmem, PAGE_SIZE);
        kunmap_atomic(user_mem, KM_USER0);
        kunmap_atomic(cmem, KM_USER1);

        flush_dcache_page(page);
}

static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
{
        int i;
        u32 index;
        struct bio_vec *bvec;

        rzs_stat64_inc(rzs, &rzs->stats.num_reads);

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
        bio_for_each_segment(bvec, bio, i) {
                int ret;
                size_t clen;
                struct page *page;
                struct zobj_header *zheader;
                unsigned char *user_mem, *cmem;

                page = bvec->bv_page;

                if (rzs_test_flag(rzs, index, RZS_ZERO)) {
                        handle_zero_page(page);
                        index++;
                        continue;
                }

                /* Requested page is not present in compressed area */
                if (unlikely(!rzs->table[index].page)) {
                        pr_debug("Read before write: sector=%lu, size=%u\n",
                                (ulong)(bio->bi_sector), bio->bi_size);
                        index++;
                        continue;
                }

                /* Page is stored uncompressed since it's incompressible */
                if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
                        handle_uncompressed_page(rzs, page, index);
                        index++;
                        continue;
                }

                user_mem = kmap_atomic(page, KM_USER0);
                clen = PAGE_SIZE;

                cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
                                rzs->table[index].offset;

                ret = lzo1x_decompress_safe(
                        cmem + sizeof(*zheader),
                        xv_get_object_size(cmem) - sizeof(*zheader),
                        user_mem, &clen);

                kunmap_atomic(user_mem, KM_USER0);
                kunmap_atomic(cmem, KM_USER1);

                /* Should NEVER happen. Return bio error if it does. */
                if (unlikely(ret != LZO_E_OK)) {
                        pr_err("Decompression failed! err=%d, page=%u\n",
                                ret, index);
                        rzs_stat64_inc(rzs, &rzs->stats.failed_reads);
                        goto out;
                }

                flush_dcache_page(page);
                index++;
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;

out:
        bio_io_error(bio);
        return 0;
}

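/*
 * Note on indexing: only page-aligned I/O reaches the read and write
 * paths (see valid_io_request), so a bio's starting page index is
 * simply bi_sector >> SECTORS_PER_PAGE_SHIFT. Assuming 512-byte
 * sectors and 4 KiB pages, SECTORS_PER_PAGE_SHIFT is 3: sector 0 maps
 * to index 0, sector 8 to index 1, sector 16 to index 2, and so on.
 */
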
static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
{
        int i;
        u32 index;
        struct bio_vec *bvec;

        rzs_stat64_inc(rzs, &rzs->stats.num_writes);

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int ret;
                u32 offset;
                size_t clen;
                struct zobj_header *zheader;
                struct page *page, *page_store;
                unsigned char *user_mem, *cmem, *src;

                page = bvec->bv_page;
                src = rzs->compress_buffer;

                /*
                 * System overwrites unused sectors. Free memory associated
                 * with this sector now.
                 */
                if (rzs->table[index].page ||
                                rzs_test_flag(rzs, index, RZS_ZERO))
                        ramzswap_free_page(rzs, index);

                mutex_lock(&rzs->lock);

                user_mem = kmap_atomic(page, KM_USER0);
                if (page_zero_filled(user_mem)) {
                        kunmap_atomic(user_mem, KM_USER0);
                        mutex_unlock(&rzs->lock);
                        rzs_stat_inc(&rzs->stats.pages_zero);
                        rzs_set_flag(rzs, index, RZS_ZERO);
                        index++;
                        continue;
                }

                ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
                                        rzs->compress_workmem);

                kunmap_atomic(user_mem, KM_USER0);

                if (unlikely(ret != LZO_E_OK)) {
                        mutex_unlock(&rzs->lock);
                        pr_err("Compression failed! err=%d\n", ret);
                        rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
                        goto out;
                }

                /*
                 * Page is incompressible. Store it as-is (uncompressed)
                 * since we do not want to return too many swap write
                 * errors which has the side effect of hanging the system.
                 */
                if (unlikely(clen > max_zpage_size)) {
                        clen = PAGE_SIZE;
                        page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
                        if (unlikely(!page_store)) {
                                mutex_unlock(&rzs->lock);
                                pr_info("Error allocating memory for "
                                        "incompressible page: %u\n", index);
                                rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
                                goto out;
                        }

                        offset = 0;
                        rzs_set_flag(rzs, index, RZS_UNCOMPRESSED);
                        rzs_stat_inc(&rzs->stats.pages_expand);
                        rzs->table[index].page = page_store;
                        src = kmap_atomic(page, KM_USER0);
                        goto memstore;
                }

                if (xv_malloc(rzs->mem_pool, clen + sizeof(*zheader),
                                &rzs->table[index].page, &offset,
                                GFP_NOIO | __GFP_HIGHMEM)) {
                        mutex_unlock(&rzs->lock);
                        pr_info("Error allocating memory for compressed "
                                "page: %u, size=%zu\n", index, clen);
                        rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
                        goto out;
                }

memstore:
                rzs->table[index].offset = offset;

                cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
                                rzs->table[index].offset;

                /* Back-reference needed for memory defragmentation */
                if (!rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)) {
                        zheader = (struct zobj_header *)cmem;
                        zheader->table_idx = index;
                        cmem += sizeof(*zheader);
                }

                memcpy(cmem, src, clen);

                kunmap_atomic(cmem, KM_USER1);
                if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
                        kunmap_atomic(src, KM_USER0);

                /* Update stats */
                rzs->stats.compr_size += clen;
                rzs_stat_inc(&rzs->stats.pages_stored);
                if (clen <= PAGE_SIZE / 2)
                        rzs_stat_inc(&rzs->stats.good_compress);

                mutex_unlock(&rzs->lock);
                index++;
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return 0;

out:
        bio_io_error(bio);
        return 0;
}

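/*
 * Summary of the write path above: every page falls into one of three
 * cases -- zero-filled (only the RZS_ZERO flag is set, no memory is
 * allocated), incompressible (clen > max_zpage_size: stored as a whole
 * page with RZS_UNCOMPRESSED set), or compressed (stored in the
 * xvmalloc pool behind a zobj_header back-reference).
 */
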
/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct ramzswap *rzs, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (rzs->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
                (bio->bi_size & (PAGE_SIZE - 1)))) {
                return 0;
        }

        /* I/O request is valid */
        return 1;
}

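/*
 * Example (assuming 512-byte sectors and 4 KiB pages): a bio at
 * sector 8 with bi_size == 4096 passes the bounds, alignment, and
 * size checks; a bio at sector 9 fails the SECTORS_PER_PAGE alignment
 * check, and one with bi_size == 512 fails the PAGE_SIZE check.
 */
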
/*
 * Handler function for all ramzswap I/O requests.
 */
static int ramzswap_make_request(struct request_queue *queue, struct bio *bio)
{
        int ret = 0;
        struct ramzswap *rzs = queue->queuedata;

        if (unlikely(!rzs->init_done)) {
                bio_io_error(bio);
                return 0;
        }

        if (!valid_io_request(rzs, bio)) {
                rzs_stat64_inc(rzs, &rzs->stats.invalid_io);
                bio_io_error(bio);
                return 0;
        }

        switch (bio_data_dir(bio)) {
        case READ:
                ret = ramzswap_read(rzs, bio);
                break;

        case WRITE:
                ret = ramzswap_write(rzs, bio);
                break;
        }

        return ret;
}

static void reset_device(struct ramzswap *rzs)
{
        size_t index;

        /* Do not accept any new I/O request */
        rzs->init_done = 0;

        /* Free various per-device buffers */
        kfree(rzs->compress_workmem);
        free_pages((unsigned long)rzs->compress_buffer, 1);

        rzs->compress_workmem = NULL;
        rzs->compress_buffer = NULL;

        /* Free all pages that are still in this ramzswap device */
        for (index = 0; index < rzs->disksize >> PAGE_SHIFT; index++) {
                struct page *page;
                u16 offset;

                page = rzs->table[index].page;
                offset = rzs->table[index].offset;

                if (!page)
                        continue;

                if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
                        __free_page(page);
                else
                        xv_free(rzs->mem_pool, page, offset);
        }

        vfree(rzs->table);
        rzs->table = NULL;

        xv_destroy_pool(rzs->mem_pool);
        rzs->mem_pool = NULL;

        /* Reset stats */
        memset(&rzs->stats, 0, sizeof(rzs->stats));

        rzs->disksize = 0;
}

static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
{
        int ret;
        size_t num_pages;

        if (rzs->init_done) {
                pr_info("Device already initialized!\n");
                return -EBUSY;
        }

        ramzswap_set_disksize(rzs, totalram_pages << PAGE_SHIFT);

        rzs->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!rzs->compress_workmem) {
                pr_err("Error allocating compressor working memory!\n");
                ret = -ENOMEM;
                goto fail;
        }

        rzs->compress_buffer =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!rzs->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                ret = -ENOMEM;
                goto fail;
        }

        num_pages = rzs->disksize >> PAGE_SHIFT;
        rzs->table = vmalloc(num_pages * sizeof(*rzs->table));
        if (!rzs->table) {
                pr_err("Error allocating ramzswap address table\n");
                /* To prevent accessing table entries during cleanup */
                rzs->disksize = 0;
                ret = -ENOMEM;
                goto fail;
        }
        memset(rzs->table, 0, num_pages * sizeof(*rzs->table));

        set_capacity(rzs->disk, rzs->disksize >> SECTOR_SHIFT);

        /* ramzswap devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rzs->disk->queue);

        rzs->mem_pool = xv_create_pool();
        if (!rzs->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
                goto fail;
        }

        rzs->init_done = 1;

        pr_debug("Initialization done!\n");
        return 0;

fail:
        reset_device(rzs);

        pr_err("Initialization failed: err=%d\n", ret);
        return ret;
}

static int ramzswap_ioctl_reset_device(struct ramzswap *rzs)
{
        if (rzs->init_done)
                reset_device(rzs);

        return 0;
}

static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        int ret = 0;
        size_t disksize_kb;

        struct ramzswap *rzs = bdev->bd_disk->private_data;

        switch (cmd) {
        case RZSIO_SET_DISKSIZE_KB:
                if (rzs->init_done) {
                        ret = -EBUSY;
                        goto out;
                }
                if (copy_from_user(&disksize_kb, (void *)arg,
                                                _IOC_SIZE(cmd))) {
                        ret = -EFAULT;
                        goto out;
                }
                rzs->disksize = disksize_kb << 10;
                pr_info("Disk size set to %zu kB\n", disksize_kb);
                break;

        case RZSIO_GET_STATS:
        {
                struct ramzswap_ioctl_stats *stats;
                if (!rzs->init_done) {
                        ret = -ENOTTY;
                        goto out;
                }
                stats = kzalloc(sizeof(*stats), GFP_KERNEL);
                if (!stats) {
                        ret = -ENOMEM;
                        goto out;
                }
                ramzswap_ioctl_get_stats(rzs, stats);
                if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
                        kfree(stats);
                        ret = -EFAULT;
                        goto out;
                }
                kfree(stats);
                break;
        }
        case RZSIO_INIT:
                ret = ramzswap_ioctl_init_device(rzs);
                break;

        case RZSIO_RESET:
                /* Do not reset an active device! */
                if (bdev->bd_holders) {
                        ret = -EBUSY;
                        goto out;
                }

                /* Make sure all pending I/O is finished */
                if (bdev)
                        fsync_bdev(bdev);

                ret = ramzswap_ioctl_reset_device(rzs);
                break;

        default:
                pr_info("Invalid ioctl %u\n", cmd);
                ret = -ENOTTY;
        }

out:
        return ret;
}

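/*
 * Called by the swap core (via block_device_operations below) when a
 * swap slot becomes free, so the compressed copy can be dropped
 * immediately instead of lingering until the sector is overwritten.
 */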
void ramzswap_slot_free_notify(struct block_device *bdev, unsigned long index)
{
        struct ramzswap *rzs;

        rzs = bdev->bd_disk->private_data;
        ramzswap_free_page(rzs, index);
        rzs_stat64_inc(rzs, &rzs->stats.notify_free);
}

static const struct block_device_operations ramzswap_devops = {
        .ioctl = ramzswap_ioctl,
        .swap_slot_free_notify = ramzswap_slot_free_notify,
        .owner = THIS_MODULE
};

static int create_device(struct ramzswap *rzs, int device_id)
{
        int ret = 0;

        mutex_init(&rzs->lock);
        spin_lock_init(&rzs->stat64_lock);

        rzs->queue = blk_alloc_queue(GFP_KERNEL);
        if (!rzs->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(rzs->queue, ramzswap_make_request);
        rzs->queue->queuedata = rzs;

        /* gendisk structure */
        rzs->disk = alloc_disk(1);
        if (!rzs->disk) {
                blk_cleanup_queue(rzs->queue);
                pr_warning("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        rzs->disk->major = ramzswap_major;
        rzs->disk->first_minor = device_id;
        rzs->disk->fops = &ramzswap_devops;
        rzs->disk->queue = rzs->queue;
        rzs->disk->private_data = rzs;
        snprintf(rzs->disk->disk_name, 16, "ramzswap%d", device_id);

        /* Actual capacity set using RZSIO_SET_DISKSIZE_KB ioctl */
        set_capacity(rzs->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(rzs->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(rzs->disk->queue, PAGE_SIZE);
        blk_queue_io_min(rzs->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(rzs->disk->queue, PAGE_SIZE);

        add_disk(rzs->disk);

        rzs->init_done = 0;

out:
        return ret;
}

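/*
 * Sketch of the userspace setup sequence (ioctl names as declared in
 * the companion ramzswap_ioctl.h header; RZSIO_INIT is assumed to be
 * defined there):
 *
 *	int fd = open("/dev/ramzswap0", O_RDWR);
 *	size_t kb = 262144;                      (256 MiB)
 *	ioctl(fd, RZSIO_SET_DISKSIZE_KB, &kb);   (only while un-initialized)
 *	ioctl(fd, RZSIO_INIT);                   (allocates table and pool)
 *
 * followed by mkswap/swapon on /dev/ramzswap0.
 */
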
static void destroy_device(struct ramzswap *rzs)
{
        if (rzs->disk) {
                del_gendisk(rzs->disk);
                put_disk(rzs->disk);
        }

        if (rzs->queue)
                blk_cleanup_queue(rzs->queue);
}

static int __init ramzswap_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warning("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        ramzswap_major = register_blkdev(0, "ramzswap");
        if (ramzswap_major <= 0) {
                pr_warning("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
                num_devices = 1;
        }

        /* Allocate the device array and initialize each one */
        pr_info("Creating %u devices ...\n", num_devices);
        devices = kzalloc(num_devices * sizeof(struct ramzswap), GFP_KERNEL);
        if (!devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&devices[--dev_id]);
        kfree(devices);
unregister:
        unregister_blkdev(ramzswap_major, "ramzswap");
out:
        return ret;
}

static void __exit ramzswap_exit(void)
{
        int i;
        struct ramzswap *rzs;

        for (i = 0; i < num_devices; i++) {
                rzs = &devices[i];

                destroy_device(rzs);
                if (rzs->init_done)
                        reset_device(rzs);
        }

        unregister_blkdev(ramzswap_major, "ramzswap");

        kfree(devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of ramzswap devices");

module_init(ramzswap_init);
module_exit(ramzswap_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Based Swap Device");