/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%u\n", zram->stats.pages_zero);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta = zram->meta;

        down_read(&zram->init_lock);
        if (zram->init_done)
                val = zs_get_total_size_bytes(meta->mem_pool);
        up_read(&zram->init_lock);

        return sprintf(buf, "%llu\n", val);
}

static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        u64 start, end, bound;

        /* unaligned request */
        if (unlikely(bio->bi_iter.bi_sector &
                        (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        start = bio->bi_iter.bi_sector;
        end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}

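/*
 * Illustrative example (assuming ZRAM_LOGICAL_BLOCK_SIZE == 4096, i.e.
 * ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a bio at sector 8 with bi_size
 * 8192 passes both alignment checks and, on a 1 MiB device (2048
 * sectors), is within bounds. A bio at sector 9 fails the sector
 * alignment check and is accounted as invalid_io by the caller.
 */
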
static void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        kfree(meta->compress_workmem);
        free_pages((unsigned long)meta->compress_buffer, 1);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
        if (!meta)
                goto out;

        meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!meta->compress_workmem)
                goto free_meta;

        meta->compress_buffer =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!meta->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                goto free_workmem;
        }

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto free_buffer;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto free_table;
        }

        return meta;

free_table:
        vfree(meta->table);
free_buffer:
        free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
        kfree(meta->compress_workmem);
free_meta:
        kfree(meta);
        meta = NULL;
out:
        return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

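/*
 * Example for update_position(): with 4 KiB pages, a 1 KiB bvec at
 * offset 3 KiB advances *index by one and wraps *offset back to 0,
 * while a 1 KiB bvec at offset 1 KiB leaves *index unchanged and
 * moves *offset to 2 KiB.
 */
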
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;
        u16 size = meta->table[index].size;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        zram->stats.pages_zero--;
                }
                return;
        }

        if (unlikely(size > max_zpage_size))
                zram->stats.bad_compress--;

        zs_free(meta->mem_pool, handle);

        if (size <= PAGE_SIZE / 2)
                zram->stats.good_compress--;

        atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
        zram->stats.pages_stored--;

        meta->table[index].handle = 0;
        meta->table[index].size = 0;
}

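/*
 * Note on the counters above: good_compress tracks pages whose
 * compressed size is at most PAGE_SIZE / 2, and bad_compress tracks
 * pages stored uncompressed because they compressed to more than
 * max_zpage_size (defined in zram_drv.h). The decrements here mirror
 * the increments in zram_bvec_write().
 */
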
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = LZO_E_OK;
        size_t clen = PAGE_SIZE;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (meta->table[index].size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
                                                mem, &clen);
        zs_unmap_object(meta->mem_pool, handle);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                atomic64_inc(&zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                        u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                handle_zero_page(bvec);
                return 0;
        }

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;

        page = bvec->bv_page;
        src = meta->compress_buffer;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else
                uncmem = user_mem;

        if (page_zero_filled(uncmem)) {
                /* user_mem is already unmapped for a partial write */
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                zram_free_page(zram, index);

                zram->stats.pages_zero++;
                zram_set_flag(meta, index, ZRAM_ZERO);
                ret = 0;
                goto out;
        }

        /*
         * zram_slot_free_notify() may have failed to queue a free request
         * (its GFP_ATOMIC allocation can fail), so double-check for a
         * stale entry and free it before overwriting.
         */
        if (unlikely(meta->table[index].handle ||
                        zram_test_flag(meta, index, ZRAM_ZERO)))
                zram_free_page(zram, index);

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                                meta->compress_workmem);

        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        if (unlikely(clen > max_zpage_size)) {
                zram->stats.bad_compress++;
                clen = PAGE_SIZE;
                src = NULL;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else
                memcpy(cmem, src, clen);

        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        meta->table[index].size = clen;

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_size);
        zram->stats.pages_stored++;
        if (clen <= PAGE_SIZE / 2)
                zram->stats.good_compress++;

out:
        if (is_partial_io(bvec))
                kfree(uncmem);

        if (ret)
                atomic64_inc(&zram->stats.failed_writes);
        return ret;
}

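/*
 * Deferred slot freeing: zram_slot_free_notify() is called from the
 * swap path in a context where zram->lock cannot be taken directly,
 * so instead of freeing slots there it queues them on
 * zram->slot_free_rq. The queue is drained under zram->lock, either
 * by the free_work worker (zram_slot_free) or before the next
 * read/write in zram_bvec_rw().
 */
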
static void handle_pending_slot_free(struct zram *zram)
{
        struct zram_slot_free *free_rq;

        spin_lock(&zram->slot_free_lock);
        while (zram->slot_free_rq) {
                free_rq = zram->slot_free_rq;
                zram->slot_free_rq = free_rq->next;
                zram_free_page(zram, free_rq->index);
                kfree(free_rq);
        }
        spin_unlock(&zram->slot_free_lock);
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
{
        int ret;

        if (rw == READ) {
                down_read(&zram->lock);
                handle_pending_slot_free(zram);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
                handle_pending_slot_free(zram);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }

        return ret;
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
        size_t index;
        struct zram_meta *meta;

        flush_work(&zram->free_work);

        down_write(&zram->init_lock);
        if (!zram->init_done) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        zram->init_done = 0;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;
                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);
        up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
        if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %lu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
                );
        }

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->meta = meta;
        zram->init_done = 1;

        pr_debug("Initialization done!\n");
}

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
        if (!meta)
                return -ENOMEM;
        down_write(&zram->init_lock);
        if (zram->init_done) {
                up_write(&zram->init_lock);
                zram_meta_free(meta);
                pr_info("Cannot change disksize for initialized device\n");
                return -EBUSY;
        }

        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        zram_init_device(zram, meta);
        up_write(&zram->init_lock);

        return len;
}

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);

        if (!bdev)
                return -ENOMEM;

        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
                goto out;
        }

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                goto out;

        if (!do_reset) {
                ret = -EINVAL;
                goto out;
        }

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        bdput(bdev);

        zram_reset_device(zram, true);
        return len;

out:
        bdput(bdev);
        return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        switch (rw) {
        case READ:
                atomic64_inc(&zram->stats.num_reads);
                break;
        case WRITE:
                atomic64_inc(&zram->stats.num_writes);
                break;
        }

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                        (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
                                        < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

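/*
 * Worked example for the split above: with 4 KiB pages, a bio starting
 * at sector 4 lands 2 KiB into page 0 (index 0, offset 2048). A 4 KiB
 * segment there exceeds max_transfer_size (2048 bytes), so it is split
 * into a 2 KiB transfer to the tail of page 0 and a 2 KiB transfer to
 * the head of page 1.
 */
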
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!zram->init_done))
                goto error;

        if (!valid_io_request(zram, bio)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio, bio_data_dir(bio));
        up_read(&zram->init_lock);

        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

static void zram_slot_free(struct work_struct *work)
{
        struct zram *zram;

        zram = container_of(work, struct zram, free_work);
        down_write(&zram->lock);
        handle_pending_slot_free(zram);
        up_write(&zram->lock);
}

static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
{
        spin_lock(&zram->slot_free_lock);
        free_rq->next = zram->slot_free_rq;
        zram->slot_free_rq = free_rq;
        spin_unlock(&zram->slot_free_lock);
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_slot_free *free_rq;

        zram = bdev->bd_disk->private_data;
        atomic64_inc(&zram->stats.notify_free);

        free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
        if (!free_rq)
                return;

        free_rq->index = index;
        add_slot_free(zram, free_rq);
        schedule_work(&zram->free_work);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
                disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

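/*
 * Typical userspace interaction with the attributes above (illustrative):
 *
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *	cat /sys/block/zram0/mem_used_total
 *	echo 1 > /sys/block/zram0/reset
 */
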
static int create_device(struct zram *zram, int device_id)
{
        int ret = -ENOMEM;

        init_rwsem(&zram->lock);
        init_rwsem(&zram->init_lock);

        INIT_WORK(&zram->free_work, zram_slot_free);
        spin_lock_init(&zram->slot_free_lock);
        zram->slot_free_rq = NULL;

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * Ensure that we always get PAGE_SIZE-aligned
         * and n*PAGE_SIZE-sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group");
                goto out_free_disk;
        }

        zram->init_done = 0;
        return 0;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(zram->queue);
out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        del_gendisk(zram->disk);
        put_disk(zram->disk);

        blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                /*
                 * Shouldn't access zram->disk after destroy_device
                 * because destroy_device already released zram->disk.
                 */
                zram_reset_device(zram, false);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

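/*
 * For example, loading the module with "modprobe zram num_devices=4"
 * creates /dev/zram0 through /dev/zram3.
 */
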
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");