staging: zram: replace xvmalloc with zsmalloc
author     Nitin Gupta <ngupta@vflare.org>
           Mon, 9 Jan 2012 22:51:59 +0000 (16:51 -0600)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 9 Feb 2012 01:13:00 +0000 (17:13 -0800)
Replace xvmalloc with zsmalloc as the compressed page allocator
for zram.
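
zsmalloc returns an opaque handle instead of the (page, offset) pair
that xvmalloc handed out, and objects must be mapped with
zs_map_object()/zs_unmap_object() before use instead of kmap_atomic()
plus offset arithmetic. A minimal sketch of the new allocator
lifecycle, distilled from the calls introduced below (error paths
trimmed):

    struct zs_pool *pool;
    void *handle, *cmem;

    /* one pool per zram device */
    pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);

    /* store a compressed object */
    handle = zs_malloc(pool, clen + sizeof(struct zobj_header));
    cmem = zs_map_object(pool, handle);  /* pointer valid only while mapped */
    memcpy(cmem, src, clen);
    zs_unmap_object(pool, handle);

    /* ... the read side maps the same handle, then unmaps ... */

    zs_free(pool, handle);               /* on overwrite or device reset */
    zs_destroy_pool(pool);

Since zsmalloc has no counterpart to xv_get_object_size(), the
compressed length is now recorded in the per-page table
(table[index].size) when the object is stored, and read back on
decompress and free.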

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Acked-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/zram/Kconfig
drivers/staging/zram/Makefile
drivers/staging/zram/zram_drv.c
drivers/staging/zram/zram_drv.h
drivers/staging/zram/zram_sysfs.c

diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index 3bec4dba3fe5d656681e8365734beb172a27ac12..ee23a86ea7d5eec82644b4964dd3390a500d5abd 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -1,11 +1,7 @@
-config XVMALLOC
-       bool
-       default n
-
 config ZRAM
        tristate "Compressed RAM block device support"
        depends on BLOCK && SYSFS
-       select XVMALLOC
+       select ZSMALLOC
        select LZO_COMPRESS
        select LZO_DECOMPRESS
        default n
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 2a6d3213a75644f2e3df419e8680b64b3b540fec..7f4a3019e9c4455db189355671f14bef285c3a5b 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,4 +1,3 @@
 zram-y :=      zram_drv.o zram_sysfs.o
 
 obj-$(CONFIG_ZRAM)     +=      zram.o
-obj-$(CONFIG_XVMALLOC) +=      xvmalloc.o
\ No newline at end of file
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2a2a92d389e63f9e4e6aacff9c1868ef97011f9c..5833156d228259e551f78a6cd7b1f9311c3494a3 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -135,13 +135,9 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
 
 static void zram_free_page(struct zram *zram, size_t index)
 {
-       u32 clen;
-       void *obj;
+       void *handle = zram->table[index].handle;
 
-       struct page *page = zram->table[index].page;
-       u32 offset = zram->table[index].offset;
-
-       if (unlikely(!page)) {
+       if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
@@ -154,27 +150,24 @@ static void zram_free_page(struct zram *zram, size_t index)
        }
 
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-               clen = PAGE_SIZE;
-               __free_page(page);
+               __free_page(handle);
                zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_dec(&zram->stats.pages_expand);
                goto out;
        }
 
-       obj = kmap_atomic(page, KM_USER0) + offset;
-       clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
-       kunmap_atomic(obj, KM_USER0);
+       zs_free(zram->mem_pool, handle);
 
-       xv_free(zram->mem_pool, page, offset);
-       if (clen <= PAGE_SIZE / 2)
+       if (zram->table[index].size <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);
 
 out:
-       zram_stat64_sub(zram, &zram->stats.compr_size, clen);
+       zram_stat64_sub(zram, &zram->stats.compr_size,
+                       zram->table[index].size);
        zram_stat_dec(&zram->stats.pages_stored);
 
-       zram->table[index].page = NULL;
-       zram->table[index].offset = 0;
+       zram->table[index].handle = NULL;
+       zram->table[index].size = 0;
 }
 
 static void handle_zero_page(struct bio_vec *bvec)
@@ -196,7 +189,7 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
        unsigned char *user_mem, *cmem;
 
        user_mem = kmap_atomic(page, KM_USER0);
-       cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+       cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
 
        memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
        kunmap_atomic(cmem, KM_USER1);
@@ -227,7 +220,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
        }
 
        /* Requested page is not present in compressed area */
-       if (unlikely(!zram->table[index].page)) {
+       if (unlikely(!zram->table[index].handle)) {
                pr_debug("Read before write: sector=%lu, size=%u",
                         (ulong)(bio->bi_sector), bio->bi_size);
                handle_zero_page(bvec);
@@ -254,11 +247,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                uncmem = user_mem;
        clen = PAGE_SIZE;
 
-       cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
-               zram->table[index].offset;
+       cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
 
        ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
-                                   xv_get_object_size(cmem) - sizeof(*zheader),
+                                   zram->table[index].size,
                                    uncmem, &clen);
 
        if (is_partial_io(bvec)) {
@@ -267,7 +259,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                kfree(uncmem);
        }
 
-       kunmap_atomic(cmem, KM_USER1);
+       zs_unmap_object(zram->mem_pool, zram->table[index].handle);
        kunmap_atomic(user_mem, KM_USER0);
 
        /* Should NEVER happen. Return bio error if it does. */
@@ -290,13 +282,12 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
        unsigned char *cmem;
 
        if (zram_test_flag(zram, index, ZRAM_ZERO) ||
-           !zram->table[index].page) {
+           !zram->table[index].handle) {
                memset(mem, 0, PAGE_SIZE);
                return 0;
        }
 
-       cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
-               zram->table[index].offset;
+       cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
 
        /* Page is stored uncompressed since it's incompressible */
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
@@ -306,9 +297,9 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
        }
 
        ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
-                                   xv_get_object_size(cmem) - sizeof(*zheader),
+                                   zram->table[index].size,
                                    mem, &clen);
-       kunmap_atomic(cmem, KM_USER0);
+       zs_unmap_object(zram->mem_pool, zram->table[index].handle);
 
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
@@ -326,6 +317,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        int ret;
        u32 store_offset;
        size_t clen;
+       void *handle;
        struct zobj_header *zheader;
        struct page *page, *page_store;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
@@ -355,7 +347,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
-       if (zram->table[index].page ||
+       if (zram->table[index].handle ||
            zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);
 
@@ -407,26 +399,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                store_offset = 0;
                zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_inc(&zram->stats.pages_expand);
-               zram->table[index].page = page_store;
+               handle = page_store;
                src = kmap_atomic(page, KM_USER0);
+               cmem = kmap_atomic(page_store, KM_USER1);
                goto memstore;
        }
 
-       if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
-                     &zram->table[index].page, &store_offset,
-                     GFP_NOIO | __GFP_HIGHMEM)) {
+       handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+       if (!handle) {
                pr_info("Error allocating memory for compressed "
                        "page: %u, size=%zu\n", index, clen);
                ret = -ENOMEM;
                goto out;
        }
+       cmem = zs_map_object(zram->mem_pool, handle);
 
 memstore:
-       zram->table[index].offset = store_offset;
-
-       cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
-               zram->table[index].offset;
-
 #if 0
        /* Back-reference needed for memory defragmentation */
        if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
@@ -438,9 +426,15 @@ memstore:
 
        memcpy(cmem, src, clen);
 
-       kunmap_atomic(cmem, KM_USER1);
-       if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+       if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+               kunmap_atomic(cmem, KM_USER1);
                kunmap_atomic(src, KM_USER0);
+       } else {
+               zs_unmap_object(zram->mem_pool, handle);
+       }
+
+       zram->table[index].handle = handle;
+       zram->table[index].size = clen;
 
        /* Update stats */
        zram_stat64_add(zram, &zram->stats.compr_size, clen);
@@ -598,25 +592,20 @@ void __zram_reset_device(struct zram *zram)
 
        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
-               struct page *page;
-               u16 offset;
-
-               page = zram->table[index].page;
-               offset = zram->table[index].offset;
-
-               if (!page)
+               void *handle = zram->table[index].handle;
+               if (!handle)
                        continue;
 
                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-                       __free_page(page);
+                       __free_page(handle);
                else
-                       xv_free(zram->mem_pool, page, offset);
+                       zs_free(zram->mem_pool, handle);
        }
 
        vfree(zram->table);
        zram->table = NULL;
 
-       xv_destroy_pool(zram->mem_pool);
+       zs_destroy_pool(zram->mem_pool);
        zram->mem_pool = NULL;
 
        /* Reset stats */
@@ -674,7 +663,7 @@ int zram_init_device(struct zram *zram)
        /* zram devices sort of resembles non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
 
-       zram->mem_pool = xv_create_pool();
+       zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
        if (!zram->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index e5cd2469b6a088e034c1dea8c098dd8fd3463313..572faa8762bb1d4b384d86e9c97c6a20cd7ff2bc 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -18,7 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 
-#include "xvmalloc.h"
+#include "../zsmalloc/zsmalloc.h"
 
 /*
  * Some arbitrary value. This is just to catch
@@ -51,7 +51,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
 
 /*
  * NOTE: max_zpage_size must be less than or equal to:
- *   XV_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
+ *   ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
  * otherwise, xv_malloc() would always return failure.
  */
 
@@ -81,8 +81,8 @@ enum zram_pageflags {
 
 /* Allocated for each disk page */
 struct table {
-       struct page *page;
-       u16 offset;
+       void *handle;
+       u16 size;       /* object size (excluding header) */
        u8 count;       /* object ref count (not yet used) */
        u8 flags;
 } __attribute__((aligned(4)));
@@ -102,7 +102,7 @@ struct zram_stats {
 };
 
 struct zram {
-       struct xv_pool *mem_pool;
+       struct zs_pool *mem_pool;
        void *compress_workmem;
        void *compress_buffer;
        struct table *table;
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index d521122826f65d5893c62f740cf408c47649513b..d2875c5690e8fc83c58fa0e1c7d26b5ee191af3d 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -187,7 +187,7 @@ static ssize_t mem_used_total_show(struct device *dev,
        struct zram *zram = dev_to_zram(dev);
 
        if (zram->init_done) {
-               val = xv_get_total_size_bytes(zram->mem_pool) +
+               val = zs_get_total_size_bytes(zram->mem_pool) +
                        ((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
        }