diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a9a9ff233a1380a520f9a0d5d6a6c943f6d97166..08bd7a3d464a9c6959a39e269d2284600e750a50 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1398,11 +1398,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
        /* extra space in chunk to keep the handle */
        size += ZS_HANDLE_SIZE;
        class = pool->size_class[get_size_class_index(size)];
-       /* In huge class size, we store the handle into first_page->private */
-       if (class->huge) {
-               size -= ZS_HANDLE_SIZE;
-               class = pool->size_class[get_size_class_index(size)];
-       }
 
        spin_lock(&class->lock);
        first_page = find_get_zspage(class);
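
Note on the hunk above: it drops the huge-class re-lookup because, once ZS_HANDLE_SIZE has been added, indexing the size classes again without it can fall back below the huge boundary into a class whose chunks have no room for payload plus handle, so the stored object would spill into the neighbouring chunk. A minimal userspace sketch of the arithmetic, assuming 4 KiB pages, a 32-byte minimum class, a 16-byte class delta and ZS_HANDLE_SIZE == sizeof(unsigned long); class_size_for() below is a simplified stand-in for the kernel's get_size_class_index(), not a copy of it:

#include <stdio.h>
#include <stddef.h>

#define ZS_MIN_ALLOC_SIZE	32	/* assumed default for 4 KiB pages */
#define ZS_SIZE_CLASS_DELTA	16	/* assumed: PAGE_SIZE >> 8 */
#define ZS_HANDLE_SIZE		(sizeof(unsigned long))

/* Simplified stand-in for get_size_class_index(): round the request up
 * to the next 16-byte size class and return that class size. */
static size_t class_size_for(size_t size)
{
	size_t idx = 0;

	if (size > ZS_MIN_ALLOC_SIZE)
		idx = (size - ZS_MIN_ALLOC_SIZE + ZS_SIZE_CLASS_DELTA - 1) /
			ZS_SIZE_CLASS_DELTA;

	return ZS_MIN_ALLOC_SIZE + idx * ZS_SIZE_CLASS_DELTA;
}

int main(void)
{
	size_t size = 3264;	/* payload near the huge-class boundary */
	size_t first = class_size_for(size + ZS_HANDLE_SIZE);	/* 3280 */
	size_t redo = class_size_for(size);			/* 3264 */

	/*
	 * With the removed branch, a first lookup landing in a huge class
	 * was redone without the handle; here that falls back from a
	 * 3280-byte chunk to a 3264-byte one, which cannot hold the
	 * 3272 bytes (payload + handle) actually written.
	 */
	printf("first lookup: %zu, re-lookup: %zu, needed: %zu\n",
	       first, redo, size + ZS_HANDLE_SIZE);
	return 0;
}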
@@ -1678,14 +1673,14 @@ static struct page *alloc_target_page(struct size_class *class)
 static void putback_zspage(struct zs_pool *pool, struct size_class *class,
                                struct page *first_page)
 {
-       int class_idx;
        enum fullness_group fullness;
 
        BUG_ON(!is_first_page(first_page));
 
-       get_zspage_mapping(first_page, &class_idx, &fullness);
+       fullness = get_fullness_group(first_page);
        insert_zspage(first_page, class, fullness);
-       fullness = fix_fullness_group(class, first_page);
+       set_zspage_mapping(first_page, class->index, fullness);
+
        if (fullness == ZS_EMPTY) {
                zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
                        class->size, class->pages_per_zspage));
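
For context on the putback_zspage() hunk above: instead of reading back the zspage's previously recorded class/fullness pair with get_zspage_mapping() and then correcting it via fix_fullness_group() after insertion, the function now derives the current fullness from the page itself with get_fullness_group(), inserts the zspage into the matching list once, and records class index and fullness with set_zspage_mapping(). A small userspace model of that classification; the "less than about three quarters full counts as almost empty" threshold follows my reading of the zsmalloc code of that era and is an assumption, not a quotation:

#include <stdio.h>

enum fullness_group { ZS_ALMOST_FULL, ZS_ALMOST_EMPTY, ZS_EMPTY, ZS_FULL };

/* Userspace model of get_fullness_group(): classify a zspage by how many
 * of its objects are currently in use.  The 3/4 threshold is assumed. */
static enum fullness_group fullness_of(int inuse, int max_objects)
{
	if (inuse == 0)
		return ZS_EMPTY;
	if (inuse == max_objects)
		return ZS_FULL;
	if (inuse <= 3 * max_objects / 4)
		return ZS_ALMOST_EMPTY;
	return ZS_ALMOST_FULL;
}

int main(void)
{
	static const char * const name[] = {
		"ALMOST_FULL", "ALMOST_EMPTY", "EMPTY", "FULL",
	};

	/* A source zspage that compaction drained completely comes back
	 * as ZS_EMPTY, the case the visible hunk handles by decrementing
	 * the allocation stats. */
	printf("%s %s %s %s\n",
	       name[fullness_of(0, 5)],		/* EMPTY */
	       name[fullness_of(5, 5)],		/* FULL */
	       name[fullness_of(1, 5)],		/* ALMOST_EMPTY */
	       name[fullness_of(5, 6)]);	/* ALMOST_FULL */
	return 0;
}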
@@ -1716,8 +1711,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
        struct page *dst_page = NULL;
        unsigned long nr_total_migrated = 0;
 
-       cond_resched();
-
        spin_lock(&class->lock);
        while ((src_page = isolate_source_page(class))) {