btrfs: compress: put variables defined per compress type in struct to make cache friendly
author    Byongho Lee <bhlee.kernel@gmail.com>
          Wed, 14 Oct 2015 05:05:24 +0000 (14:05 +0900)
committer David Sterba <dsterba@suse.com>
          Wed, 21 Oct 2015 16:28:48 +0000 (18:28 +0200)
The following variables are defined per compress type:
 - struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]
 - spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES]
 - int comp_num_workspace[BTRFS_COMPRESS_TYPES]
 - atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES]
 - wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES]

These are parallel arrays indexed by compress type, so the memory adjacent
to one type's entry holds the entries of the other compress types rather
than the rest of that type's state.  This patch gathers the per-type
variables into a struct so that each type's state is contiguous, which is
cache friendly (see the layout sketch below).

Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: Byongho Lee <bhlee.kernel@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
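
As a rough illustration of the layout change, here is a minimal userspace
C sketch.  It is not part of the patch: NTYPES and the reduced two-field
struct are simplifying assumptions standing in for BTRFS_COMPRESS_TYPES
and the five real fields (list head, spinlock, counters, wait queue).

/*
 * Minimal userspace sketch (not from the patch) contrasting the two
 * layouts.  NTYPES and the field set are illustrative assumptions.
 */
#include <stdio.h>
#include <stddef.h>

#define NTYPES 2

/* Before: parallel arrays.  All num_ws entries sit together, so the
 * fields that belong to one compress type are NTYPES elements apart. */
static int arr_num_ws[NTYPES];
static int arr_alloc_ws[NTYPES];

/* After: array of structs.  Everything for one compress type is
 * contiguous and can share a cache line. */
struct comp_ws {
	int num_ws;
	int alloc_ws;
};
static struct comp_ws comp_ws[NTYPES];

int main(void)
{
	/* Fields of type 0 live in two separate arrays... */
	printf("parallel arrays:  %p vs %p\n",
	       (void *)&arr_num_ws[0], (void *)&arr_alloc_ws[0]);
	/* ...but are adjacent members inside one struct element. */
	printf("array of structs: offsets %zu and %zu\n",
	       offsetof(struct comp_ws, num_ws),
	       offsetof(struct comp_ws, alloc_ws));
	return 0;
}

With the five real per-type fields, the struct keeps all of one type's
bookkeeping within a cache line or two, whereas the parallel arrays
interleave it with the other types' data.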
fs/btrfs/compression.c

index 57ee8ca29b0601060fae924f43b7897f2c4c7c7c..b524e02d8cfb3a97b6b2c5dba603c0b2116292c3 100644
@@ -745,11 +745,13 @@ out:
        return ret;
 }
 
-static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
-static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
-static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
-static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
-static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
+static struct {
+       struct list_head idle_ws;
+       spinlock_t ws_lock;
+       int num_ws;
+       atomic_t alloc_ws;
+       wait_queue_head_t ws_wait;
+} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
 
 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
        &btrfs_zlib_compress,
@@ -761,10 +763,10 @@ void __init btrfs_init_compress(void)
        int i;
 
        for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
-               INIT_LIST_HEAD(&comp_idle_workspace[i]);
-               spin_lock_init(&comp_workspace_lock[i]);
-               atomic_set(&comp_alloc_workspace[i], 0);
-               init_waitqueue_head(&comp_workspace_wait[i]);
+               INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
+               spin_lock_init(&btrfs_comp_ws[i].ws_lock);
+               atomic_set(&btrfs_comp_ws[i].alloc_ws, 0);
+               init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
        }
 }
 
@@ -778,38 +780,38 @@ static struct list_head *find_workspace(int type)
        int cpus = num_online_cpus();
        int idx = type - 1;
 
-       struct list_head *idle_workspace        = &comp_idle_workspace[idx];
-       spinlock_t *workspace_lock              = &comp_workspace_lock[idx];
-       atomic_t *alloc_workspace               = &comp_alloc_workspace[idx];
-       wait_queue_head_t *workspace_wait       = &comp_workspace_wait[idx];
-       int *num_workspace                      = &comp_num_workspace[idx];
+       struct list_head *idle_ws       = &btrfs_comp_ws[idx].idle_ws;
+       spinlock_t *ws_lock             = &btrfs_comp_ws[idx].ws_lock;
+       atomic_t *alloc_ws              = &btrfs_comp_ws[idx].alloc_ws;
+       wait_queue_head_t *ws_wait      = &btrfs_comp_ws[idx].ws_wait;
+       int *num_ws                     = &btrfs_comp_ws[idx].num_ws;
 again:
-       spin_lock(workspace_lock);
-       if (!list_empty(idle_workspace)) {
-               workspace = idle_workspace->next;
+       spin_lock(ws_lock);
+       if (!list_empty(idle_ws)) {
+               workspace = idle_ws->next;
                list_del(workspace);
-               (*num_workspace)--;
-               spin_unlock(workspace_lock);
+               (*num_ws)--;
+               spin_unlock(ws_lock);
                return workspace;
 
        }
-       if (atomic_read(alloc_workspace) > cpus) {
+       if (atomic_read(alloc_ws) > cpus) {
                DEFINE_WAIT(wait);
 
-               spin_unlock(workspace_lock);
-               prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
-               if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
+               spin_unlock(ws_lock);
+               prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
+               if (atomic_read(alloc_ws) > cpus && !*num_ws)
                        schedule();
-               finish_wait(workspace_wait, &wait);
+               finish_wait(ws_wait, &wait);
                goto again;
        }
-       atomic_inc(alloc_workspace);
-       spin_unlock(workspace_lock);
+       atomic_inc(alloc_ws);
+       spin_unlock(ws_lock);
 
        workspace = btrfs_compress_op[idx]->alloc_workspace();
        if (IS_ERR(workspace)) {
-               atomic_dec(alloc_workspace);
-               wake_up(workspace_wait);
+               atomic_dec(alloc_ws);
+               wake_up(ws_wait);
        }
        return workspace;
 }
@@ -821,27 +823,27 @@ again:
 static void free_workspace(int type, struct list_head *workspace)
 {
        int idx = type - 1;
-       struct list_head *idle_workspace        = &comp_idle_workspace[idx];
-       spinlock_t *workspace_lock              = &comp_workspace_lock[idx];
-       atomic_t *alloc_workspace               = &comp_alloc_workspace[idx];
-       wait_queue_head_t *workspace_wait       = &comp_workspace_wait[idx];
-       int *num_workspace                      = &comp_num_workspace[idx];
-
-       spin_lock(workspace_lock);
-       if (*num_workspace < num_online_cpus()) {
-               list_add(workspace, idle_workspace);
-               (*num_workspace)++;
-               spin_unlock(workspace_lock);
+       struct list_head *idle_ws       = &btrfs_comp_ws[idx].idle_ws;
+       spinlock_t *ws_lock             = &btrfs_comp_ws[idx].ws_lock;
+       atomic_t *alloc_ws              = &btrfs_comp_ws[idx].alloc_ws;
+       wait_queue_head_t *ws_wait      = &btrfs_comp_ws[idx].ws_wait;
+       int *num_ws                     = &btrfs_comp_ws[idx].num_ws;
+
+       spin_lock(ws_lock);
+       if (*num_ws < num_online_cpus()) {
+               list_add(workspace, idle_ws);
+               (*num_ws)++;
+               spin_unlock(ws_lock);
                goto wake;
        }
-       spin_unlock(workspace_lock);
+       spin_unlock(ws_lock);
 
        btrfs_compress_op[idx]->free_workspace(workspace);
-       atomic_dec(alloc_workspace);
+       atomic_dec(alloc_ws);
 wake:
        smp_mb();
-       if (waitqueue_active(workspace_wait))
-               wake_up(workspace_wait);
+       if (waitqueue_active(ws_wait))
+               wake_up(ws_wait);
 }
 
 /*
@@ -853,11 +855,11 @@ static void free_workspaces(void)
        int i;
 
        for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
-               while (!list_empty(&comp_idle_workspace[i])) {
-                       workspace = comp_idle_workspace[i].next;
+               while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
+                       workspace = btrfs_comp_ws[i].idle_ws.next;
                        list_del(workspace);
                        btrfs_compress_op[i]->free_workspace(workspace);
-                       atomic_dec(&comp_alloc_workspace[i]);
+                       atomic_dec(&btrfs_comp_ws[i].alloc_ws);
                }
        }
 }
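
For readers outside the kernel, the pool logic that find_workspace() and
free_workspace() implement above can be sketched in userspace with
pthreads.  This is an analogue under stated assumptions, not kernel code:
MAX_WS stands in for num_online_cpus(), malloc()/free() for the per-type
alloc_workspace()/free_workspace() hooks, and a condition variable
replaces the ws_wait wait queue (the kernel version needs the
smp_mb()/waitqueue_active() pairing shown above to avoid missed wakeups;
a mutex-protected condvar gets that for free).

/*
 * Userspace pthread analogue (an illustrative assumption, not kernel
 * code) of one per-type workspace pool.  Error handling is elided.
 */
#include <pthread.h>
#include <stdlib.h>

#define MAX_WS 4	/* stands in for num_online_cpus() */

struct ws_pool {
	pthread_mutex_t lock;		/* plays the role of ws_lock */
	pthread_cond_t	wait;		/* plays the role of ws_wait */
	void		*idle[MAX_WS];	/* cached, currently unused workspaces */
	int		num_idle;	/* like num_ws */
	int		allocated;	/* like alloc_ws: all existing workspaces */
};

/* One pool per compress type, e.g.: */
static struct ws_pool zlib_pool = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait = PTHREAD_COND_INITIALIZER,
};

void *get_ws(struct ws_pool *p)
{
	void *ws;

	pthread_mutex_lock(&p->lock);
	/* Nothing cached and the limit is reached: wait, like schedule(). */
	while (p->num_idle == 0 && p->allocated >= MAX_WS)
		pthread_cond_wait(&p->wait, &p->lock);
	if (p->num_idle > 0) {
		ws = p->idle[--p->num_idle];	/* reuse a cached workspace */
	} else {
		p->allocated++;
		ws = malloc(4096);	/* stands in for alloc_workspace() */
	}
	pthread_mutex_unlock(&p->lock);
	return ws;
}

void put_ws(struct ws_pool *p, void *ws)
{
	pthread_mutex_lock(&p->lock);
	if (p->num_idle < MAX_WS) {
		p->idle[p->num_idle++] = ws;	/* cache it for reuse */
	} else {
		free(ws);	/* stands in for free_workspace() */
		p->allocated--;
	}
	pthread_cond_signal(&p->wait);	/* like wake_up(ws_wait) */
	pthread_mutex_unlock(&p->lock);
}

The design point the patch leaves untouched: each compress type caches up
to one workspace per online CPU and makes further allocators wait, so a
burst of compression work reuses warm buffers instead of hammering the
allocator.  The patch only changes how that per-type state is laid out.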