btrfs: Replace fs_info->scrub_* workqueue with btrfs_workqueue.
author     Qu Wenruo <quwenruo@cn.fujitsu.com>
           Fri, 28 Feb 2014 02:46:17 +0000 (10:46 +0800)
committer  Josef Bacik <jbacik@fb.com>
           Mon, 10 Mar 2014 19:17:14 +0000 (15:17 -0400)
Replace the fs_info->scrub_* workers with the newly created
btrfs_workqueue.
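
For context, the conversion pattern applied throughout this patch looks
roughly like the following sketch (simplified from the hunks below, with
error handling and surrounding code trimmed):

	/* old: per-subsystem btrfs_workers pool */
	btrfs_init_workers(&fs_info->scrub_workers, "scrub",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	fs_info->scrub_workers.idle_thresh = 4;
	ret = btrfs_start_workers(&fs_info->scrub_workers);

	sbio->work.func = scrub_bio_end_io_worker;
	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
	...
	btrfs_stop_workers(&fs_info->scrub_workers);

	/* new: btrfs_workqueue backed by the kernel workqueue API */
	fs_info->scrub_workers =
		btrfs_alloc_workqueue("btrfs-scrub",
				      WQ_FREEZABLE | WQ_UNBOUND,
				      fs_info->thread_pool_size, 4);
	if (!fs_info->scrub_workers)
		return -ENOMEM;

	btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
	...
	btrfs_destroy_workqueue(fs_info->scrub_workers);

The last btrfs_alloc_workqueue() argument takes over the role of the old
idle_thresh, and the two trailing NULL arguments to btrfs_init_work() are
the ordered-completion callbacks of the new helper, which scrub does not
need.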

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Tested-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
fs/btrfs/ctree.h
fs/btrfs/scrub.c
fs/btrfs/super.c

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7b50def3f20665a8773c57e6b221c14ec709f31c..a98f86ac187e5969f914e1f3be5b21d2e0443917 100644
@@ -1605,9 +1605,9 @@ struct btrfs_fs_info {
        atomic_t scrub_cancel_req;
        wait_queue_head_t scrub_pause_wait;
        int scrub_workers_refcnt;
-       struct btrfs_workers scrub_workers;
-       struct btrfs_workers scrub_wr_completion_workers;
-       struct btrfs_workers scrub_nocow_workers;
+       struct btrfs_workqueue_struct *scrub_workers;
+       struct btrfs_workqueue_struct *scrub_wr_completion_workers;
+       struct btrfs_workqueue_struct *scrub_nocow_workers;
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
        u32 check_integrity_print_mask;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 682ec3fca4a1b5d412523c68ed96c7292c95ee0c..5a240f5e6cebb4929a592dbbd2cea8985b5d749f 100644
@@ -96,7 +96,8 @@ struct scrub_bio {
 #endif
        int                     page_count;
        int                     next_free;
-       struct btrfs_work       work;
+       struct btrfs_work_struct
+                               work;
 };
 
 struct scrub_block {
@@ -154,7 +155,8 @@ struct scrub_fixup_nodatasum {
        struct btrfs_device     *dev;
        u64                     logical;
        struct btrfs_root       *root;
-       struct btrfs_work       work;
+       struct btrfs_work_struct
+                               work;
        int                     mirror_num;
 };
 
@@ -172,7 +174,8 @@ struct scrub_copy_nocow_ctx {
        int                     mirror_num;
        u64                     physical_for_dev_replace;
        struct list_head        inodes;
-       struct btrfs_work       work;
+       struct btrfs_work_struct
+                               work;
 };
 
 struct scrub_warning {
@@ -231,7 +234,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                       u64 gen, int mirror_num, u8 *csum, int force,
                       u64 physical_for_dev_replace);
 static void scrub_bio_end_io(struct bio *bio, int err);
-static void scrub_bio_end_io_worker(struct btrfs_work *work);
+static void scrub_bio_end_io_worker(struct btrfs_work_struct *work);
 static void scrub_block_complete(struct scrub_block *sblock);
 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                               u64 extent_logical, u64 extent_len,
@@ -248,14 +251,14 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
                                    struct scrub_page *spage);
 static void scrub_wr_submit(struct scrub_ctx *sctx);
 static void scrub_wr_bio_end_io(struct bio *bio, int err);
-static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
+static void scrub_wr_bio_end_io_worker(struct btrfs_work_struct *work);
 static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page);
 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
                                      struct scrub_copy_nocow_ctx *ctx);
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace);
-static void copy_nocow_pages_worker(struct btrfs_work *work);
+static void copy_nocow_pages_worker(struct btrfs_work_struct *work);
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 
@@ -428,7 +431,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
                sbio->index = i;
                sbio->sctx = sctx;
                sbio->page_count = 0;
-               sbio->work.func = scrub_bio_end_io_worker;
+               btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
+                               NULL, NULL);
 
                if (i != SCRUB_BIOS_PER_SCTX - 1)
                        sctx->bios[i]->next_free = i + 1;
@@ -733,7 +737,7 @@ out:
        return -EIO;
 }
 
-static void scrub_fixup_nodatasum(struct btrfs_work *work)
+static void scrub_fixup_nodatasum(struct btrfs_work_struct *work)
 {
        int ret;
        struct scrub_fixup_nodatasum *fixup;
@@ -997,9 +1001,10 @@ nodatasum_case:
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                scrub_pending_trans_workers_inc(sctx);
-               fixup_nodatasum->work.func = scrub_fixup_nodatasum;
-               btrfs_queue_worker(&fs_info->scrub_workers,
-                                  &fixup_nodatasum->work);
+               btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
+                               NULL, NULL);
+               btrfs_queue_work(fs_info->scrub_workers,
+                                &fixup_nodatasum->work);
                goto out;
        }
 
@@ -1613,11 +1618,11 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err)
        sbio->err = err;
        sbio->bio = bio;
 
-       sbio->work.func = scrub_wr_bio_end_io_worker;
-       btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
+       btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
+       btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
 }
 
-static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
+static void scrub_wr_bio_end_io_worker(struct btrfs_work_struct *work)
 {
        struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
        struct scrub_ctx *sctx = sbio->sctx;
@@ -2082,10 +2087,10 @@ static void scrub_bio_end_io(struct bio *bio, int err)
        sbio->err = err;
        sbio->bio = bio;
 
-       btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
+       btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
 }
 
-static void scrub_bio_end_io_worker(struct btrfs_work *work)
+static void scrub_bio_end_io_worker(struct btrfs_work_struct *work)
 {
        struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
        struct scrub_ctx *sctx = sbio->sctx;
@@ -2780,33 +2785,35 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
                                                int is_dev_replace)
 {
        int ret = 0;
+       int flags = WQ_FREEZABLE | WQ_UNBOUND;
+       int max_active = fs_info->thread_pool_size;
 
        if (fs_info->scrub_workers_refcnt == 0) {
                if (is_dev_replace)
-                       btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
-                                       &fs_info->generic_worker);
+                       fs_info->scrub_workers =
+                               btrfs_alloc_workqueue("btrfs-scrub", flags,
+                                                     1, 4);
                else
-                       btrfs_init_workers(&fs_info->scrub_workers, "scrub",
-                                       fs_info->thread_pool_size,
-                                       &fs_info->generic_worker);
-               fs_info->scrub_workers.idle_thresh = 4;
-               ret = btrfs_start_workers(&fs_info->scrub_workers);
-               if (ret)
+                       fs_info->scrub_workers =
+                               btrfs_alloc_workqueue("btrfs-scrub", flags,
+                                                     max_active, 4);
+               if (!fs_info->scrub_workers) {
+                       ret = -ENOMEM;
                        goto out;
-               btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
-                                  "scrubwrc",
-                                  fs_info->thread_pool_size,
-                                  &fs_info->generic_worker);
-               fs_info->scrub_wr_completion_workers.idle_thresh = 2;
-               ret = btrfs_start_workers(
-                               &fs_info->scrub_wr_completion_workers);
-               if (ret)
+               }
+               fs_info->scrub_wr_completion_workers =
+                       btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
+                                             max_active, 2);
+               if (!fs_info->scrub_wr_completion_workers) {
+                       ret = -ENOMEM;
                        goto out;
-               btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
-                                  &fs_info->generic_worker);
-               ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
-               if (ret)
+               }
+               fs_info->scrub_nocow_workers =
+                       btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
+               if (!fs_info->scrub_nocow_workers) {
+                       ret = -ENOMEM;
                        goto out;
+               }
        }
        ++fs_info->scrub_workers_refcnt;
 out:
@@ -2816,9 +2823,9 @@ out:
 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
 {
        if (--fs_info->scrub_workers_refcnt == 0) {
-               btrfs_stop_workers(&fs_info->scrub_workers);
-               btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
-               btrfs_stop_workers(&fs_info->scrub_nocow_workers);
+               btrfs_destroy_workqueue(fs_info->scrub_workers);
+               btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
+               btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
        }
        WARN_ON(fs_info->scrub_workers_refcnt < 0);
 }
@@ -3129,10 +3136,10 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        nocow_ctx->len = len;
        nocow_ctx->mirror_num = mirror_num;
        nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
-       nocow_ctx->work.func = copy_nocow_pages_worker;
+       btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
        INIT_LIST_HEAD(&nocow_ctx->inodes);
-       btrfs_queue_worker(&fs_info->scrub_nocow_workers,
-                          &nocow_ctx->work);
+       btrfs_queue_work(fs_info->scrub_nocow_workers,
+                        &nocow_ctx->work);
 
        return 0;
 }
@@ -3154,7 +3161,7 @@ static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
 
 #define COPY_COMPLETE 1
 
-static void copy_nocow_pages_worker(struct btrfs_work *work)
+static void copy_nocow_pages_worker(struct btrfs_work_struct *work)
 {
        struct scrub_copy_nocow_ctx *nocow_ctx =
                container_of(work, struct scrub_copy_nocow_ctx, work);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index fd07d039b2de14f74e79671a196aaee6967f3ac4..aed1e11060a01f2a393320f87002efea371a2a1b 100644
@@ -1336,8 +1336,8 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
        btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
        btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
        btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
-       btrfs_set_max_workers(&fs_info->scrub_wr_completion_workers,
-                             new_pool_size);
+       btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
+                               new_pool_size);
 }
 
 static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)