block: rename blk_mq_freeze_queue_start()
author Ming Lei <tom.leiming@gmail.com>
Mon, 27 Mar 2017 12:06:57 +0000 (20:06 +0800)
committer Jens Axboe <axboe@fb.com>
Wed, 29 Mar 2017 14:03:42 +0000 (08:03 -0600)
Since .q_usage_counter is used by both the legacy and the
blk-mq path, we need to block new I/O in blk_queue_enter()
once a queue becomes dead.

So rename blk_mq_freeze_queue_start() to blk_freeze_queue_start()
so that the function can be used in both paths.
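
For context, a minimal sketch of the driver-side pattern this rename
serves, using only the helpers declared in blk-mq.h below. The function
name example_remove is illustrative, not part of this commit:

	#include <linux/blk-mq.h>

	/*
	 * Hypothetical teardown path: blk_freeze_queue_start() kills
	 * q->q_usage_counter, so blk_queue_enter() sees the dead counter
	 * and blocks (or returns -EBUSY for nowait callers) instead of
	 * admitting new I/O.
	 */
	static void example_remove(struct request_queue *q)
	{
		blk_freeze_queue_start(q);	/* stop admitting new requests */
		blk_mq_freeze_queue_wait(q);	/* wait for q_usage_counter to drain to zero */

		/* ... tear down driver state while no I/O is in flight ... */

		blk_mq_unfreeze_queue(q);	/* only if the queue outlives the teardown */
	}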

Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-core.c
block/blk-mq.c
drivers/block/mtip32xx/mtip32xx.c
drivers/nvme/host/core.c
include/linux/blk-mq.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 6373febc77169bfbf0e38635dbb7ec2885c293d7..7b66f76f9cff6123732c2c4ab6130b2d094c6a60 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -670,7 +670,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
                        return -EBUSY;
 
                /*
-                * read pair of barrier in blk_mq_freeze_queue_start(),
+                * read pair of barrier in blk_freeze_queue_start(),
                 * we need to order reading __PERCPU_REF_DEAD flag of
                 * .q_usage_counter and reading .mq_freeze_depth,
                 * otherwise the following wait may never return if the
diff --git a/block/blk-mq.c b/block/blk-mq.c
index baebd6c8210e43bea01024c5b8a52510ad4a8067..0ed00eca4d5adbfb27e6e35a6334ee55a723ffcf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -68,7 +68,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
 }
 
-void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
 
@@ -78,7 +78,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
                blk_mq_run_hw_queues(q, false);
        }
 }
-EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
+EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
 void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -108,7 +108,7 @@ void blk_freeze_queue(struct request_queue *q)
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
-       blk_mq_freeze_queue_start(q);
+       blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
 
@@ -746,7 +746,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
         * percpu_ref_tryget directly, because we need to be able to
         * obtain a reference even in the short window between the queue
         * starting to freeze, by dropping the first reference in
-        * blk_mq_freeze_queue_start, and the moment the last request is
+        * blk_freeze_queue_start, and the moment the last request is
         * consumed, marked by the instant q_usage_counter reaches
         * zero.
         */
@@ -2376,7 +2376,7 @@ static void blk_mq_queue_reinit_work(void)
         * take place in parallel.
         */
        list_for_each_entry(q, &all_q_list, all_q_node)
-               blk_mq_freeze_queue_start(q);
+               blk_freeze_queue_start(q);
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_freeze_queue_wait(q);
 
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f96ab717534c4c8fe60e830c4020c6bd0517cf07..c96c35ab39dffed324fc568fb679a7a72826c8b6 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4162,7 +4162,7 @@ static int mtip_block_remove(struct driver_data *dd)
                dev_info(&dd->pdev->dev, "device %s surprise removal\n",
                                                dd->disk->disk_name);
 
-       blk_mq_freeze_queue_start(dd->queue);
+       blk_freeze_queue_start(dd->queue);
        blk_mq_stop_hw_queues(dd->queue);
        blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9b3b57fef446dc753c966c90fc2529bc9f846dd8..4a6d7f40876921ded21ad061399e94b46a2732ed 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2386,7 +2386,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
 
        mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list)
-               blk_mq_freeze_queue_start(ns->queue);
+               blk_freeze_queue_start(ns->queue);
        mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_freeze);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 5b3e201c8d4f4ae0b788dd986607690cd170ccb0..ea2e9dcd3aef4836f963df5ae486c23d1102da83 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -243,7 +243,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
-void blk_mq_freeze_queue_start(struct request_queue *q);
+void blk_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout);