git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - block/blk-mq.c
target/tcm_loop: Make TMF processing slightly faster
[karo-tx-linux.git] / block / blk-mq.c
index bf90684a007a21c6aa5068b3b2e3746e8e1accff..a69ad122ed66c6b93385f1b3959a893b407b9d05 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/blk-mq.h>
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-tag.h"
 #include "blk-stat.h"
 #include "blk-wbt.h"
@@ -41,6 +42,7 @@ static LIST_HEAD(all_q_list);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
 
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
@@ -166,7 +168,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
        unsigned int i;
        bool rcu = false;
 
-       blk_mq_stop_hw_queues(q);
+       __blk_mq_stop_hw_queues(q, true);
 
        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -1218,20 +1220,34 @@ bool blk_mq_queue_stopped(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_queue_stopped);
 
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
 {
-       cancel_delayed_work_sync(&hctx->run_work);
+       if (sync)
+               cancel_delayed_work_sync(&hctx->run_work);
+       else
+               cancel_delayed_work(&hctx->run_work);
+
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+       __blk_mq_stop_hw_queue(hctx, false);
+}
 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
 
-void blk_mq_stop_hw_queues(struct request_queue *q)
+static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i)
-               blk_mq_stop_hw_queue(hctx);
+               __blk_mq_stop_hw_queue(hctx, sync);
+}
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+       __blk_mq_stop_hw_queues(q, false);
 }
 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
 
@@ -1538,13 +1554,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_bounce(q, &bio);
 
+       blk_queue_split(q, &bio, q->bio_split);
+
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }
 
-       blk_queue_split(q, &bio, q->bio_split);
-
        if (!is_flush_fua && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return BLK_QC_T_NONE;
@@ -1655,8 +1671,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 
                        if (!rq)
                                continue;
-                       set->ops->exit_request(set->driver_data, rq,
-                                               hctx_idx, i);
+                       set->ops->exit_request(set, rq, hctx_idx);
                        tags->static_rqs[i] = NULL;
                }
        }
@@ -1787,8 +1802,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 
                        tags->static_rqs[i] = rq;
                        if (set->ops->init_request) {
-                               if (set->ops->init_request(set->driver_data,
-                                               rq, hctx_idx, i,
+                               if (set->ops->init_request(set, rq, hctx_idx,
                                                node)) {
                                        tags->static_rqs[i] = NULL;
                                        goto fail;
@@ -1849,14 +1863,12 @@ static void blk_mq_exit_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
-       unsigned flush_start_tag = set->queue_depth;
+       blk_mq_debugfs_unregister_hctx(hctx);
 
        blk_mq_tag_idle(hctx);
 
        if (set->ops->exit_request)
-               set->ops->exit_request(set->driver_data,
-                                      hctx->fq->flush_rq, hctx_idx,
-                                      flush_start_tag + hctx_idx);
+               set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 
        blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
 
@@ -1889,7 +1901,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
                struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
        int node;
-       unsigned flush_start_tag = set->queue_depth;
 
        node = hctx->numa_node;
        if (node == NUMA_NO_NODE)
@@ -1933,14 +1944,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
                goto sched_exit_hctx;
 
        if (set->ops->init_request &&
-           set->ops->init_request(set->driver_data,
-                                  hctx->fq->flush_rq, hctx_idx,
-                                  flush_start_tag + hctx_idx, node))
+           set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
+                                  node))
                goto free_fq;
 
        if (hctx->flags & BLK_MQ_F_BLOCKING)
                init_srcu_struct(&hctx->queue_rq_srcu);
 
+       blk_mq_debugfs_register_hctx(q, hctx);
+
        return 0;
 
  free_fq:
@@ -2378,6 +2390,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 {
        WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
+       blk_mq_debugfs_unregister_hctxs(q);
        blk_mq_sysfs_unregister(q);
 
        /*
@@ -2389,6 +2402,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
        blk_mq_map_swqueue(q, online_mask);
 
        blk_mq_sysfs_register(q);
+       blk_mq_debugfs_register_hctxs(q);
 }
 
 /*
@@ -2617,7 +2631,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
                return -EINVAL;
 
        blk_mq_freeze_queue(q);
-       blk_mq_quiesce_queue(q);
 
        ret = 0;
        queue_for_each_hw_ctx(q, hctx, i) {
@@ -2643,7 +2656,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
                q->nr_requests = nr;
 
        blk_mq_unfreeze_queue(q);
-       blk_mq_start_stopped_hw_queues(q, true);
 
        return ret;
 }