blk-mq: introduce blk_mq_delay_kick_requeue_list()
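
The new helper mirrors blk_mq_kick_requeue_list(), but lets the caller schedule the requeue work after a delay given in milliseconds. A minimal usage sketch follows; the driver-side function name, the call site, and the 100 ms back-off are illustrative assumptions, not part of this change:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Illustrative only: a driver's completion path might put a failed
 * request back on the requeue list and kick that list after a short
 * back-off instead of immediately, e.g. while a path or target is
 * temporarily unavailable.
 */
static void example_requeue_with_delay(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_requeue_request(rq);
	blk_mq_delay_kick_requeue_list(q, 100);	/* retry in ~100 ms */
}
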
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 13f5a6c1de76827c3aa2eaab28061581971e0c5c..7ddc7969fba43b6786607c9ffda5f396ed87af3b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -22,6 +22,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
+#include <linux/prefetch.h>
 
 #include <trace/events/block.h>
 
@@ -501,7 +502,7 @@ EXPORT_SYMBOL(blk_mq_requeue_request);
 static void blk_mq_requeue_work(struct work_struct *work)
 {
        struct request_queue *q =
-               container_of(work, struct request_queue, requeue_work);
+               container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;
@@ -556,16 +557,24 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
 void blk_mq_cancel_requeue_work(struct request_queue *q)
 {
-       cancel_work_sync(&q->requeue_work);
+       cancel_delayed_work_sync(&q->requeue_work);
 }
 EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
-       kblockd_schedule_work(&q->requeue_work);
+       kblockd_schedule_delayed_work(&q->requeue_work, 0);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+                                   unsigned long msecs)
+{
+       kblockd_schedule_delayed_work(&q->requeue_work,
+                                     msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+
 void blk_mq_abort_requeue_list(struct request_queue *q)
 {
        unsigned long flags;
@@ -588,8 +597,10 @@ EXPORT_SYMBOL(blk_mq_abort_requeue_list);
 
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
-       if (tag < tags->nr_tags)
+       if (tag < tags->nr_tags) {
+               prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
+       }
 
        return NULL;
 }
@@ -936,8 +947,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                put_cpu();
        }
 
-       kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-                       &hctx->run_work, 0);
+       kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
 }
 
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -958,7 +968,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-       cancel_delayed_work(&hctx->run_work);
+       cancel_work(&hctx->run_work);
        cancel_delayed_work(&hctx->delay_work);
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
@@ -1011,7 +1021,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 {
        struct blk_mq_hw_ctx *hctx;
 
-       hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
+       hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
 
        __blk_mq_run_hw_queue(hctx);
 }
@@ -1722,7 +1732,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
        if (node == NUMA_NO_NODE)
                node = hctx->numa_node = set->numa_node;
 
-       INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+       INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
        INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
        spin_lock_init(&hctx->lock);
        INIT_LIST_HEAD(&hctx->dispatch);
@@ -2082,7 +2092,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
        q->sg_reserved_size = INT_MAX;
 
-       INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+       INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
        INIT_LIST_HEAD(&q->requeue_list);
        spin_lock_init(&q->requeue_lock);