block: introduce blk_flush_queue to drive flush machinery
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a3a80884ed95285996277d4fdfc9d3935947ac86..d39e8a5eaeaaab0c28e0d36daea18d0886776ace 100644
@@ -508,20 +508,22 @@ void blk_mq_kick_requeue_list(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
-static inline bool is_flush_request(struct request *rq, unsigned int tag)
+static inline bool is_flush_request(struct request *rq,
+               struct blk_flush_queue *fq, unsigned int tag)
 {
        return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
-                       rq->q->flush_rq->tag == tag);
+                       fq->flush_rq->tag == tag);
 }
 
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
        struct request *rq = tags->rqs[tag];
+       struct blk_flush_queue *fq = blk_get_flush_queue(rq->q);
 
-       if (!is_flush_request(rq, tag))
+       if (!is_flush_request(rq, fq, tag))
                return rq;
 
-       return rq->q->flush_rq;
+       return fq->flush_rq;
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
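The hunk above makes blk_mq_tag_to_rq() resolve the flush request through the new per-queue container instead of request_queue::flush_rq. For orientation, here is a rough sketch of the shape this code assumes; blk_get_flush_queue() and struct blk_flush_queue are introduced outside blk-mq.c in this series, and the layout below is illustrative rather than the exact definition:

	/* Illustrative only: assumed shape of the flush container used above. */
	struct blk_flush_queue {
		struct list_head	flush_queue[2];
		struct list_head	flush_data_in_flight;
		struct request		*flush_rq;	/* replaces request_queue::flush_rq */
		spinlock_t		mq_flush_lock;
	};

	static inline struct blk_flush_queue *blk_get_flush_queue(struct request_queue *q)
	{
		return q->fq;	/* assumes the container hangs off the request_queue as ->fq */
	}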
 
@@ -1509,6 +1511,20 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
        return NOTIFY_OK;
 }
 
+static void blk_mq_exit_hctx(struct request_queue *q,
+               struct blk_mq_tag_set *set,
+               struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+       blk_mq_tag_idle(hctx);
+
+       if (set->ops->exit_hctx)
+               set->ops->exit_hctx(hctx, hctx_idx);
+
+       blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+       kfree(hctx->ctxs);
+       blk_mq_free_bitmap(&hctx->ctx_map);
+}
+
 static void blk_mq_exit_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set, int nr_queue)
 {
@@ -1518,17 +1534,8 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
        queue_for_each_hw_ctx(q, hctx, i) {
                if (i == nr_queue)
                        break;
-
-               blk_mq_tag_idle(hctx);
-
-               if (set->ops->exit_hctx)
-                       set->ops->exit_hctx(hctx, i);
-
-               blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-               kfree(hctx->ctxs);
-               blk_mq_free_bitmap(&hctx->ctx_map);
+               blk_mq_exit_hctx(q, set, hctx, i);
        }
-
 }
 
 static void blk_mq_free_hw_queues(struct request_queue *q,
@@ -1543,53 +1550,72 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
        }
 }
 
-static int blk_mq_init_hw_queues(struct request_queue *q,
-               struct blk_mq_tag_set *set)
+static int blk_mq_init_hctx(struct request_queue *q,
+               struct blk_mq_tag_set *set,
+               struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
-       struct blk_mq_hw_ctx *hctx;
-       unsigned int i;
+       int node;
+
+       node = hctx->numa_node;
+       if (node == NUMA_NO_NODE)
+               node = hctx->numa_node = set->numa_node;
+
+       INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+       INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
+       spin_lock_init(&hctx->lock);
+       INIT_LIST_HEAD(&hctx->dispatch);
+       hctx->queue = q;
+       hctx->queue_num = hctx_idx;
+       hctx->flags = set->flags;
+       hctx->cmd_size = set->cmd_size;
+
+       blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+                                       blk_mq_hctx_notify, hctx);
+       blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+       hctx->tags = set->tags[hctx_idx];
 
        /*
-        * Initialize hardware queues
+        * Allocate space for all possible cpus to avoid allocation at
+        * runtime
         */
-       queue_for_each_hw_ctx(q, hctx, i) {
-               int node;
+       hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+                                       GFP_KERNEL, node);
+       if (!hctx->ctxs)
+               goto unregister_cpu_notifier;
 
-               node = hctx->numa_node;
-               if (node == NUMA_NO_NODE)
-                       node = hctx->numa_node = set->numa_node;
+       if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+               goto free_ctxs;
 
-               INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
-               INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
-               spin_lock_init(&hctx->lock);
-               INIT_LIST_HEAD(&hctx->dispatch);
-               hctx->queue = q;
-               hctx->queue_num = i;
-               hctx->flags = set->flags;
-               hctx->cmd_size = set->cmd_size;
+       hctx->nr_ctx = 0;
 
-               blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
-                                               blk_mq_hctx_notify, hctx);
-               blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+       if (set->ops->init_hctx &&
+           set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+               goto free_bitmap;
 
-               hctx->tags = set->tags[i];
+       return 0;
 
-               /*
-                * Allocate space for all possible cpus to avoid allocation at
-                * runtime
-                */
-               hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
-                                               GFP_KERNEL, node);
-               if (!hctx->ctxs)
-                       break;
+ free_bitmap:
+       blk_mq_free_bitmap(&hctx->ctx_map);
+ free_ctxs:
+       kfree(hctx->ctxs);
+ unregister_cpu_notifier:
+       blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
 
-               if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
-                       break;
+       return -1;
+}
 
-               hctx->nr_ctx = 0;
+static int blk_mq_init_hw_queues(struct request_queue *q,
+               struct blk_mq_tag_set *set)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
 
-               if (set->ops->init_hctx &&
-                   set->ops->init_hctx(hctx, set->driver_data, i))
+       /*
+        * Initialize hardware queues
+        */
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (blk_mq_init_hctx(q, set, hctx, i))
                        break;
        }
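The break above is turned into actual cleanup a few lines further down, outside this hunk. Roughly, and assuming the function keeps its pre-existing tail:

	/* If the loop stopped early, unwind only the hctxs initialized so far. */
	if (i == q->nr_hw_queues)
		return 0;

	blk_mq_exit_hw_queues(q, set, i);
	return 1;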
 
@@ -1824,17 +1850,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        if (set->ops->complete)
                blk_queue_softirq_done(q, set->ops->complete);
 
-       blk_mq_init_flush(q);
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-       q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-                               set->cmd_size, cache_line_size()),
-                               GFP_KERNEL);
-       if (!q->flush_rq)
-               goto err_hw;
-
        if (blk_mq_init_hw_queues(q, set))
-               goto err_flush_rq;
+               goto err_hw;
 
        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
@@ -1842,12 +1861,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        blk_mq_add_queue_tag_set(set, q);
 
+       if (blk_init_flush(q))
+               goto err_hw_queues;
+
        blk_mq_map_swqueue(q);
 
        return q;
 
-err_flush_rq:
-       kfree(q->flush_rq);
+err_hw_queues:
+       blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
        blk_cleanup_queue(q);
 err_hctxs:
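blk_init_flush() itself is added in the flush code by this patch; the sketch below is illustrative of its role here rather than the verbatim implementation. The point is that it now owns the allocation of the blk_flush_queue container, including the flush_rq that blk_mq_init_queue() used to kzalloc() inline above:

	int blk_init_flush(struct request_queue *q)
	{
		/* illustrative sketch; exact error handling lives in blk-flush.c */
		struct blk_flush_queue *fq = kzalloc(sizeof(*fq), GFP_KERNEL);

		if (!fq)
			return -ENOMEM;

		INIT_LIST_HEAD(&fq->flush_queue[0]);
		INIT_LIST_HEAD(&fq->flush_queue[1]);
		INIT_LIST_HEAD(&fq->flush_data_in_flight);
		spin_lock_init(&fq->mq_flush_lock);
		q->fq = fq;	/* assumed field name for the per-queue container */

		/* for blk-mq queues, allocating fq->flush_rq moves into the flush machinery */
		return blk_mq_init_flush(q);
	}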