blk-mq: move blk_mq_sched_{get,put}_request to blk-mq.c
author    Christoph Hellwig <hch@lst.de>
          Fri, 16 Jun 2017 16:15:19 +0000 (18:15 +0200)
committer Jens Axboe <axboe@kernel.dk>
          Sun, 18 Jun 2017 16:08:55 +0000 (10:08 -0600)
Having them out of line in blk-mq-sched.c just makes the code flow
unnecessarily complicated.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
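
The resulting call flow, sketched here for orientation (an abbreviation of
the diff below, not part of the committed changes):

    blk_mq_get_request()              /* now static in blk-mq.c; replaces
                                         blk_mq_sched_get_request() */
        -> e->type->ops.mq.get_request() or __blk_mq_alloc_request()
        -> blk_mq_sched_assign_ioc()  /* made non-static in blk-mq-sched.c */

    blk_mq_free_request()             /* open-codes the removed
                                         blk_mq_sched_put_request() */
        -> blk_mq_sched_put_rq_priv(), put_io_context()
        -> e->type->ops.mq.put_request() or blk_mq_finish_request()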
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c4e2afb9d12db87eb2043862cbfb3929abb8d404..62db188595dc1ed090ded62f0ba6b0529add592b 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -58,8 +58,8 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
        rq->elv.icq = NULL;
 }
 
-static void blk_mq_sched_assign_ioc(struct request_queue *q,
-                                   struct request *rq, struct bio *bio)
+void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
+                            struct bio *bio)
 {
        struct io_context *ioc;
 
@@ -68,71 +68,6 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
                __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
-struct request *blk_mq_sched_get_request(struct request_queue *q,
-                                        struct bio *bio,
-                                        unsigned int op,
-                                        struct blk_mq_alloc_data *data)
-{
-       struct elevator_queue *e = q->elevator;
-       struct request *rq;
-
-       blk_queue_enter_live(q);
-       data->q = q;
-       if (likely(!data->ctx))
-               data->ctx = blk_mq_get_ctx(q);
-       if (likely(!data->hctx))
-               data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-
-       if (e) {
-               data->flags |= BLK_MQ_REQ_INTERNAL;
-
-               /*
-                * Flush requests are special and go directly to the
-                * dispatch list.
-                */
-               if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-                       rq = e->type->ops.mq.get_request(q, op, data);
-                       if (rq)
-                               rq->rq_flags |= RQF_QUEUED;
-               } else
-                       rq = __blk_mq_alloc_request(data, op);
-       } else {
-               rq = __blk_mq_alloc_request(data, op);
-       }
-
-       if (rq) {
-               if (!op_is_flush(op)) {
-                       rq->elv.icq = NULL;
-                       if (e && e->type->icq_cache)
-                               blk_mq_sched_assign_ioc(q, rq, bio);
-               }
-               data->hctx->queued++;
-               return rq;
-       }
-
-       blk_queue_exit(q);
-       return NULL;
-}
-
-void blk_mq_sched_put_request(struct request *rq)
-{
-       struct request_queue *q = rq->q;
-       struct elevator_queue *e = q->elevator;
-
-       if (rq->rq_flags & RQF_ELVPRIV) {
-               blk_mq_sched_put_rq_priv(rq->q, rq);
-               if (rq->elv.icq) {
-                       put_io_context(rq->elv.icq->ioc);
-                       rq->elv.icq = NULL;
-               }
-       }
-
-       if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
-               e->type->ops.mq.put_request(rq);
-       else
-               blk_mq_finish_request(rq);
-}
-
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
        struct request_queue *q = hctx->queue;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index b87e5be5db8cfb49e7154dcd68e53580b156c9c3..5d12529538d0c472824cdfafb72f515c4492b45c 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -7,8 +7,8 @@
 void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *));
 
-struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
-void blk_mq_sched_put_request(struct request *rq);
+void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
+                            struct bio *bio);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e1d650804c8eee0dcad4483fed62bb4955a48a5b..694cbd69850750fc6c44bd63b1a52a3252922645 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -277,6 +277,51 @@ struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
 }
 EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
 
+static struct request *blk_mq_get_request(struct request_queue *q,
+               struct bio *bio, unsigned int op,
+               struct blk_mq_alloc_data *data)
+{
+       struct elevator_queue *e = q->elevator;
+       struct request *rq;
+
+       blk_queue_enter_live(q);
+       data->q = q;
+       if (likely(!data->ctx))
+               data->ctx = blk_mq_get_ctx(q);
+       if (likely(!data->hctx))
+               data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
+
+       if (e) {
+               data->flags |= BLK_MQ_REQ_INTERNAL;
+
+               /*
+                * Flush requests are special and go directly to the
+                * dispatch list.
+                */
+               if (!op_is_flush(op) && e->type->ops.mq.get_request) {
+                       rq = e->type->ops.mq.get_request(q, op, data);
+                       if (rq)
+                               rq->rq_flags |= RQF_QUEUED;
+               } else
+                       rq = __blk_mq_alloc_request(data, op);
+       } else {
+               rq = __blk_mq_alloc_request(data, op);
+       }
+
+       if (rq) {
+               if (!op_is_flush(op)) {
+                       rq->elv.icq = NULL;
+                       if (e && e->type->icq_cache)
+                               blk_mq_sched_assign_ioc(q, rq, bio);
+               }
+               data->hctx->queued++;
+               return rq;
+       }
+
+       blk_queue_exit(q);
+       return NULL;
+}
+
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                unsigned int flags)
 {
@@ -288,7 +333,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
        if (ret)
                return ERR_PTR(ret);
 
-       rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
+       rq = blk_mq_get_request(q, NULL, rw, &alloc_data);
 
        blk_mq_put_ctx(alloc_data.ctx);
        blk_queue_exit(q);
@@ -339,7 +384,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
        cpu = cpumask_first(alloc_data.hctx->cpumask);
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-       rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
+       rq = blk_mq_get_request(q, NULL, rw, &alloc_data);
 
        blk_queue_exit(q);
 
@@ -389,7 +434,21 @@ EXPORT_SYMBOL_GPL(blk_mq_finish_request);
 
 void blk_mq_free_request(struct request *rq)
 {
-       blk_mq_sched_put_request(rq);
+       struct request_queue *q = rq->q;
+       struct elevator_queue *e = q->elevator;
+
+       if (rq->rq_flags & RQF_ELVPRIV) {
+               blk_mq_sched_put_rq_priv(rq->q, rq);
+               if (rq->elv.icq) {
+                       put_io_context(rq->elv.icq->ioc);
+                       rq->elv.icq = NULL;
+               }
+       }
+
+       if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
+               e->type->ops.mq.put_request(rq);
+       else
+               blk_mq_finish_request(rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
@@ -1494,7 +1553,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        trace_block_getrq(q, bio, bio->bi_opf);
 
-       rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
+       rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
        if (unlikely(!rq)) {
                __wbt_done(q->rq_wb, wb_acct);
                return BLK_QC_T_NONE;
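
The externally visible allocation API is unchanged by this move; a
hypothetical caller (illustrative fragment only, not from this commit,
with error handling trimmed) still looks like:

    /* Illustrative fragment: the entry points are unchanged, only
     * their internals moved into blk-mq.c. Assumes a valid
     * struct request_queue *q. */
    struct request *rq;

    rq = blk_mq_alloc_request(q, REQ_OP_READ, 0); /* -> blk_mq_get_request() */
    if (IS_ERR(rq))
            return PTR_ERR(rq);

    /* ... set up and issue the request ... */

    blk_mq_free_request(rq);      /* scheduler teardown now happens here */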