Merge remote-tracking branch 'block/for-next'
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 85f014327342efc775c31833a52f531b69a66329..1c27b3eaef645ab1ff12d93ac400fcbdd3ebb655 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -9,6 +9,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/kmemleak.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -77,47 +78,13 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
-{
-       while (true) {
-               int ret;
-
-               if (percpu_ref_tryget_live(&q->mq_usage_counter))
-                       return 0;
-
-               if (!(gfp & __GFP_WAIT))
-                       return -EBUSY;
-
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !atomic_read(&q->mq_freeze_depth) ||
-                               blk_queue_dying(q));
-               if (blk_queue_dying(q))
-                       return -ENODEV;
-               if (ret)
-                       return ret;
-       }
-}
-
-static void blk_mq_queue_exit(struct request_queue *q)
-{
-       percpu_ref_put(&q->mq_usage_counter);
-}
-
-static void blk_mq_usage_counter_release(struct percpu_ref *ref)
-{
-       struct request_queue *q =
-               container_of(ref, struct request_queue, mq_usage_counter);
-
-       wake_up_all(&q->mq_freeze_wq);
-}
-
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
 
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
-               percpu_ref_kill(&q->mq_usage_counter);
+               percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
 }
@@ -125,18 +92,34 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 }
 
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
  */
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_freeze_queue(struct request_queue *q)
 {
+       /*
+        * In the !blk_mq case we are only calling this to kill the
+        * q_usage_counter, otherwise this increases the freeze depth
+        * and waits for it to return to zero.  For this reason there is
+        * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+        * exported to drivers as the only user for unfreeze is blk_mq.
+        */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
+
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+       /*
+        * ...just an alias to keep freeze and unfreeze actions balanced
+        * in the blk_mq_* namespace
+        */
+       blk_freeze_queue(q);
+}
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -146,7 +129,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
-               percpu_ref_reinit(&q->mq_usage_counter);
+               percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
 }
@@ -255,7 +238,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        struct blk_mq_alloc_data alloc_data;
        int ret;
 
-       ret = blk_mq_queue_enter(q, gfp);
+       ret = blk_queue_enter(q, gfp);
        if (ret)
                return ERR_PTR(ret);
 
@@ -278,7 +261,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        }
        blk_mq_put_ctx(ctx);
        if (!rq) {
-               blk_mq_queue_exit(q);
+               blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }
        return rq;
@@ -297,7 +280,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
-       blk_mq_queue_exit(q);
+       blk_queue_exit(q);
 }
 
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -989,18 +972,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-                                   struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+                                           struct blk_mq_ctx *ctx,
+                                           struct request *rq,
+                                           bool at_head)
 {
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-
        trace_block_rq_insert(hctx->queue, rq);
 
        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
 
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+                                   struct request *rq, bool at_head)
+{
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+       __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
@@ -1056,8 +1046,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
-               __blk_mq_insert_request(hctx, rq, false);
+               __blk_mq_insert_req_list(hctx, ctx, rq, false);
        }
+       blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
 
        blk_mq_run_hw_queue(hctx, from_schedule);
@@ -1139,7 +1130,7 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
 {
-       if (!hctx_allow_merges(hctx)) {
+       if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
 insert_rq:
@@ -1176,11 +1167,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;
 
-       if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-               bio_io_error(bio);
-               return NULL;
-       }
-
+       blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1267,9 +1254,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
-               return;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count,
+                                          &same_queue_rq))
+                       return;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
@@ -1376,7 +1366,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
        plug = current->plug;
        if (plug) {
                blk_mq_bio_to_request(rq, bio);
-               if (list_empty(&plug->mq_list))
+               if (!request_count)
                        trace_block_plug(q);
                else if (request_count >= BLK_MAX_REQUEST_COUNT) {
                        blk_flush_plug_list(plug, false);
@@ -1430,6 +1420,11 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
        while (!list_empty(&tags->page_list)) {
                page = list_first_entry(&tags->page_list, struct page, lru);
                list_del_init(&page->lru);
+               /*
+                * Remove kmemleak object previously allocated in
+                * blk_mq_init_rq_map().
+                */
+               kmemleak_free(page_address(page));
                __free_pages(page, page->private);
        }
 
@@ -1502,6 +1497,11 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                list_add_tail(&page->lru, &tags->page_list);
 
                p = page_address(page);
+               /*
+                * Allow kmemleak to scan these pages as they contain pointers
+                * to additional allocations like via ops->init_request().
+                */
+               kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
                entries_per_page = order_to_size(this_order) / rq_size;
                to_do = min(entries_per_page, set->queue_depth - i);
                left -= to_do * rq_size;
@@ -1673,7 +1673,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
        INIT_LIST_HEAD(&hctx->dispatch);
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
-       hctx->flags = set->flags;
+       hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
@@ -1860,27 +1860,26 @@ static void blk_mq_map_swqueue(struct request_queue *q,
        }
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
        struct blk_mq_hw_ctx *hctx;
-       struct request_queue *q;
-       bool shared;
        int i;
 
-       if (set->tag_list.next == set->tag_list.prev)
-               shared = false;
-       else
-               shared = true;
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (shared)
+                       hctx->flags |= BLK_MQ_F_TAG_SHARED;
+               else
+                       hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+       }
+}
+
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+{
+       struct request_queue *q;
 
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_freeze_queue(q);
-
-               queue_for_each_hw_ctx(q, hctx, i) {
-                       if (shared)
-                               hctx->flags |= BLK_MQ_F_TAG_SHARED;
-                       else
-                               hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
-               }
+               queue_set_hctx_shared(q, shared);
                blk_mq_unfreeze_queue(q);
        }
 }
@@ -1891,7 +1890,12 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
        mutex_lock(&set->tag_list_lock);
        list_del_init(&q->tag_set_list);
-       blk_mq_update_tag_set_depth(set);
+       if (list_is_singular(&set->tag_list)) {
+               /* just transitioned to unshared */
+               set->flags &= ~BLK_MQ_F_TAG_SHARED;
+               /* update existing queue */
+               blk_mq_update_tag_set_depth(set, false);
+       }
        mutex_unlock(&set->tag_list_lock);
 }
 
@@ -1901,8 +1905,17 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
        q->tag_set = set;
 
        mutex_lock(&set->tag_list_lock);
+
+       /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+       if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+               set->flags |= BLK_MQ_F_TAG_SHARED;
+               /* update existing queue */
+               blk_mq_update_tag_set_depth(set, true);
+       }
+       if (set->flags & BLK_MQ_F_TAG_SHARED)
+               queue_set_hctx_shared(q, true);
        list_add_tail(&q->tag_set_list, &set->tag_list);
-       blk_mq_update_tag_set_depth(set);
+
        mutex_unlock(&set->tag_list_lock);
 }
 
@@ -1989,14 +2002,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                hctxs[i]->queue_num = i;
        }
 
-       /*
-        * Init percpu_ref in atomic mode so that it's faster to shutdown.
-        * See blk_register_queue() for details.
-        */
-       if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-                           PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_hctxs;
-
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -2077,8 +2082,6 @@ void blk_mq_free_queue(struct request_queue *q)
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
-
-       percpu_ref_exit(&q->mq_usage_counter);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
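/*
 * Illustrative sketch, not part of the patch above: a userspace analogue of
 * the enter/exit + freeze pattern that q_usage_counter and mq_freeze_wq
 * implement (the pattern the removed blk_mq_queue_enter() followed, now
 * provided by blk_queue_enter()/blk_queue_exit()).  All demo_* names are
 * hypothetical; the kernel uses percpu_ref and wait queues, and
 * percpu_ref_tryget_live() closes the check-then-increment race that this
 * toy version leaves open.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
	atomic_int users;        /* ~ q_usage_counter      */
	atomic_bool frozen;      /* ~ mq_freeze_depth != 0 */
	pthread_mutex_t lock;
	pthread_cond_t idle;     /* ~ mq_freeze_wq         */
};

/* ~ blk_queue_enter(): refuse new users while the queue is frozen. */
static bool demo_enter(struct demo_queue *q)
{
	if (atomic_load(&q->frozen))
		return false;
	atomic_fetch_add(&q->users, 1);
	return true;
}

/* ~ blk_queue_exit(): drop the reference, wake any waiting freezer. */
static void demo_exit(struct demo_queue *q)
{
	if (atomic_fetch_sub(&q->users, 1) == 1) {
		pthread_mutex_lock(&q->lock);
		pthread_cond_broadcast(&q->idle);
		pthread_mutex_unlock(&q->lock);
	}
}

/* ~ blk_freeze_queue(): stop new entries, then wait for users to drain. */
static void demo_freeze(struct demo_queue *q)
{
	atomic_store(&q->frozen, true);
	pthread_mutex_lock(&q->lock);
	while (atomic_load(&q->users) != 0)
		pthread_cond_wait(&q->idle, &q->lock);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct demo_queue q = {
		.users = 0,
		.frozen = false,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.idle = PTHREAD_COND_INITIALIZER,
	};

	if (demo_enter(&q)) {
		/* ... a request would be allocated and submitted here ... */
		demo_exit(&q);
	}
	demo_freeze(&q);
	printf("queue drained and frozen\n");
	return 0;
}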