diff --git a/block/blk-mq.c b/block/blk-mq.c
index d79fdc11b1ee7de35a5a93d5659f231cc3e4fea3..a8e67a155d04f6d937e890473b1647fbe95d210f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1605,7 +1605,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
        INIT_LIST_HEAD(&tags->page_list);
 
        tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
-                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+                                GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
                                 set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
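The move from GFP_KERNEL to GFP_NOIO is the point of this hunk: this allocation can run while the queue it serves is being set up or remapped, and with GFP_KERNEL direct reclaim is allowed to issue I/O, which can recurse into the block layer and deadlock. Together with __GFP_NOWARN | __GFP_NORETRY the attempt fails fast and silently so the caller's fallback can take over. A minimal sketch of the scoped alternative using memalloc_noio_save()/memalloc_noio_restore(); the helper name is hypothetical:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/*
 * Hypothetical helper, not part of this patch: everything allocated
 * between memalloc_noio_save() and memalloc_noio_restore() is treated
 * as GFP_NOIO by the page allocator, so deeply nested callees need no
 * flag changes of their own.
 */
static void *alloc_rq_map_noio(size_t size, int node)
{
        unsigned int noio_flag = memalloc_noio_save();
        void *p;

        p = kzalloc_node(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
                         node);
        memalloc_noio_restore(noio_flag);
        return p;
}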
@@ -1631,7 +1631,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
                do {
                        page = alloc_pages_node(set->numa_node,
-                               GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+                               GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
                                this_order);
                        if (page)
                                break;
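The surrounding loop (only partly visible here) is the usual order-fallback pattern: try the largest contiguous allocation first and step the order down on failure, so memory fragmentation degrades the layout instead of failing the setup, while __GFP_NORETRY keeps each attempt cheap. A standalone sketch under those assumptions; the function name is hypothetical:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical illustration of the order-fallback loop above. */
static struct page *alloc_order_fallback(int node, unsigned int max_order,
                                         unsigned int min_order)
{
        struct page *page;
        unsigned int order;

        for (order = max_order; ; order--) {
                page = alloc_pages_node(node,
                                GFP_NOIO | __GFP_NOWARN |
                                __GFP_NORETRY | __GFP_ZERO, order);
                if (page || order == min_order)
                        break;
        }
        return page;
}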
@@ -1652,7 +1652,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                 * Allow kmemleak to scan these pages as they contain pointers
                 * to additional allocations like via ops->init_request().
                 */
-               kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
+               kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
                entries_per_page = order_to_size(this_order) / rq_size;
                to_do = min(entries_per_page, set->queue_depth - i);
                left -= to_do * rq_size;
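kmemleak does not track raw page allocations, so these pages are registered by hand; the gfp argument only tells kmemleak how it may allocate its own tracking object, and it must be no more permissive than the surrounding context, hence GFP_NOIO here as well. A sketch of the register/unregister pairing; the helper names are hypothetical:

#include <linux/kmemleak.h>
#include <linux/mm.h>

/* Hypothetical helpers showing the kmemleak pairing for raw pages. */
static void *grab_scanned_pages(unsigned int order, gfp_t gfp)
{
        struct page *page = alloc_pages(gfp | __GFP_ZERO, order);

        if (!page)
                return NULL;
        /* min_count = 1: report a leak if no other reference is found */
        kmemleak_alloc(page_address(page), PAGE_SIZE << order, 1, gfp);
        return page_address(page);
}

static void drop_scanned_pages(void *p, unsigned int order)
{
        kmemleak_free(p);       /* unregister before the memory goes away */
        __free_pages(virt_to_page(p), order);
}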
@@ -1870,7 +1870,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 static void blk_mq_map_swqueue(struct request_queue *q,
                               const struct cpumask *online_mask)
 {
-       unsigned int i;
+       unsigned int i, hctx_idx;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct blk_mq_tag_set *set = q->tag_set;
@@ -1893,6 +1893,21 @@ static void blk_mq_map_swqueue(struct request_queue *q,
                if (!cpumask_test_cpu(i, online_mask))
                        continue;
 
+               hctx_idx = q->mq_map[i];
+               /* An unmapped hw queue can be remapped after the CPU topology changes */
+               if (!set->tags[hctx_idx]) {
+                       set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
+
+                       /*
+                        * If tags initialization fails for some hctx,
+                        * that hctx won't be brought online.  In this
+                        * case, remap the current ctx to hctx[0], which
+                        * is guaranteed to always have tags allocated.
+                        */
+                       if (!set->tags[hctx_idx])
+                               q->mq_map[i] = 0;
+               }
+
                ctx = per_cpu_ptr(q->queue_ctx, i);
                hctx = blk_mq_map_queue(q, i);
 
@@ -1909,7 +1924,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
                 * disable it and free the request entries.
                 */
                if (!hctx->nr_ctx) {
-                       if (set->tags[i]) {
+                       /* Never unmap queue 0.  We need it as a
+                        * fallback in case a new remap fails
+                        * allocation.
+                        */
+                       if (i && set->tags[i]) {
                                blk_mq_free_rq_map(set, set->tags[i], i);
                                set->tags[i] = NULL;
                        }
@@ -1917,9 +1936,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
                        continue;
                }
 
-               /* unmapped hw queue can be remapped after CPU topo changed */
-               if (!set->tags[i])
-                       set->tags[i] = blk_mq_init_rq_map(set, i);
                hctx->tags = set->tags[i];
                WARN_ON(!hctx->tags);
 
@@ -2553,7 +2569,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
         * This will be replaced with the stats tracking code, using
         * 'avg_completion_time / 2' as the pre-sleep target.
         */
-       kt = ktime_set(0, nsecs);
+       kt = nsecs;
 
        mode = HRTIMER_MODE_REL;
        hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
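Since ktime_t became a plain 64-bit nanosecond count (the union wrapper was removed in Linux 4.10), ktime_set(0, nsecs) is just a verbose spelling of nsecs, and direct assignment is equivalent. A minimal sketch of the equivalence, assuming that representation; the function name is hypothetical:

#include <linux/bug.h>
#include <linux/ktime.h>

/* Hypothetical demonstration: both forms yield the same ktime_t. */
static ktime_t poll_sleep_target(unsigned long nsecs)
{
        ktime_t a = ktime_set(0, nsecs);        /* 0 * NSEC_PER_SEC + nsecs */
        ktime_t b = nsecs;                      /* direct, same bit pattern */

        WARN_ON_ONCE(a != b);
        return b;
}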