block/blk-mq.c [karo-tx-linux.git]
blk-mq: remove blk_mq_alloc_request_pinned
1 #include <linux/kernel.h>
2 #include <linux/module.h>
3 #include <linux/backing-dev.h>
4 #include <linux/bio.h>
5 #include <linux/blkdev.h>
6 #include <linux/mm.h>
7 #include <linux/init.h>
8 #include <linux/slab.h>
9 #include <linux/workqueue.h>
10 #include <linux/smp.h>
11 #include <linux/llist.h>
12 #include <linux/list_sort.h>
13 #include <linux/cpu.h>
14 #include <linux/cache.h>
15 #include <linux/sched/sysctl.h>
16 #include <linux/delay.h>
17
18 #include <trace/events/block.h>
19
20 #include <linux/blk-mq.h>
21 #include "blk.h"
22 #include "blk-mq.h"
23 #include "blk-mq-tag.h"
24
25 static DEFINE_MUTEX(all_q_mutex);
26 static LIST_HEAD(all_q_list);
27
28 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
29
30 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
31                                            unsigned int cpu)
32 {
33         return per_cpu_ptr(q->queue_ctx, cpu);
34 }
35
36 /*
37  * This assumes per-cpu software queues. They could be per-node
38  * as well, for instance. For now this is hardcoded as-is. Note that we don't
39  * care about preemption, since we know the ctxs are persistent. This does
40  * mean that we can't rely on ctx always matching the currently running CPU.
41  */
42 static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
43 {
44         return __blk_mq_get_ctx(q, get_cpu());
45 }
46
47 static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
48 {
49         put_cpu();
50 }
51
52 /*
53  * Check if any of the ctxs have pending work in this hardware queue
54  */
55 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
56 {
57         unsigned int i;
58
59         for (i = 0; i < hctx->ctx_map.map_size; i++)
60                 if (hctx->ctx_map.map[i].word)
61                         return true;
62
63         return false;
64 }
65
66 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
67                                               struct blk_mq_ctx *ctx)
68 {
69         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
70 }
71
72 #define CTX_TO_BIT(hctx, ctx)   \
73         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
74
75 /*
76  * Mark this ctx as having pending work in this hardware queue
77  */
78 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
79                                      struct blk_mq_ctx *ctx)
80 {
81         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
82
83         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
84                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
85 }
86
87 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
88                                       struct blk_mq_ctx *ctx)
89 {
90         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
91
92         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
93 }
94
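/*
 * Take a usage reference on the queue, pairing with blk_mq_queue_exit().
 * If the queue is bypassed (being frozen) after init has completed, drop
 * the reference again and wait until the bypass is lifted, failing with
 * -ENODEV if the queue is dying.
 */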
95 static int blk_mq_queue_enter(struct request_queue *q)
96 {
97         int ret;
98
99         __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
100         smp_wmb();
101         /* we can't freeze the queue while it is still initializing */
102         if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
103                 return 0;
104
105         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
106
107         spin_lock_irq(q->queue_lock);
108         ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
109                 !blk_queue_bypass(q) || blk_queue_dying(q),
110                 *q->queue_lock);
111         /* take the usage reference with the lock held so freeze_queue can't run concurrently */
112         if (!ret && !blk_queue_dying(q))
113                 __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
114         else if (blk_queue_dying(q))
115                 ret = -ENODEV;
116         spin_unlock_irq(q->queue_lock);
117
118         return ret;
119 }
120
121 static void blk_mq_queue_exit(struct request_queue *q)
122 {
123         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
124 }
125
126 static void __blk_mq_drain_queue(struct request_queue *q)
127 {
128         while (true) {
129                 s64 count;
130
131                 spin_lock_irq(q->queue_lock);
132                 count = percpu_counter_sum(&q->mq_usage_counter);
133                 spin_unlock_irq(q->queue_lock);
134
135                 if (count == 0)
136                         break;
137                 blk_mq_run_queues(q, false);
138                 msleep(10);
139         }
140 }
141
142 /*
143  * Guarantee no request is in use, so we can change any data structure of
144  * the queue afterward.
145  */
146 static void blk_mq_freeze_queue(struct request_queue *q)
147 {
148         bool drain;
149
150         spin_lock_irq(q->queue_lock);
151         drain = !q->bypass_depth++;
152         queue_flag_set(QUEUE_FLAG_BYPASS, q);
153         spin_unlock_irq(q->queue_lock);
154
155         if (drain)
156                 __blk_mq_drain_queue(q);
157 }
158
159 void blk_mq_drain_queue(struct request_queue *q)
160 {
161         __blk_mq_drain_queue(q);
162 }
163
164 static void blk_mq_unfreeze_queue(struct request_queue *q)
165 {
166         bool wake = false;
167
168         spin_lock_irq(q->queue_lock);
169         if (!--q->bypass_depth) {
170                 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
171                 wake = true;
172         }
173         WARN_ON_ONCE(q->bypass_depth < 0);
174         spin_unlock_irq(q->queue_lock);
175         if (wake)
176                 wake_up_all(&q->mq_freeze_wq);
177 }
178
179 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
180 {
181         return blk_mq_has_free_tags(hctx->tags);
182 }
183 EXPORT_SYMBOL(blk_mq_can_queue);
184
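/*
 * Reset a pre-allocated request to a clean state and tie it to the given
 * software queue context and read/write flags.
 */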
185 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
186                                struct request *rq, unsigned int rw_flags)
187 {
188         if (blk_queue_io_stat(q))
189                 rw_flags |= REQ_IO_STAT;
190
191         INIT_LIST_HEAD(&rq->queuelist);
192         /* csd/requeue_work/fifo_time is initialized before use */
193         rq->q = q;
194         rq->mq_ctx = ctx;
195         rq->cmd_flags |= rw_flags;
196         rq->cmd_type = 0;
197         /* do not touch atomic flags, it needs atomic ops against the timer */
198         rq->cpu = -1;
199         rq->__data_len = 0;
200         rq->__sector = (sector_t) -1;
201         rq->bio = NULL;
202         rq->biotail = NULL;
203         INIT_HLIST_NODE(&rq->hash);
204         RB_CLEAR_NODE(&rq->rb_node);
205         memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
206         rq->rq_disk = NULL;
207         rq->part = NULL;
208         rq->start_time = jiffies;
209 #ifdef CONFIG_BLK_CGROUP
210         rq->rl = NULL;
211         set_start_time_ns(rq);
212         rq->io_start_time_ns = 0;
213 #endif
214         rq->nr_phys_segments = 0;
215 #if defined(CONFIG_BLK_DEV_INTEGRITY)
216         rq->nr_integrity_segments = 0;
217 #endif
218         rq->ioprio = 0;
219         rq->special = NULL;
220         /* tag was already set */
221         rq->errors = 0;
222         memset(rq->__cmd, 0, sizeof(rq->__cmd));
223         rq->cmd = rq->__cmd;
224         rq->cmd_len = BLK_MAX_CDB;
225
226         rq->extra_len = 0;
227         rq->sense_len = 0;
228         rq->resid_len = 0;
229         rq->sense = NULL;
230
231         rq->deadline = 0;
232         INIT_LIST_HEAD(&rq->timeout_list);
233         rq->timeout = 0;
234         rq->retries = 0;
235         rq->end_io = NULL;
236         rq->end_io_data = NULL;
237         rq->next_rq = NULL;
238
239         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
240 }
241
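/*
 * Grab a free tag for this hardware queue and return the pre-allocated
 * request associated with it, or NULL if no tag is available under the
 * given gfp constraints.
 */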
242 static struct request *
243 __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
244                 struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
245 {
246         struct request *rq;
247         unsigned int tag;
248
249         tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
250         if (tag != BLK_MQ_TAG_FAIL) {
251                 rq = hctx->tags->rqs[tag];
252
253                 rq->cmd_flags = 0;
254                 if (blk_mq_tag_busy(hctx)) {
255                         rq->cmd_flags = REQ_MQ_INFLIGHT;
256                         atomic_inc(&hctx->nr_active);
257                 }
258
259                 rq->tag = tag;
260                 blk_mq_rq_ctx_init(q, ctx, rq, rw);
261                 return rq;
262         }
263
264         return NULL;
265 }
266
267 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
268                 bool reserved)
269 {
270         struct blk_mq_ctx *ctx;
271         struct blk_mq_hw_ctx *hctx;
272         struct request *rq;
273
274         if (blk_mq_queue_enter(q))
275                 return NULL;
276
277         ctx = blk_mq_get_ctx(q);
278         hctx = q->mq_ops->map_queue(q, ctx->cpu);
279
280         rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
281                                     reserved);
282         if (!rq && (gfp & __GFP_WAIT)) {
283                 __blk_mq_run_hw_queue(hctx);
284                 blk_mq_put_ctx(ctx);
285
286                 ctx = blk_mq_get_ctx(q);
287                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
288                 rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
289         }
290         blk_mq_put_ctx(ctx);
291         return rq;
292 }
293 EXPORT_SYMBOL(blk_mq_alloc_request);
294
295 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
296                                   struct blk_mq_ctx *ctx, struct request *rq)
297 {
298         const int tag = rq->tag;
299         struct request_queue *q = rq->q;
300
301         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
302                 atomic_dec(&hctx->nr_active);
303
304         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
305         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
306         blk_mq_queue_exit(q);
307 }
308
309 void blk_mq_free_request(struct request *rq)
310 {
311         struct blk_mq_ctx *ctx = rq->mq_ctx;
312         struct blk_mq_hw_ctx *hctx;
313         struct request_queue *q = rq->q;
314
315         ctx->rq_completed[rq_is_sync(rq)]++;
316
317         hctx = q->mq_ops->map_queue(q, ctx->cpu);
318         __blk_mq_free_request(hctx, ctx, rq);
319 }
320
321 /*
322  * Clone all relevant state from a request that has been put on hold in
323  * the flush state machine into the preallocated flush request that hangs
324  * off the request queue.
325  *
326  * To the driver the flush request should be invisible; that's why we are
327  * impersonating the original request here.
328  */
329 void blk_mq_clone_flush_request(struct request *flush_rq,
330                 struct request *orig_rq)
331 {
332         struct blk_mq_hw_ctx *hctx =
333                 orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
334
335         flush_rq->mq_ctx = orig_rq->mq_ctx;
336         flush_rq->tag = orig_rq->tag;
337         memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
338                 hctx->cmd_size);
339 }
340
341 inline void __blk_mq_end_io(struct request *rq, int error)
342 {
343         blk_account_io_done(rq);
344
345         if (rq->end_io) {
346                 rq->end_io(rq, error);
347         } else {
348                 if (unlikely(blk_bidi_rq(rq)))
349                         blk_mq_free_request(rq->next_rq);
350                 blk_mq_free_request(rq);
351         }
352 }
353 EXPORT_SYMBOL(__blk_mq_end_io);
354
355 void blk_mq_end_io(struct request *rq, int error)
356 {
357         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
358                 BUG();
359         __blk_mq_end_io(rq, error);
360 }
361 EXPORT_SYMBOL(blk_mq_end_io);
362
363 static void __blk_mq_complete_request_remote(void *data)
364 {
365         struct request *rq = data;
366
367         rq->q->softirq_done_fn(rq);
368 }
369
370 void __blk_mq_complete_request(struct request *rq)
371 {
372         struct blk_mq_ctx *ctx = rq->mq_ctx;
373         bool shared = false;
374         int cpu;
375
376         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
377                 rq->q->softirq_done_fn(rq);
378                 return;
379         }
380
381         cpu = get_cpu();
382         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
383                 shared = cpus_share_cache(cpu, ctx->cpu);
384
385         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
386                 rq->csd.func = __blk_mq_complete_request_remote;
387                 rq->csd.info = rq;
388                 rq->csd.flags = 0;
389                 smp_call_function_single_async(ctx->cpu, &rq->csd);
390         } else {
391                 rq->q->softirq_done_fn(rq);
392         }
393         put_cpu();
394 }
395
396 /**
397  * blk_mq_complete_request - end I/O on a request
398  * @rq:         the request being processed
399  *
400  * Description:
401  *      Ends all I/O on a request. It does not handle partial completions.
402  *      The actual completion happens out-of-order, through an IPI handler.
403  **/
404 void blk_mq_complete_request(struct request *rq)
405 {
406         struct request_queue *q = rq->q;
407
408         if (unlikely(blk_should_fake_timeout(q)))
409                 return;
410         if (!blk_mark_rq_complete(rq)) {
411                 if (q->softirq_done_fn)
412                         __blk_mq_complete_request(rq);
413                 else
414                         blk_mq_end_io(rq, rq->errors);
415         }
416 }
417 EXPORT_SYMBOL(blk_mq_complete_request);
418
419 static void blk_mq_start_request(struct request *rq, bool last)
420 {
421         struct request_queue *q = rq->q;
422
423         trace_block_rq_issue(q, rq);
424
425         rq->resid_len = blk_rq_bytes(rq);
426         if (unlikely(blk_bidi_rq(rq)))
427                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
428
429         /*
430          * Just mark start time and set the started bit. Due to memory
431          * ordering, we know we'll see the correct deadline as long as
432          * REQ_ATOM_STARTED is seen. Use the default queue timeout,
433          * unless one has been set in the request.
434          */
435         if (!rq->timeout)
436                 rq->deadline = jiffies + q->rq_timeout;
437         else
438                 rq->deadline = jiffies + rq->timeout;
439
440         /*
441          * Mark us as started and clear complete. Complete might have been
442          * set if requeue raced with timeout, which then marked it as
443          * complete. So be sure to clear complete again when we start
444          * the request, otherwise we'll ignore the completion event.
445          */
446         set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
447         clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
448
449         if (q->dma_drain_size && blk_rq_bytes(rq)) {
450                 /*
451                  * Make sure space for the drain appears.  We know we can do
452                  * this because max_hw_segments has been adjusted to be one
453                  * fewer than the device can handle.
454                  */
455                 rq->nr_phys_segments++;
456         }
457
458         /*
459          * Flag the last request in the series so that drivers know when IO
460          * should be kicked off, if they don't do it on a per-request basis.
461          *
462          * Note: the flag isn't the only condition on which drivers should kick
463          * off IO. If the drive is busy, the last request might not have the bit set.
464          */
465         if (last)
466                 rq->cmd_flags |= REQ_END;
467 }
468
469 static void __blk_mq_requeue_request(struct request *rq)
470 {
471         struct request_queue *q = rq->q;
472
473         trace_block_rq_requeue(q, rq);
474         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
475
476         rq->cmd_flags &= ~REQ_END;
477
478         if (q->dma_drain_size && blk_rq_bytes(rq))
479                 rq->nr_phys_segments--;
480 }
481
482 void blk_mq_requeue_request(struct request *rq)
483 {
484         __blk_mq_requeue_request(rq);
485         blk_clear_rq_complete(rq);
486
487         BUG_ON(blk_queued_rq(rq));
488         blk_mq_add_to_requeue_list(rq, true);
489 }
490 EXPORT_SYMBOL(blk_mq_requeue_request);
491
492 static void blk_mq_requeue_work(struct work_struct *work)
493 {
494         struct request_queue *q =
495                 container_of(work, struct request_queue, requeue_work);
496         LIST_HEAD(rq_list);
497         struct request *rq, *next;
498         unsigned long flags;
499
500         spin_lock_irqsave(&q->requeue_lock, flags);
501         list_splice_init(&q->requeue_list, &rq_list);
502         spin_unlock_irqrestore(&q->requeue_lock, flags);
503
504         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
505                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
506                         continue;
507
508                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
509                 list_del_init(&rq->queuelist);
510                 blk_mq_insert_request(rq, true, false, false);
511         }
512
513         while (!list_empty(&rq_list)) {
514                 rq = list_entry(rq_list.next, struct request, queuelist);
515                 list_del_init(&rq->queuelist);
516                 blk_mq_insert_request(rq, false, false, false);
517         }
518
519         blk_mq_run_queues(q, false);
520 }
521
522 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
523 {
524         struct request_queue *q = rq->q;
525         unsigned long flags;
526
527         /*
528          * We abuse this flag that is otherwise used by the I/O scheduler to
529          * request head insertion from the workqueue.
530          */
531         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
532
533         spin_lock_irqsave(&q->requeue_lock, flags);
534         if (at_head) {
535                 rq->cmd_flags |= REQ_SOFTBARRIER;
536                 list_add(&rq->queuelist, &q->requeue_list);
537         } else {
538                 list_add_tail(&rq->queuelist, &q->requeue_list);
539         }
540         spin_unlock_irqrestore(&q->requeue_lock, flags);
541 }
542 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
543
544 void blk_mq_kick_requeue_list(struct request_queue *q)
545 {
546         kblockd_schedule_work(&q->requeue_work);
547 }
548 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
549
550 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
551 {
552         return tags->rqs[tag];
553 }
554 EXPORT_SYMBOL(blk_mq_tag_to_rq);
555
556 struct blk_mq_timeout_data {
557         struct blk_mq_hw_ctx *hctx;
558         unsigned long *next;
559         unsigned int *next_set;
560 };
561
562 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
563 {
564         struct blk_mq_timeout_data *data = __data;
565         struct blk_mq_hw_ctx *hctx = data->hctx;
566         unsigned int tag;
567
568         /* It may not be in flight yet (this is where
569          * the REQ_ATOM_STARTED flag comes in). The requests are
570          * statically allocated, so we know it's always safe to access the
571          * memory associated with a bit offset into ->rqs[].
572          */
573         tag = 0;
574         do {
575                 struct request *rq;
576
577                 tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
578                 if (tag >= hctx->tags->nr_tags)
579                         break;
580
581                 rq = blk_mq_tag_to_rq(hctx->tags, tag++);
582                 if (rq->q != hctx->queue)
583                         continue;
584                 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
585                         continue;
586
587                 blk_rq_check_expired(rq, data->next, data->next_set);
588         } while (1);
589 }
590
591 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
592                                         unsigned long *next,
593                                         unsigned int *next_set)
594 {
595         struct blk_mq_timeout_data data = {
596                 .hctx           = hctx,
597                 .next           = next,
598                 .next_set       = next_set,
599         };
600
601         /*
602          * Ask the tagging code to iterate busy requests, so we can
603          * check them for timeout.
604          */
605         blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
606 }
607
608 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
609 {
610         struct request_queue *q = rq->q;
611
612         /*
613          * We know that complete is set at this point. If STARTED isn't set
614          * anymore, then the request isn't active and the "timeout" should
615          * just be ignored. This can happen due to the bitflag ordering.
616          * Timeout first checks if STARTED is set, and if it is, assumes
617          * the request is active. But if we race with completion, then
618          * both flags will get cleared. So check here again, and ignore
619          * a timeout event with a request that isn't active.
620          */
621         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
622                 return BLK_EH_NOT_HANDLED;
623
624         if (!q->mq_ops->timeout)
625                 return BLK_EH_RESET_TIMER;
626
627         return q->mq_ops->timeout(rq);
628 }
629
630 static void blk_mq_rq_timer(unsigned long data)
631 {
632         struct request_queue *q = (struct request_queue *) data;
633         struct blk_mq_hw_ctx *hctx;
634         unsigned long next = 0;
635         int i, next_set = 0;
636
637         queue_for_each_hw_ctx(q, hctx, i) {
638                 /*
639                  * If no software queues are currently mapped to this
640                  * hardware queue, there's nothing to check
641                  */
642                 if (!hctx->nr_ctx || !hctx->tags)
643                         continue;
644
645                 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
646         }
647
648         if (next_set) {
649                 next = blk_rq_timeout(round_jiffies_up(next));
650                 mod_timer(&q->timeout, next);
651         } else {
652                 queue_for_each_hw_ctx(q, hctx, i)
653                         blk_mq_tag_idle(hctx);
654         }
655 }
656
657 /*
658  * Reverse check our software queue for entries that we could potentially
659  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
660  * too much time checking for merges.
661  */
662 static bool blk_mq_attempt_merge(struct request_queue *q,
663                                  struct blk_mq_ctx *ctx, struct bio *bio)
664 {
665         struct request *rq;
666         int checked = 8;
667
668         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
669                 int el_ret;
670
671                 if (!checked--)
672                         break;
673
674                 if (!blk_rq_merge_ok(rq, bio))
675                         continue;
676
677                 el_ret = blk_try_merge(rq, bio);
678                 if (el_ret == ELEVATOR_BACK_MERGE) {
679                         if (bio_attempt_back_merge(q, rq, bio)) {
680                                 ctx->rq_merged++;
681                                 return true;
682                         }
683                         break;
684                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
685                         if (bio_attempt_front_merge(q, rq, bio)) {
686                                 ctx->rq_merged++;
687                                 return true;
688                         }
689                         break;
690                 }
691         }
692
693         return false;
694 }
695
696 /*
697  * Process software queues that have been marked busy, splicing them
698  * to the for-dispatch list.
699  */
700 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
701 {
702         struct blk_mq_ctx *ctx;
703         int i;
704
705         for (i = 0; i < hctx->ctx_map.map_size; i++) {
706                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
707                 unsigned int off, bit;
708
709                 if (!bm->word)
710                         continue;
711
712                 bit = 0;
713                 off = i * hctx->ctx_map.bits_per_word;
714                 do {
715                         bit = find_next_bit(&bm->word, bm->depth, bit);
716                         if (bit >= bm->depth)
717                                 break;
718
719                         ctx = hctx->ctxs[bit + off];
720                         clear_bit(bit, &bm->word);
721                         spin_lock(&ctx->lock);
722                         list_splice_tail_init(&ctx->rq_list, list);
723                         spin_unlock(&ctx->lock);
724
725                         bit++;
726                 } while (1);
727         }
728 }
729
730 /*
731  * Run this hardware queue, pulling any software queues mapped to it in.
732  * Note that this function currently has various problems around ordering
733  * of IO. In particular, we'd like FIFO behaviour on handling existing
734  * items on the hctx->dispatch list. Ignore that for now.
735  */
736 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
737 {
738         struct request_queue *q = hctx->queue;
739         struct request *rq;
740         LIST_HEAD(rq_list);
741         int queued;
742
743         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
744
745         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
746                 return;
747
748         hctx->run++;
749
750         /*
751          * Touch any software queue that has pending entries.
752          */
753         flush_busy_ctxs(hctx, &rq_list);
754
755         /*
756          * If we have previous entries on our dispatch list, grab them
757          * and stuff them at the front for more fair dispatch.
758          */
759         if (!list_empty_careful(&hctx->dispatch)) {
760                 spin_lock(&hctx->lock);
761                 if (!list_empty(&hctx->dispatch))
762                         list_splice_init(&hctx->dispatch, &rq_list);
763                 spin_unlock(&hctx->lock);
764         }
765
766         /*
767          * Now process all the entries, sending them to the driver.
768          */
769         queued = 0;
770         while (!list_empty(&rq_list)) {
771                 int ret;
772
773                 rq = list_first_entry(&rq_list, struct request, queuelist);
774                 list_del_init(&rq->queuelist);
775
776                 blk_mq_start_request(rq, list_empty(&rq_list));
777
778                 ret = q->mq_ops->queue_rq(hctx, rq);
779                 switch (ret) {
780                 case BLK_MQ_RQ_QUEUE_OK:
781                         queued++;
782                         continue;
783                 case BLK_MQ_RQ_QUEUE_BUSY:
784                         list_add(&rq->queuelist, &rq_list);
785                         __blk_mq_requeue_request(rq);
786                         break;
787                 default:
788                         pr_err("blk-mq: bad return on queue: %d\n", ret);
789                 case BLK_MQ_RQ_QUEUE_ERROR:
790                         rq->errors = -EIO;
791                         blk_mq_end_io(rq, rq->errors);
792                         break;
793                 }
794
795                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
796                         break;
797         }
798
799         if (!queued)
800                 hctx->dispatched[0]++;
801         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
802                 hctx->dispatched[ilog2(queued) + 1]++;
803
804         /*
805          * Any items that need requeuing? Stuff them into hctx->dispatch,
806          * that is where we will continue on the next queue run.
807          */
808         if (!list_empty(&rq_list)) {
809                 spin_lock(&hctx->lock);
810                 list_splice(&rq_list, &hctx->dispatch);
811                 spin_unlock(&hctx->lock);
812         }
813 }
814
815 /*
816  * It'd be great if the workqueue API had a way to pass
817  * in a mask and had some smarts for more clever placement.
818  * For now we just round-robin here, switching for every
819  * BLK_MQ_CPU_WORK_BATCH queued items.
820  */
821 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
822 {
823         int cpu = hctx->next_cpu;
824
825         if (--hctx->next_cpu_batch <= 0) {
826                 int next_cpu;
827
828                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
829                 if (next_cpu >= nr_cpu_ids)
830                         next_cpu = cpumask_first(hctx->cpumask);
831
832                 hctx->next_cpu = next_cpu;
833                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
834         }
835
836         return cpu;
837 }
838
839 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
840 {
841         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
842                 return;
843
844         if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
845                 __blk_mq_run_hw_queue(hctx);
846         else if (hctx->queue->nr_hw_queues == 1)
847                 kblockd_schedule_delayed_work(&hctx->run_work, 0);
848         else {
849                 unsigned int cpu;
850
851                 cpu = blk_mq_hctx_next_cpu(hctx);
852                 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
853         }
854 }
855
856 void blk_mq_run_queues(struct request_queue *q, bool async)
857 {
858         struct blk_mq_hw_ctx *hctx;
859         int i;
860
861         queue_for_each_hw_ctx(q, hctx, i) {
862                 if ((!blk_mq_hctx_has_pending(hctx) &&
863                     list_empty_careful(&hctx->dispatch)) ||
864                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
865                         continue;
866
867                 preempt_disable();
868                 blk_mq_run_hw_queue(hctx, async);
869                 preempt_enable();
870         }
871 }
872 EXPORT_SYMBOL(blk_mq_run_queues);
873
874 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
875 {
876         cancel_delayed_work(&hctx->run_work);
877         cancel_delayed_work(&hctx->delay_work);
878         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
879 }
880 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
881
882 void blk_mq_stop_hw_queues(struct request_queue *q)
883 {
884         struct blk_mq_hw_ctx *hctx;
885         int i;
886
887         queue_for_each_hw_ctx(q, hctx, i)
888                 blk_mq_stop_hw_queue(hctx);
889 }
890 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
891
892 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
893 {
894         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
895
896         preempt_disable();
897         __blk_mq_run_hw_queue(hctx);
898         preempt_enable();
899 }
900 EXPORT_SYMBOL(blk_mq_start_hw_queue);
901
902 void blk_mq_start_hw_queues(struct request_queue *q)
903 {
904         struct blk_mq_hw_ctx *hctx;
905         int i;
906
907         queue_for_each_hw_ctx(q, hctx, i)
908                 blk_mq_start_hw_queue(hctx);
909 }
910 EXPORT_SYMBOL(blk_mq_start_hw_queues);
911
912
913 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
914 {
915         struct blk_mq_hw_ctx *hctx;
916         int i;
917
918         queue_for_each_hw_ctx(q, hctx, i) {
919                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
920                         continue;
921
922                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
923                 preempt_disable();
924                 blk_mq_run_hw_queue(hctx, async);
925                 preempt_enable();
926         }
927 }
928 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
929
930 static void blk_mq_run_work_fn(struct work_struct *work)
931 {
932         struct blk_mq_hw_ctx *hctx;
933
934         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
935
936         __blk_mq_run_hw_queue(hctx);
937 }
938
939 static void blk_mq_delay_work_fn(struct work_struct *work)
940 {
941         struct blk_mq_hw_ctx *hctx;
942
943         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
944
945         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
946                 __blk_mq_run_hw_queue(hctx);
947 }
948
949 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
950 {
951         unsigned long tmo = msecs_to_jiffies(msecs);
952
953         if (hctx->queue->nr_hw_queues == 1)
954                 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
955         else {
956                 unsigned int cpu;
957
958                 cpu = blk_mq_hctx_next_cpu(hctx);
959                 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
960         }
961 }
962 EXPORT_SYMBOL(blk_mq_delay_queue);
963
964 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
965                                     struct request *rq, bool at_head)
966 {
967         struct blk_mq_ctx *ctx = rq->mq_ctx;
968
969         trace_block_rq_insert(hctx->queue, rq);
970
971         if (at_head)
972                 list_add(&rq->queuelist, &ctx->rq_list);
973         else
974                 list_add_tail(&rq->queuelist, &ctx->rq_list);
975
976         blk_mq_hctx_mark_pending(hctx, ctx);
977
978         /*
979          * We do this early, to ensure we are on the right CPU.
980          */
981         blk_add_timer(rq);
982 }
983
984 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
985                 bool async)
986 {
987         struct request_queue *q = rq->q;
988         struct blk_mq_hw_ctx *hctx;
989         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
990
991         current_ctx = blk_mq_get_ctx(q);
992         if (!cpu_online(ctx->cpu))
993                 rq->mq_ctx = ctx = current_ctx;
994
995         hctx = q->mq_ops->map_queue(q, ctx->cpu);
996
997         if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
998             !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
999                 blk_insert_flush(rq);
1000         } else {
1001                 spin_lock(&ctx->lock);
1002                 __blk_mq_insert_request(hctx, rq, at_head);
1003                 spin_unlock(&ctx->lock);
1004         }
1005
1006         if (run_queue)
1007                 blk_mq_run_hw_queue(hctx, async);
1008
1009         blk_mq_put_ctx(current_ctx);
1010 }
1011
1012 static void blk_mq_insert_requests(struct request_queue *q,
1013                                      struct blk_mq_ctx *ctx,
1014                                      struct list_head *list,
1015                                      int depth,
1016                                      bool from_schedule)
1017
1018 {
1019         struct blk_mq_hw_ctx *hctx;
1020         struct blk_mq_ctx *current_ctx;
1021
1022         trace_block_unplug(q, depth, !from_schedule);
1023
1024         current_ctx = blk_mq_get_ctx(q);
1025
1026         if (!cpu_online(ctx->cpu))
1027                 ctx = current_ctx;
1028         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1029
1030         /*
1031          * Preemption doesn't flush the plug list, so it's possible that
1032          * ctx->cpu is offline by now.
1033          */
1034         spin_lock(&ctx->lock);
1035         while (!list_empty(list)) {
1036                 struct request *rq;
1037
1038                 rq = list_first_entry(list, struct request, queuelist);
1039                 list_del_init(&rq->queuelist);
1040                 rq->mq_ctx = ctx;
1041                 __blk_mq_insert_request(hctx, rq, false);
1042         }
1043         spin_unlock(&ctx->lock);
1044
1045         blk_mq_run_hw_queue(hctx, from_schedule);
1046         blk_mq_put_ctx(current_ctx);
1047 }
1048
1049 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1050 {
1051         struct request *rqa = container_of(a, struct request, queuelist);
1052         struct request *rqb = container_of(b, struct request, queuelist);
1053
1054         return !(rqa->mq_ctx < rqb->mq_ctx ||
1055                  (rqa->mq_ctx == rqb->mq_ctx &&
1056                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1057 }
1058
1059 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1060 {
1061         struct blk_mq_ctx *this_ctx;
1062         struct request_queue *this_q;
1063         struct request *rq;
1064         LIST_HEAD(list);
1065         LIST_HEAD(ctx_list);
1066         unsigned int depth;
1067
1068         list_splice_init(&plug->mq_list, &list);
1069
1070         list_sort(NULL, &list, plug_ctx_cmp);
1071
1072         this_q = NULL;
1073         this_ctx = NULL;
1074         depth = 0;
1075
1076         while (!list_empty(&list)) {
1077                 rq = list_entry_rq(list.next);
1078                 list_del_init(&rq->queuelist);
1079                 BUG_ON(!rq->q);
1080                 if (rq->mq_ctx != this_ctx) {
1081                         if (this_ctx) {
1082                                 blk_mq_insert_requests(this_q, this_ctx,
1083                                                         &ctx_list, depth,
1084                                                         from_schedule);
1085                         }
1086
1087                         this_ctx = rq->mq_ctx;
1088                         this_q = rq->q;
1089                         depth = 0;
1090                 }
1091
1092                 depth++;
1093                 list_add_tail(&rq->queuelist, &ctx_list);
1094         }
1095
1096         /*
1097          * If 'this_ctx' is set, we know we have entries to complete
1098          * on 'ctx_list'. Do those.
1099          */
1100         if (this_ctx) {
1101                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1102                                        from_schedule);
1103         }
1104 }
1105
1106 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1107 {
1108         init_request_from_bio(rq, bio);
1109         blk_account_io_start(rq, 1);
1110 }
1111
1112 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1113                                          struct blk_mq_ctx *ctx,
1114                                          struct request *rq, struct bio *bio)
1115 {
1116         struct request_queue *q = hctx->queue;
1117
1118         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1119                 blk_mq_bio_to_request(rq, bio);
1120                 spin_lock(&ctx->lock);
1121 insert_rq:
1122                 __blk_mq_insert_request(hctx, rq, false);
1123                 spin_unlock(&ctx->lock);
1124                 return false;
1125         } else {
1126                 spin_lock(&ctx->lock);
1127                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1128                         blk_mq_bio_to_request(rq, bio);
1129                         goto insert_rq;
1130                 }
1131
1132                 spin_unlock(&ctx->lock);
1133                 __blk_mq_free_request(hctx, ctx, rq);
1134                 return true;
1135         }
1136 }
1137
1138 struct blk_map_ctx {
1139         struct blk_mq_hw_ctx *hctx;
1140         struct blk_mq_ctx *ctx;
1141 };
1142
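/*
 * Map the submitting CPU to a software/hardware queue pair and allocate a
 * request for the bio. If the initial atomic allocation fails, run the
 * hardware queue to free up tags and retry with __GFP_WAIT.
 */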
1143 static struct request *blk_mq_map_request(struct request_queue *q,
1144                                           struct bio *bio,
1145                                           struct blk_map_ctx *data)
1146 {
1147         struct blk_mq_hw_ctx *hctx;
1148         struct blk_mq_ctx *ctx;
1149         struct request *rq;
1150         int rw = bio_data_dir(bio);
1151
1152         if (unlikely(blk_mq_queue_enter(q))) {
1153                 bio_endio(bio, -EIO);
1154                 return NULL;
1155         }
1156
1157         ctx = blk_mq_get_ctx(q);
1158         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1159
1160         if (rw_is_sync(bio->bi_rw))
1161                 rw |= REQ_SYNC;
1162
1163         trace_block_getrq(q, bio, rw);
1164         rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
1165         if (unlikely(!rq)) {
1166                 __blk_mq_run_hw_queue(hctx);
1167                 blk_mq_put_ctx(ctx);
1168                 trace_block_sleeprq(q, bio, rw);
1169
1170                 ctx = blk_mq_get_ctx(q);
1171                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1172                 rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
1173                                             __GFP_WAIT|GFP_ATOMIC, false);
1174         }
1175
1176         hctx->queued++;
1177         data->hctx = hctx;
1178         data->ctx = ctx;
1179         return rq;
1180 }
1181
1182 /*
1183  * Multiple hardware queue variant. This will not use per-process plugs,
1184  * but will attempt to bypass the hctx queueing if we can go straight to
1185  * hardware for SYNC IO.
1186  */
1187 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1188 {
1189         const int is_sync = rw_is_sync(bio->bi_rw);
1190         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1191         struct blk_map_ctx data;
1192         struct request *rq;
1193
1194         blk_queue_bounce(q, &bio);
1195
1196         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1197                 bio_endio(bio, -EIO);
1198                 return;
1199         }
1200
1201         rq = blk_mq_map_request(q, bio, &data);
1202         if (unlikely(!rq))
1203                 return;
1204
1205         if (unlikely(is_flush_fua)) {
1206                 blk_mq_bio_to_request(rq, bio);
1207                 blk_insert_flush(rq);
1208                 goto run_queue;
1209         }
1210
1211         if (is_sync) {
1212                 int ret;
1213
1214                 blk_mq_bio_to_request(rq, bio);
1215                 blk_mq_start_request(rq, true);
1216
1217                 /*
1218                  * If queueing was OK, we are done. On error, kill the request.
1219                  * For anything else (busy), just add it to our list as we
1220                  * previously would have done.
1221                  */
1222                 ret = q->mq_ops->queue_rq(data.hctx, rq);
1223                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1224                         goto done;
1225                 else {
1226                         __blk_mq_requeue_request(rq);
1227
1228                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1229                                 rq->errors = -EIO;
1230                                 blk_mq_end_io(rq, rq->errors);
1231                                 goto done;
1232                         }
1233                 }
1234         }
1235
1236         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1237                 /*
1238                  * For a SYNC request, send it to the hardware immediately. For
1239                  * an ASYNC request, just ensure that we run it later on. The
1240                  * latter allows for merging opportunities and more efficient
1241                  * dispatching.
1242                  */
1243 run_queue:
1244                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1245         }
1246 done:
1247         blk_mq_put_ctx(data.ctx);
1248 }
1249
1250 /*
1251  * Single hardware queue variant. This will attempt to use any per-process
1252  * plug for merging and IO deferral.
1253  */
1254 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1255 {
1256         const int is_sync = rw_is_sync(bio->bi_rw);
1257         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1258         unsigned int use_plug, request_count = 0;
1259         struct blk_map_ctx data;
1260         struct request *rq;
1261
1262         /*
1263          * Use the per-process plug only for async, non-flush IO; sync IO
1264          * goes straight to the hardware queue.
1265          */
1266         use_plug = !is_flush_fua && !is_sync;
1267
1268         blk_queue_bounce(q, &bio);
1269
1270         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1271                 bio_endio(bio, -EIO);
1272                 return;
1273         }
1274
1275         if (use_plug && !blk_queue_nomerges(q) &&
1276             blk_attempt_plug_merge(q, bio, &request_count))
1277                 return;
1278
1279         rq = blk_mq_map_request(q, bio, &data);
1280
1281         if (unlikely(is_flush_fua)) {
1282                 blk_mq_bio_to_request(rq, bio);
1283                 blk_insert_flush(rq);
1284                 goto run_queue;
1285         }
1286
1287         /*
1288          * A task plug currently exists. Since this is completely lockless,
1289          * utilize that to temporarily store requests until the task is
1290          * either done or scheduled away.
1291          */
1292         if (use_plug) {
1293                 struct blk_plug *plug = current->plug;
1294
1295                 if (plug) {
1296                         blk_mq_bio_to_request(rq, bio);
1297                         if (list_empty(&plug->mq_list))
1298                                 trace_block_plug(q);
1299                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1300                                 blk_flush_plug_list(plug, false);
1301                                 trace_block_plug(q);
1302                         }
1303                         list_add_tail(&rq->queuelist, &plug->mq_list);
1304                         blk_mq_put_ctx(data.ctx);
1305                         return;
1306                 }
1307         }
1308
1309         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1310                 /*
1311                  * For a SYNC request, send it to the hardware immediately. For
1312                  * an ASYNC request, just ensure that we run it later on. The
1313                  * latter allows for merging opportunities and more efficient
1314                  * dispatching.
1315                  */
1316 run_queue:
1317                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1318         }
1319
1320         blk_mq_put_ctx(data.ctx);
1321 }
1322
1323 /*
1324  * Default mapping to a software queue, since we use one per CPU.
1325  */
1326 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1327 {
1328         return q->queue_hw_ctx[q->mq_map[cpu]];
1329 }
1330 EXPORT_SYMBOL(blk_mq_map_queue);
1331
1332 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
1333                                                    unsigned int hctx_index,
1334                                                    int node)
1335 {
1336         return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
1337 }
1338 EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
1339
1340 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
1341                                  unsigned int hctx_index)
1342 {
1343         kfree(hctx);
1344 }
1345 EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
1346
1347 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1348                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1349 {
1350         struct page *page;
1351
1352         if (tags->rqs && set->ops->exit_request) {
1353                 int i;
1354
1355                 for (i = 0; i < tags->nr_tags; i++) {
1356                         if (!tags->rqs[i])
1357                                 continue;
1358                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1359                                                 hctx_idx, i);
1360                 }
1361         }
1362
1363         while (!list_empty(&tags->page_list)) {
1364                 page = list_first_entry(&tags->page_list, struct page, lru);
1365                 list_del_init(&page->lru);
1366                 __free_pages(page, page->private);
1367         }
1368
1369         kfree(tags->rqs);
1370
1371         blk_mq_free_tags(tags);
1372 }
1373
1374 static size_t order_to_size(unsigned int order)
1375 {
1376         return (size_t)PAGE_SIZE << order;
1377 }
1378
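/*
 * Allocate the tag map plus the backing pages that hold the request
 * structures (and driver payload) for one hardware queue, invoking the
 * driver's init_request hook, if provided, for each request.
 */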
1379 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1380                 unsigned int hctx_idx)
1381 {
1382         struct blk_mq_tags *tags;
1383         unsigned int i, j, entries_per_page, max_order = 4;
1384         size_t rq_size, left;
1385
1386         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1387                                 set->numa_node);
1388         if (!tags)
1389                 return NULL;
1390
1391         INIT_LIST_HEAD(&tags->page_list);
1392
1393         tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1394                                         GFP_KERNEL, set->numa_node);
1395         if (!tags->rqs) {
1396                 blk_mq_free_tags(tags);
1397                 return NULL;
1398         }
1399
1400         /*
1401          * rq_size is the size of the request plus driver payload, rounded
1402          * to the cacheline size
1403          */
1404         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1405                                 cache_line_size());
1406         left = rq_size * set->queue_depth;
1407
1408         for (i = 0; i < set->queue_depth; ) {
1409                 int this_order = max_order;
1410                 struct page *page;
1411                 int to_do;
1412                 void *p;
1413
1414                 while (left < order_to_size(this_order - 1) && this_order)
1415                         this_order--;
1416
1417                 do {
1418                         page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1419                                                 this_order);
1420                         if (page)
1421                                 break;
1422                         if (!this_order--)
1423                                 break;
1424                         if (order_to_size(this_order) < rq_size)
1425                                 break;
1426                 } while (1);
1427
1428                 if (!page)
1429                         goto fail;
1430
1431                 page->private = this_order;
1432                 list_add_tail(&page->lru, &tags->page_list);
1433
1434                 p = page_address(page);
1435                 entries_per_page = order_to_size(this_order) / rq_size;
1436                 to_do = min(entries_per_page, set->queue_depth - i);
1437                 left -= to_do * rq_size;
1438                 for (j = 0; j < to_do; j++) {
1439                         tags->rqs[i] = p;
1440                         if (set->ops->init_request) {
1441                                 if (set->ops->init_request(set->driver_data,
1442                                                 tags->rqs[i], hctx_idx, i,
1443                                                 set->numa_node))
1444                                         goto fail;
1445                         }
1446
1447                         p += rq_size;
1448                         i++;
1449                 }
1450         }
1451
1452         return tags;
1453
1454 fail:
1455         pr_warn("%s: failed to allocate requests\n", __func__);
1456         blk_mq_free_rq_map(set, tags, hctx_idx);
1457         return NULL;
1458 }
1459
1460 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1461 {
1462         kfree(bitmap->map);
1463 }
1464
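/*
 * Allocate the per-hctx pending bitmap: one blk_align_bitmap word for
 * every group of bits_per_word software queues, covering all possible
 * CPUs.
 */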
1465 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1466 {
1467         unsigned int bpw = 8, total, num_maps, i;
1468
1469         bitmap->bits_per_word = bpw;
1470
1471         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1472         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1473                                         GFP_KERNEL, node);
1474         if (!bitmap->map)
1475                 return -ENOMEM;
1476
1477         bitmap->map_size = num_maps;
1478
1479         total = nr_cpu_ids;
1480         for (i = 0; i < num_maps; i++) {
1481                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1482                 total -= bitmap->map[i].depth;
1483         }
1484
1485         return 0;
1486 }
1487
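/*
 * A CPU is going offline: splice any requests still pending in its
 * software queue over to an online context and kick the hardware queue
 * so they get dispatched.
 */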
1488 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1489 {
1490         struct request_queue *q = hctx->queue;
1491         struct blk_mq_ctx *ctx;
1492         LIST_HEAD(tmp);
1493
1494         /*
1495          * Move ctx entries to new CPU, if this one is going away.
1496          */
1497         ctx = __blk_mq_get_ctx(q, cpu);
1498
1499         spin_lock(&ctx->lock);
1500         if (!list_empty(&ctx->rq_list)) {
1501                 list_splice_init(&ctx->rq_list, &tmp);
1502                 blk_mq_hctx_clear_pending(hctx, ctx);
1503         }
1504         spin_unlock(&ctx->lock);
1505
1506         if (list_empty(&tmp))
1507                 return NOTIFY_OK;
1508
1509         ctx = blk_mq_get_ctx(q);
1510         spin_lock(&ctx->lock);
1511
1512         while (!list_empty(&tmp)) {
1513                 struct request *rq;
1514
1515                 rq = list_first_entry(&tmp, struct request, queuelist);
1516                 rq->mq_ctx = ctx;
1517                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1518         }
1519
1520         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1521         blk_mq_hctx_mark_pending(hctx, ctx);
1522
1523         spin_unlock(&ctx->lock);
1524
1525         blk_mq_run_hw_queue(hctx, true);
1526         blk_mq_put_ctx(ctx);
1527         return NOTIFY_OK;
1528 }
1529
1530 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1531 {
1532         struct request_queue *q = hctx->queue;
1533         struct blk_mq_tag_set *set = q->tag_set;
1534
1535         if (set->tags[hctx->queue_num])
1536                 return NOTIFY_OK;
1537
1538         set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1539         if (!set->tags[hctx->queue_num])
1540                 return NOTIFY_STOP;
1541
1542         hctx->tags = set->tags[hctx->queue_num];
1543         return NOTIFY_OK;
1544 }
1545
1546 static int blk_mq_hctx_notify(void *data, unsigned long action,
1547                               unsigned int cpu)
1548 {
1549         struct blk_mq_hw_ctx *hctx = data;
1550
1551         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1552                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1553         else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1554                 return blk_mq_hctx_cpu_online(hctx, cpu);
1555
1556         return NOTIFY_OK;
1557 }
1558
1559 static void blk_mq_exit_hw_queues(struct request_queue *q,
1560                 struct blk_mq_tag_set *set, int nr_queue)
1561 {
1562         struct blk_mq_hw_ctx *hctx;
1563         unsigned int i;
1564
1565         queue_for_each_hw_ctx(q, hctx, i) {
1566                 if (i == nr_queue)
1567                         break;
1568
1569                 if (set->ops->exit_hctx)
1570                         set->ops->exit_hctx(hctx, i);
1571
1572                 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1573                 kfree(hctx->ctxs);
1574                 blk_mq_free_bitmap(&hctx->ctx_map);
1575         }
1576
1577 }
1578
1579 static void blk_mq_free_hw_queues(struct request_queue *q,
1580                 struct blk_mq_tag_set *set)
1581 {
1582         struct blk_mq_hw_ctx *hctx;
1583         unsigned int i;
1584
1585         queue_for_each_hw_ctx(q, hctx, i) {
1586                 free_cpumask_var(hctx->cpumask);
1587                 set->ops->free_hctx(hctx, i);
1588         }
1589 }
1590
1591 static int blk_mq_init_hw_queues(struct request_queue *q,
1592                 struct blk_mq_tag_set *set)
1593 {
1594         struct blk_mq_hw_ctx *hctx;
1595         unsigned int i;
1596
1597         /*
1598          * Initialize hardware queues
1599          */
1600         queue_for_each_hw_ctx(q, hctx, i) {
1601                 int node;
1602
1603                 node = hctx->numa_node;
1604                 if (node == NUMA_NO_NODE)
1605                         node = hctx->numa_node = set->numa_node;
1606
1607                 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1608                 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1609                 spin_lock_init(&hctx->lock);
1610                 INIT_LIST_HEAD(&hctx->dispatch);
1611                 hctx->queue = q;
1612                 hctx->queue_num = i;
1613                 hctx->flags = set->flags;
1614                 hctx->cmd_size = set->cmd_size;
1615
1616                 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1617                                                 blk_mq_hctx_notify, hctx);
1618                 blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1619
1620                 hctx->tags = set->tags[i];
1621
1622                 /*
1623                  * Allocate space for all possible CPUs to avoid allocation at
1624                  * run time.
1625                  */
1626                 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1627                                                 GFP_KERNEL, node);
1628                 if (!hctx->ctxs)
1629                         break;
1630
1631                 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1632                         break;
1633
1634                 hctx->nr_ctx = 0;
1635
1636                 if (set->ops->init_hctx &&
1637                     set->ops->init_hctx(hctx, set->driver_data, i))
1638                         break;
1639         }
1640
1641         if (i == q->nr_hw_queues)
1642                 return 0;
1643
1644         /*
1645          * Init failed
1646          */
1647         blk_mq_exit_hw_queues(q, set, i);
1648
1649         return 1;
1650 }
1651
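/*
 * Initialize the per-CPU software queues: every possible CPU gets a zeroed
 * ctx, and CPUs that are currently online are added to the cpumask of the
 * hardware queue they map to.
 */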
1652 static void blk_mq_init_cpu_queues(struct request_queue *q,
1653                                    unsigned int nr_hw_queues)
1654 {
1655         unsigned int i;
1656
1657         for_each_possible_cpu(i) {
1658                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1659                 struct blk_mq_hw_ctx *hctx;
1660
1661                 memset(__ctx, 0, sizeof(*__ctx));
1662                 __ctx->cpu = i;
1663                 spin_lock_init(&__ctx->lock);
1664                 INIT_LIST_HEAD(&__ctx->rq_list);
1665                 __ctx->queue = q;
1666
1667                 /* If the CPU isn't online, it is mapped to the first hctx */
1668                 if (!cpu_online(i))
1669                         continue;
1670
1671                 hctx = q->mq_ops->map_queue(q, i);
1672                 cpumask_set_cpu(i, hctx->cpumask);
1673                 hctx->nr_ctx++;
1674
1675                 /*
1676                  * Set local node, IFF we have more than one hw queue. If
1677                  * not, we remain on the home node of the device
1678                  */
1679                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1680                         hctx->numa_node = cpu_to_node(i);
1681         }
1682 }
1683
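/*
 * (Re)build the software-to-hardware queue mapping for the CPUs that are
 * online right now. Hardware queues left with no software queues get their
 * tag/rq maps freed (re-allocated from the CPU-online notifier if needed
 * later); the others get their round-robin dispatch state reset.
 */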
1684 static void blk_mq_map_swqueue(struct request_queue *q)
1685 {
1686         unsigned int i;
1687         struct blk_mq_hw_ctx *hctx;
1688         struct blk_mq_ctx *ctx;
1689
1690         queue_for_each_hw_ctx(q, hctx, i) {
1691                 cpumask_clear(hctx->cpumask);
1692                 hctx->nr_ctx = 0;
1693         }
1694
1695         /*
1696          * Map software to hardware queues
1697          */
1698         queue_for_each_ctx(q, ctx, i) {
1699                 /* If the CPU isn't online, it is mapped to the first hctx */
1700                 if (!cpu_online(i))
1701                         continue;
1702
1703                 hctx = q->mq_ops->map_queue(q, i);
1704                 cpumask_set_cpu(i, hctx->cpumask);
1705                 ctx->index_hw = hctx->nr_ctx;
1706                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1707         }
1708
1709         queue_for_each_hw_ctx(q, hctx, i) {
1710                 /*
1711                  * If no software queues are mapped to this hardware queue,
1712                  * disable it and free the request entries.
1713                  */
1714                 if (!hctx->nr_ctx) {
1715                         struct blk_mq_tag_set *set = q->tag_set;
1716
1717                         if (set->tags[i]) {
1718                                 blk_mq_free_rq_map(set, set->tags[i], i);
1719                                 set->tags[i] = NULL;
1720                                 hctx->tags = NULL;
1721                         }
1722                         continue;
1723                 }
1724
1725                 /*
1726                  * Initialize batch round-robin counts
1727                  */
1728                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1729                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1730         }
1731 }
1732
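/*
 * Despite the name, this does not change the queue depth: it sets or clears
 * BLK_MQ_F_TAG_SHARED on every hardware queue of every request queue in the
 * set, depending on whether more than one queue now shares the tag set.
 * Each queue is frozen while its flags are updated.
 */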
1733 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1734 {
1735         struct blk_mq_hw_ctx *hctx;
1736         struct request_queue *q;
1737         bool shared;
1738         int i;
1739
1740         if (set->tag_list.next == set->tag_list.prev)
1741                 shared = false;
1742         else
1743                 shared = true;
1744
1745         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1746                 blk_mq_freeze_queue(q);
1747
1748                 queue_for_each_hw_ctx(q, hctx, i) {
1749                         if (shared)
1750                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1751                         else
1752                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1753                 }
1754                 blk_mq_unfreeze_queue(q);
1755         }
1756 }
1757
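/* Unlink a queue from its tag set and recompute the shared-tags state. */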
1758 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1759 {
1760         struct blk_mq_tag_set *set = q->tag_set;
1761
1762         blk_mq_freeze_queue(q);
1763
1764         mutex_lock(&set->tag_list_lock);
1765         list_del_init(&q->tag_set_list);
1766         blk_mq_update_tag_set_depth(set);
1767         mutex_unlock(&set->tag_list_lock);
1768
1769         blk_mq_unfreeze_queue(q);
1770 }
1771
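/* Link a queue into its tag set and recompute the shared-tags state. */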
1772 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1773                                      struct request_queue *q)
1774 {
1775         q->tag_set = set;
1776
1777         mutex_lock(&set->tag_list_lock);
1778         list_add_tail(&q->tag_set_list, &set->tag_list);
1779         blk_mq_update_tag_set_depth(set);
1780         mutex_unlock(&set->tag_list_lock);
1781 }
1782
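/*
 * Build a multiqueue request_queue on top of an allocated tag set: per-CPU
 * software queues, the hardware queue array, the CPU-to-queue map, flush
 * machinery and timeout handling. Rough usage sketch from a driver's point
 * of view (my_mq_ops and my_cmd are placeholder names; the ops must provide
 * at least queue_rq, map_queue, alloc_hctx and free_hctx, as enforced by
 * blk_mq_alloc_tag_set()):
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	err = blk_mq_alloc_tag_set(set);
 *	if (err)
 *		return err;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 *
 * Returns the new queue, or an ERR_PTR() on allocation failure.
 */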
1783 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1784 {
1785         struct blk_mq_hw_ctx **hctxs;
1786         struct blk_mq_ctx *ctx;
1787         struct request_queue *q;
1788         unsigned int *map;
1789         int i;
1790
1791         ctx = alloc_percpu(struct blk_mq_ctx);
1792         if (!ctx)
1793                 return ERR_PTR(-ENOMEM);
1794
1795         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1796                         set->numa_node);
1797
1798         if (!hctxs)
1799                 goto err_percpu;
1800
1801         map = blk_mq_make_queue_map(set);
1802         if (!map)
1803                 goto err_map;
1804
1805         for (i = 0; i < set->nr_hw_queues; i++) {
1806                 int node = blk_mq_hw_queue_to_node(map, i);
1807
1808                 hctxs[i] = set->ops->alloc_hctx(set, i, node);
1809                 if (!hctxs[i])
1810                         goto err_hctxs;
1811
1812                 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1813                         goto err_hctxs;
1814
1815                 atomic_set(&hctxs[i]->nr_active, 0);
1816                 hctxs[i]->numa_node = node;
1817                 hctxs[i]->queue_num = i;
1818         }
1819
1820         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1821         if (!q)
1822                 goto err_hctxs;
1823
1824         if (percpu_counter_init(&q->mq_usage_counter, 0))
1825                 goto err_map;
1826
1827         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1828         blk_queue_rq_timeout(q, 30000);
1829
1830         q->nr_queues = nr_cpu_ids;
1831         q->nr_hw_queues = set->nr_hw_queues;
1832         q->mq_map = map;
1833
1834         q->queue_ctx = ctx;
1835         q->queue_hw_ctx = hctxs;
1836
1837         q->mq_ops = set->ops;
1838         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1839
1840         q->sg_reserved_size = INT_MAX;
1841
1842         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1843         INIT_LIST_HEAD(&q->requeue_list);
1844         spin_lock_init(&q->requeue_lock);
1845
1846         if (q->nr_hw_queues > 1)
1847                 blk_queue_make_request(q, blk_mq_make_request);
1848         else
1849                 blk_queue_make_request(q, blk_sq_make_request);
1850
1851         blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1852         if (set->timeout)
1853                 blk_queue_rq_timeout(q, set->timeout);
1854
1855         /*
1856          * Do this after blk_queue_make_request() overrides it...
1857          */
1858         q->nr_requests = set->queue_depth;
1859
1860         if (set->ops->complete)
1861                 blk_queue_softirq_done(q, set->ops->complete);
1862
1863         blk_mq_init_flush(q);
1864         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1865
1866         q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1867                                 set->cmd_size, cache_line_size()),
1868                                 GFP_KERNEL);
1869         if (!q->flush_rq)
1870                 goto err_hw;
1871
1872         if (blk_mq_init_hw_queues(q, set))
1873                 goto err_flush_rq;
1874
1875         mutex_lock(&all_q_mutex);
1876         list_add_tail(&q->all_q_node, &all_q_list);
1877         mutex_unlock(&all_q_mutex);
1878
1879         blk_mq_add_queue_tag_set(set, q);
1880
1881         blk_mq_map_swqueue(q);
1882
1883         return q;
1884
1885 err_flush_rq:
1886         kfree(q->flush_rq);
1887 err_hw:
1888         blk_cleanup_queue(q);
1889 err_hctxs:
1890         kfree(map);
1891         for (i = 0; i < set->nr_hw_queues; i++) {
1892                 if (!hctxs[i])
1893                         break;
1894                 free_cpumask_var(hctxs[i]->cpumask);
1895                 set->ops->free_hctx(hctxs[i], i);
1896         }
1897 err_map:
1898         kfree(hctxs);
1899 err_percpu:
1900         free_percpu(ctx);
1901         return ERR_PTR(-ENOMEM);
1902 }
1903 EXPORT_SYMBOL(blk_mq_init_queue);
1904
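/*
 * Counterpart to blk_mq_init_queue(): detach the queue from its tag set,
 * tear down and free all hardware queues, and release the per-CPU contexts,
 * the hctx array and the CPU-to-queue map.
 */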
1905 void blk_mq_free_queue(struct request_queue *q)
1906 {
1907         struct blk_mq_tag_set   *set = q->tag_set;
1908
1909         blk_mq_del_queue_tag_set(q);
1910
1911         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1912         blk_mq_free_hw_queues(q, set);
1913
1914         percpu_counter_destroy(&q->mq_usage_counter);
1915
1916         free_percpu(q->queue_ctx);
1917         kfree(q->queue_hw_ctx);
1918         kfree(q->mq_map);
1919
1920         q->queue_ctx = NULL;
1921         q->queue_hw_ctx = NULL;
1922         q->mq_map = NULL;
1923
1924         mutex_lock(&all_q_mutex);
1925         list_del_init(&q->all_q_node);
1926         mutex_unlock(&all_q_mutex);
1927 }
1928
1929 /* Basically redo blk_mq_init_queue with queue frozen */
1930 static void blk_mq_queue_reinit(struct request_queue *q)
1931 {
1932         blk_mq_freeze_queue(q);
1933
1934         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1935
1936         /*
1937          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1938          * we should change hctx->numa_node according to the new topology (this
1939          * involves freeing and re-allocating memory; is it worth doing?)
1940          */
1941
1942         blk_mq_map_swqueue(q);
1943
1944         blk_mq_unfreeze_queue(q);
1945 }
1946
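/*
 * System-wide CPU hotplug notifier: on CPU_ONLINE/CPU_DEAD, walk every
 * blk-mq queue and rebuild its CPU-to-queue mapping under all_q_mutex.
 */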
1947 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1948                                       unsigned long action, void *hcpu)
1949 {
1950         struct request_queue *q;
1951
1952         /*
1953          * Before the new mappings are established, a hotadded CPU might
1954          * already start handling requests. This doesn't break anything, since
1955          * we map offline CPUs to the first hardware queue. We re-init the
1956          * queue below to get optimal settings.
1957          */
1958         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1959             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1960                 return NOTIFY_OK;
1961
1962         mutex_lock(&all_q_mutex);
1963         list_for_each_entry(q, &all_q_list, all_q_node)
1964                 blk_mq_queue_reinit(q);
1965         mutex_unlock(&all_q_mutex);
1966         return NOTIFY_OK;
1967 }
1968
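/*
 * Validate a driver-filled tag set and allocate its per-hardware-queue
 * request/tag maps. queue_rq, map_queue, alloc_hctx and free_hctx are
 * mandatory ops, and queue_depth must lie within
 * [reserved_tags + BLK_MQ_TAG_MIN, BLK_MQ_MAX_DEPTH]. Pair with
 * blk_mq_free_tag_set() on teardown.
 */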
1969 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1970 {
1971         int i;
1972
1973         if (!set->nr_hw_queues)
1974                 return -EINVAL;
1975         if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
1976                 return -EINVAL;
1977         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1978                 return -EINVAL;
1979
1980         if (!set->nr_hw_queues ||
1981             !set->ops->queue_rq || !set->ops->map_queue ||
1982             !set->ops->alloc_hctx || !set->ops->free_hctx)
1983                 return -EINVAL;
1984
1986         set->tags = kmalloc_node(set->nr_hw_queues *
1987                                  sizeof(struct blk_mq_tags *),
1988                                  GFP_KERNEL, set->numa_node);
1989         if (!set->tags)
1990                 goto out;
1991
1992         for (i = 0; i < set->nr_hw_queues; i++) {
1993                 set->tags[i] = blk_mq_init_rq_map(set, i);
1994                 if (!set->tags[i])
1995                         goto out_unwind;
1996         }
1997
1998         mutex_init(&set->tag_list_lock);
1999         INIT_LIST_HEAD(&set->tag_list);
2000
2001         return 0;
2002
2003 out_unwind:
2004         while (--i >= 0)
2005                 blk_mq_free_rq_map(set, set->tags[i], i);
2006 out:
2007         return -ENOMEM;
2008 }
2009 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2010
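/* Free every per-hardware-queue rq map still allocated, then the tags array. */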
2011 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2012 {
2013         int i;
2014
2015         for (i = 0; i < set->nr_hw_queues; i++) {
2016                 if (set->tags[i])
2017                         blk_mq_free_rq_map(set, set->tags[i], i);
2018         }
2019
2020         kfree(set->tags);
2021 }
2022 EXPORT_SYMBOL(blk_mq_free_tag_set);
2023
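/*
 * Change the effective queue depth at run time (e.g. via the sysfs
 * nr_requests attribute); the new value may not exceed the depth the tag
 * set was originally allocated with.
 */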
2024 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2025 {
2026         struct blk_mq_tag_set *set = q->tag_set;
2027         struct blk_mq_hw_ctx *hctx;
2028         int i, ret;
2029
2030         if (!set || nr > set->queue_depth)
2031                 return -EINVAL;
2032
2033         ret = 0;
2034         queue_for_each_hw_ctx(q, hctx, i) {
2035                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2036                 if (ret)
2037                         break;
2038         }
2039
2040         if (!ret)
2041                 q->nr_requests = nr;
2042
2043         return ret;
2044 }
2045
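/*
 * Holding all_q_mutex keeps blk_mq_queue_reinit_notify() from remapping
 * queues while the caller is in a hotplug-sensitive section.
 */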
2046 void blk_mq_disable_hotplug(void)
2047 {
2048         mutex_lock(&all_q_mutex);
2049 }
2050
2051 void blk_mq_enable_hotplug(void)
2052 {
2053         mutex_unlock(&all_q_mutex);
2054 }
2055
2056 static int __init blk_mq_init(void)
2057 {
2058         blk_mq_cpu_init();
2059
2060         /* Must run after percpu_counter_hotcpu_callback(); hence the lower (-10) notifier priority */
2061         hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
2062
2063         return 0;
2064 }
2065 subsys_initcall(blk_mq_init);