block/blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23 #include <linux/crash_dump.h>
24
25 #include <trace/events/block.h>
26
27 #include <linux/blk-mq.h>
28 #include "blk.h"
29 #include "blk-mq.h"
30 #include "blk-mq-tag.h"
31
32 static DEFINE_MUTEX(all_q_mutex);
33 static LIST_HEAD(all_q_list);
34
35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
36
37 /*
38  * Check if any of the ctx's have pending work in this hardware queue
39  */
40 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
41 {
42         unsigned int i;
43
44         for (i = 0; i < hctx->ctx_map.size; i++)
45                 if (hctx->ctx_map.map[i].word)
46                         return true;
47
48         return false;
49 }
50
51 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
52                                               struct blk_mq_ctx *ctx)
53 {
54         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
55 }
56
57 #define CTX_TO_BIT(hctx, ctx)   \
58         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
59
60 /*
61  * Mark this ctx as having pending work in this hardware queue
62  */
63 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
64                                      struct blk_mq_ctx *ctx)
65 {
66         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
67
68         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
69                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
70 }
71
72 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
73                                       struct blk_mq_ctx *ctx)
74 {
75         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
76
77         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
78 }
79
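/*
 * Take a reference on the queue's mq_usage_counter so a request can be
 * allocated. Returns 0 on success, -EBUSY if the queue is frozen and the
 * caller cannot sleep, -ENODEV if the queue is dying, or the error from
 * an interrupted wait for the queue to unfreeze.
 */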
80 static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
81 {
82         while (true) {
83                 int ret;
84
85                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
86                         return 0;
87
88                 if (!(gfp & __GFP_WAIT))
89                         return -EBUSY;
90
91                 ret = wait_event_interruptible(q->mq_freeze_wq,
92                                 !atomic_read(&q->mq_freeze_depth) ||
93                                 blk_queue_dying(q));
94                 if (blk_queue_dying(q))
95                         return -ENODEV;
96                 if (ret)
97                         return ret;
98         }
99 }
100
101 static void blk_mq_queue_exit(struct request_queue *q)
102 {
103         percpu_ref_put(&q->mq_usage_counter);
104 }
105
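/*
 * percpu_ref release callback: the last usage reference is gone, so wake
 * up anyone waiting in blk_mq_freeze_queue_wait().
 */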
106 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
107 {
108         struct request_queue *q =
109                 container_of(ref, struct request_queue, mq_usage_counter);
110
111         wake_up_all(&q->mq_freeze_wq);
112 }
113
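/*
 * Start freezing the queue: the first freezer kills mq_usage_counter so
 * blk_mq_queue_enter() no longer succeeds, then runs the hardware queues
 * so already-queued requests can be dispatched and drained.
 */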
114 void blk_mq_freeze_queue_start(struct request_queue *q)
115 {
116         int freeze_depth;
117
118         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
119         if (freeze_depth == 1) {
120                 percpu_ref_kill(&q->mq_usage_counter);
121                 blk_mq_run_hw_queues(q, false);
122         }
123 }
124 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
125
126 static void blk_mq_freeze_queue_wait(struct request_queue *q)
127 {
128         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
129 }
130
131 /*
132  * Guarantee no request is in use, so we can change any data structure of
133  * the queue afterward.
134  */
135 void blk_mq_freeze_queue(struct request_queue *q)
136 {
137         blk_mq_freeze_queue_start(q);
138         blk_mq_freeze_queue_wait(q);
139 }
140 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
141
142 void blk_mq_unfreeze_queue(struct request_queue *q)
143 {
144         int freeze_depth;
145
146         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
147         WARN_ON_ONCE(freeze_depth < 0);
148         if (!freeze_depth) {
149                 percpu_ref_reinit(&q->mq_usage_counter);
150                 wake_up_all(&q->mq_freeze_wq);
151         }
152 }
153 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
154
155 void blk_mq_wake_waiters(struct request_queue *q)
156 {
157         struct blk_mq_hw_ctx *hctx;
158         unsigned int i;
159
160         queue_for_each_hw_ctx(q, hctx, i)
161                 if (blk_mq_hw_queue_mapped(hctx))
162                         blk_mq_tag_wakeup_all(hctx->tags, true);
163
164         /*
165          * If we are called because the queue has now been marked as
166          * dying, we need to ensure that processes currently waiting on
167          * the queue are notified as well.
168          */
169         wake_up_all(&q->mq_freeze_wq);
170 }
171
172 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
173 {
174         return blk_mq_has_free_tags(hctx->tags);
175 }
176 EXPORT_SYMBOL(blk_mq_can_queue);
177
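/*
 * Initialize a request that was just handed out from the tag map: reset
 * the fields a previous user may have touched and account the allocation
 * against the software queue it was allocated from.
 */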
178 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
179                                struct request *rq, unsigned int rw_flags)
180 {
181         if (blk_queue_io_stat(q))
182                 rw_flags |= REQ_IO_STAT;
183
184         INIT_LIST_HEAD(&rq->queuelist);
185         /* csd/requeue_work/fifo_time is initialized before use */
186         rq->q = q;
187         rq->mq_ctx = ctx;
188         rq->cmd_flags |= rw_flags;
189         /* do not touch atomic flags, it needs atomic ops against the timer */
190         rq->cpu = -1;
191         INIT_HLIST_NODE(&rq->hash);
192         RB_CLEAR_NODE(&rq->rb_node);
193         rq->rq_disk = NULL;
194         rq->part = NULL;
195         rq->start_time = jiffies;
196 #ifdef CONFIG_BLK_CGROUP
197         rq->rl = NULL;
198         set_start_time_ns(rq);
199         rq->io_start_time_ns = 0;
200 #endif
201         rq->nr_phys_segments = 0;
202 #if defined(CONFIG_BLK_DEV_INTEGRITY)
203         rq->nr_integrity_segments = 0;
204 #endif
205         rq->special = NULL;
206         /* tag was already set */
207         rq->errors = 0;
208
209         rq->cmd = rq->__cmd;
210
211         rq->extra_len = 0;
212         rq->sense_len = 0;
213         rq->resid_len = 0;
214         rq->sense = NULL;
215
216         INIT_LIST_HEAD(&rq->timeout_list);
217         rq->timeout = 0;
218
219         rq->end_io = NULL;
220         rq->end_io_data = NULL;
221         rq->next_rq = NULL;
222
223         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
224 }
225
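/*
 * Grab a tag from the hardware queue and return the pre-allocated request
 * backing it, or NULL if no tag is available. If blk_mq_tag_busy() reports
 * shared tags, the request is marked in-flight and counted in nr_active.
 */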
226 static struct request *
227 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
228 {
229         struct request *rq;
230         unsigned int tag;
231
232         tag = blk_mq_get_tag(data);
233         if (tag != BLK_MQ_TAG_FAIL) {
234                 rq = data->hctx->tags->rqs[tag];
235
236                 if (blk_mq_tag_busy(data->hctx)) {
237                         rq->cmd_flags = REQ_MQ_INFLIGHT;
238                         atomic_inc(&data->hctx->nr_active);
239                 }
240
241                 rq->tag = tag;
242                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
243                 return rq;
244         }
245
246         return NULL;
247 }
248
249 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
250                 bool reserved)
251 {
252         struct blk_mq_ctx *ctx;
253         struct blk_mq_hw_ctx *hctx;
254         struct request *rq;
255         struct blk_mq_alloc_data alloc_data;
256         int ret;
257
258         ret = blk_mq_queue_enter(q, gfp);
259         if (ret)
260                 return ERR_PTR(ret);
261
262         ctx = blk_mq_get_ctx(q);
263         hctx = q->mq_ops->map_queue(q, ctx->cpu);
264         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
265                         reserved, ctx, hctx);
266
267         rq = __blk_mq_alloc_request(&alloc_data, rw);
268         if (!rq && (gfp & __GFP_WAIT)) {
269                 __blk_mq_run_hw_queue(hctx);
270                 blk_mq_put_ctx(ctx);
271
272                 ctx = blk_mq_get_ctx(q);
273                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
274                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
275                                 hctx);
276                 rq = __blk_mq_alloc_request(&alloc_data, rw);
277                 ctx = alloc_data.ctx;
278         }
279         blk_mq_put_ctx(ctx);
280         if (!rq) {
281                 blk_mq_queue_exit(q);
282                 return ERR_PTR(-EWOULDBLOCK);
283         }
284         return rq;
285 }
286 EXPORT_SYMBOL(blk_mq_alloc_request);
287
288 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
289                                   struct blk_mq_ctx *ctx, struct request *rq)
290 {
291         const int tag = rq->tag;
292         struct request_queue *q = rq->q;
293
294         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
295                 atomic_dec(&hctx->nr_active);
296         rq->cmd_flags = 0;
297
298         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
299         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
300         blk_mq_queue_exit(q);
301 }
302
303 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
304 {
305         struct blk_mq_ctx *ctx = rq->mq_ctx;
306
307         ctx->rq_completed[rq_is_sync(rq)]++;
308         __blk_mq_free_request(hctx, ctx, rq);
309
310 }
311 EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
312
313 void blk_mq_free_request(struct request *rq)
314 {
315         struct blk_mq_hw_ctx *hctx;
316         struct request_queue *q = rq->q;
317
318         hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
319         blk_mq_free_hctx_request(hctx, rq);
320 }
321 EXPORT_SYMBOL_GPL(blk_mq_free_request);
322
323 inline void __blk_mq_end_request(struct request *rq, int error)
324 {
325         blk_account_io_done(rq);
326
327         if (rq->end_io) {
328                 rq->end_io(rq, error);
329         } else {
330                 if (unlikely(blk_bidi_rq(rq)))
331                         blk_mq_free_request(rq->next_rq);
332                 blk_mq_free_request(rq);
333         }
334 }
335 EXPORT_SYMBOL(__blk_mq_end_request);
336
337 void blk_mq_end_request(struct request *rq, int error)
338 {
339         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
340                 BUG();
341         __blk_mq_end_request(rq, error);
342 }
343 EXPORT_SYMBOL(blk_mq_end_request);
344
345 static void __blk_mq_complete_request_remote(void *data)
346 {
347         struct request *rq = data;
348
349         rq->q->softirq_done_fn(rq);
350 }
351
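/*
 * With QUEUE_FLAG_SAME_COMP set, complete the request on the CPU that
 * submitted it: if that CPU is online, differs from the current one and
 * either QUEUE_FLAG_SAME_FORCE is set or the two CPUs do not share a
 * cache, bounce the completion over as an IPI. Otherwise run
 * softirq_done_fn() locally.
 */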
352 static void blk_mq_ipi_complete_request(struct request *rq)
353 {
354         struct blk_mq_ctx *ctx = rq->mq_ctx;
355         bool shared = false;
356         int cpu;
357
358         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
359                 rq->q->softirq_done_fn(rq);
360                 return;
361         }
362
363         cpu = get_cpu();
364         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
365                 shared = cpus_share_cache(cpu, ctx->cpu);
366
367         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
368                 rq->csd.func = __blk_mq_complete_request_remote;
369                 rq->csd.info = rq;
370                 rq->csd.flags = 0;
371                 smp_call_function_single_async(ctx->cpu, &rq->csd);
372         } else {
373                 rq->q->softirq_done_fn(rq);
374         }
375         put_cpu();
376 }
377
378 void __blk_mq_complete_request(struct request *rq)
379 {
380         struct request_queue *q = rq->q;
381
382         if (!q->softirq_done_fn)
383                 blk_mq_end_request(rq, rq->errors);
384         else
385                 blk_mq_ipi_complete_request(rq);
386 }
387
388 /**
389  * blk_mq_complete_request - end I/O on a request
390  * @rq:         the request being processed
391  *
392  * Description:
393  *      Ends all I/O on a request. It does not handle partial completions.
394  *      The actual completion happens out-of-order, through an IPI handler.
395  **/
396 void blk_mq_complete_request(struct request *rq)
397 {
398         struct request_queue *q = rq->q;
399
400         if (unlikely(blk_should_fake_timeout(q)))
401                 return;
402         if (!blk_mark_rq_complete(rq))
403                 __blk_mq_complete_request(rq);
404 }
405 EXPORT_SYMBOL(blk_mq_complete_request);
406
407 int blk_mq_request_started(struct request *rq)
408 {
409         return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
410 }
411 EXPORT_SYMBOL_GPL(blk_mq_request_started);
412
413 void blk_mq_start_request(struct request *rq)
414 {
415         struct request_queue *q = rq->q;
416
417         trace_block_rq_issue(q, rq);
418
419         rq->resid_len = blk_rq_bytes(rq);
420         if (unlikely(blk_bidi_rq(rq)))
421                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
422
423         blk_add_timer(rq);
424
425         /*
426          * Ensure that ->deadline is visible before we set the started
427          * flag and clear the completed flag.
428          */
429         smp_mb__before_atomic();
430
431         /*
432          * Mark us as started and clear complete. Complete might have been
433          * set if requeue raced with timeout, which then marked it as
434          * complete. So be sure to clear complete again when we start
435          * the request, otherwise we'll ignore the completion event.
436          */
437         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
438                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
439         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
440                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
441
442         if (q->dma_drain_size && blk_rq_bytes(rq)) {
443                 /*
444                  * Make sure space for the drain appears.  We know we can do
445                  * this because max_hw_segments has been adjusted to be one
446                  * fewer than the device can handle.
447                  */
448                 rq->nr_phys_segments++;
449         }
450 }
451 EXPORT_SYMBOL(blk_mq_start_request);
452
453 static void __blk_mq_requeue_request(struct request *rq)
454 {
455         struct request_queue *q = rq->q;
456
457         trace_block_rq_requeue(q, rq);
458
459         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
460                 if (q->dma_drain_size && blk_rq_bytes(rq))
461                         rq->nr_phys_segments--;
462         }
463 }
464
465 void blk_mq_requeue_request(struct request *rq)
466 {
467         __blk_mq_requeue_request(rq);
468
469         BUG_ON(blk_queued_rq(rq));
470         blk_mq_add_to_requeue_list(rq, true);
471 }
472 EXPORT_SYMBOL(blk_mq_requeue_request);
473
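/*
 * Work handler for the requeue list: splice q->requeue_list and reinsert
 * the requests, putting those marked with REQ_SOFTBARRIER at the head,
 * then restart the hardware queues.
 */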
474 static void blk_mq_requeue_work(struct work_struct *work)
475 {
476         struct request_queue *q =
477                 container_of(work, struct request_queue, requeue_work);
478         LIST_HEAD(rq_list);
479         struct request *rq, *next;
480         unsigned long flags;
481
482         spin_lock_irqsave(&q->requeue_lock, flags);
483         list_splice_init(&q->requeue_list, &rq_list);
484         spin_unlock_irqrestore(&q->requeue_lock, flags);
485
486         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
487                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
488                         continue;
489
490                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
491                 list_del_init(&rq->queuelist);
492                 blk_mq_insert_request(rq, true, false, false);
493         }
494
495         while (!list_empty(&rq_list)) {
496                 rq = list_entry(rq_list.next, struct request, queuelist);
497                 list_del_init(&rq->queuelist);
498                 blk_mq_insert_request(rq, false, false, false);
499         }
500
501         /*
502          * Use the start variant of queue running here, so that running
503          * the requeue work will kick stopped queues.
504          */
505         blk_mq_start_hw_queues(q);
506 }
507
508 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
509 {
510         struct request_queue *q = rq->q;
511         unsigned long flags;
512
513         /*
514          * We abuse this flag that is otherwise used by the I/O scheduler to
515          * request head insertion from the workqueue.
516          */
517         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
518
519         spin_lock_irqsave(&q->requeue_lock, flags);
520         if (at_head) {
521                 rq->cmd_flags |= REQ_SOFTBARRIER;
522                 list_add(&rq->queuelist, &q->requeue_list);
523         } else {
524                 list_add_tail(&rq->queuelist, &q->requeue_list);
525         }
526         spin_unlock_irqrestore(&q->requeue_lock, flags);
527 }
528 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
529
530 void blk_mq_cancel_requeue_work(struct request_queue *q)
531 {
532         cancel_work_sync(&q->requeue_work);
533 }
534 EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
535
536 void blk_mq_kick_requeue_list(struct request_queue *q)
537 {
538         kblockd_schedule_work(&q->requeue_work);
539 }
540 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
541
542 void blk_mq_abort_requeue_list(struct request_queue *q)
543 {
544         unsigned long flags;
545         LIST_HEAD(rq_list);
546
547         spin_lock_irqsave(&q->requeue_lock, flags);
548         list_splice_init(&q->requeue_list, &rq_list);
549         spin_unlock_irqrestore(&q->requeue_lock, flags);
550
551         while (!list_empty(&rq_list)) {
552                 struct request *rq;
553
554                 rq = list_first_entry(&rq_list, struct request, queuelist);
555                 list_del_init(&rq->queuelist);
556                 rq->errors = -EIO;
557                 blk_mq_end_request(rq, rq->errors);
558         }
559 }
560 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
561
562 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
563 {
564         return tags->rqs[tag];
565 }
566 EXPORT_SYMBOL(blk_mq_tag_to_rq);
567
568 struct blk_mq_timeout_data {
569         unsigned long next;
570         unsigned int next_set;
571 };
572
573 void blk_mq_rq_timed_out(struct request *req, bool reserved)
574 {
575         struct blk_mq_ops *ops = req->q->mq_ops;
576         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
577
578         /*
579          * We know that complete is set at this point. If STARTED isn't set
580          * anymore, then the request isn't active and the "timeout" should
581          * just be ignored. This can happen due to the bitflag ordering.
582          * Timeout first checks if STARTED is set, and if it is, assumes
583          * the request is active. But if we race with completion, then
584          * both flags will get cleared. So check here again, and ignore
585          * a timeout event with a request that isn't active.
586          */
587         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
588                 return;
589
590         if (ops->timeout)
591                 ret = ops->timeout(req, reserved);
592
593         switch (ret) {
594         case BLK_EH_HANDLED:
595                 __blk_mq_complete_request(req);
596                 break;
597         case BLK_EH_RESET_TIMER:
598                 blk_add_timer(req);
599                 blk_clear_rq_complete(req);
600                 break;
601         case BLK_EH_NOT_HANDLED:
602                 break;
603         default:
604                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
605                 break;
606         }
607 }
608
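/*
 * Busy-tag iterator callback for the timeout handler: requests that were
 * never started are only ended (with -EIO) if the queue is dying; started
 * requests past their deadline are timed out, and otherwise the earliest
 * pending deadline is recorded for re-arming the timer.
 */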
609 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
610                 struct request *rq, void *priv, bool reserved)
611 {
612         struct blk_mq_timeout_data *data = priv;
613
614         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
615                 /*
616                  * If a request wasn't started before the queue was
617                  * marked dying, kill it here or it'll go unnoticed.
618                  */
619                 if (unlikely(blk_queue_dying(rq->q))) {
620                         rq->errors = -EIO;
621                         blk_mq_complete_request(rq);
622                 }
623                 return;
624         }
625         if (rq->cmd_flags & REQ_NO_TIMEOUT)
626                 return;
627
628         if (time_after_eq(jiffies, rq->deadline)) {
629                 if (!blk_mark_rq_complete(rq))
630                         blk_mq_rq_timed_out(rq, reserved);
631         } else if (!data->next_set || time_after(data->next, rq->deadline)) {
632                 data->next = rq->deadline;
633                 data->next_set = 1;
634         }
635 }
636
637 static void blk_mq_rq_timer(unsigned long priv)
638 {
639         struct request_queue *q = (struct request_queue *)priv;
640         struct blk_mq_timeout_data data = {
641                 .next           = 0,
642                 .next_set       = 0,
643         };
644         struct blk_mq_hw_ctx *hctx;
645         int i;
646
647         queue_for_each_hw_ctx(q, hctx, i) {
648                 /*
649                  * If no software queues are currently mapped to this
650                  * hardware queue, there's nothing to check
651                  */
652                 if (!blk_mq_hw_queue_mapped(hctx))
653                         continue;
654
655                 blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
656         }
657
658         if (data.next_set) {
659                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
660                 mod_timer(&q->timeout, data.next);
661         } else {
662                 queue_for_each_hw_ctx(q, hctx, i) {
663                         /* the hctx may be unmapped, so check it here */
664                         if (blk_mq_hw_queue_mapped(hctx))
665                                 blk_mq_tag_idle(hctx);
666                 }
667         }
668 }
669
670 /*
671  * Reverse check our software queue for entries that we could potentially
672  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
673  * too much time checking for merges.
674  */
675 static bool blk_mq_attempt_merge(struct request_queue *q,
676                                  struct blk_mq_ctx *ctx, struct bio *bio)
677 {
678         struct request *rq;
679         int checked = 8;
680
681         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
682                 int el_ret;
683
684                 if (!checked--)
685                         break;
686
687                 if (!blk_rq_merge_ok(rq, bio))
688                         continue;
689
690                 el_ret = blk_try_merge(rq, bio);
691                 if (el_ret == ELEVATOR_BACK_MERGE) {
692                         if (bio_attempt_back_merge(q, rq, bio)) {
693                                 ctx->rq_merged++;
694                                 return true;
695                         }
696                         break;
697                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
698                         if (bio_attempt_front_merge(q, rq, bio)) {
699                                 ctx->rq_merged++;
700                                 return true;
701                         }
702                         break;
703                 }
704         }
705
706         return false;
707 }
708
709 /*
710  * Process software queues that have been marked busy, splicing them
711  * to the for-dispatch list.
712  */
713 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
714 {
715         struct blk_mq_ctx *ctx;
716         int i;
717
718         for (i = 0; i < hctx->ctx_map.size; i++) {
719                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
720                 unsigned int off, bit;
721
722                 if (!bm->word)
723                         continue;
724
725                 bit = 0;
726                 off = i * hctx->ctx_map.bits_per_word;
727                 do {
728                         bit = find_next_bit(&bm->word, bm->depth, bit);
729                         if (bit >= bm->depth)
730                                 break;
731
732                         ctx = hctx->ctxs[bit + off];
733                         clear_bit(bit, &bm->word);
734                         spin_lock(&ctx->lock);
735                         list_splice_tail_init(&ctx->rq_list, list);
736                         spin_unlock(&ctx->lock);
737
738                         bit++;
739                 } while (1);
740         }
741 }
742
743 /*
744  * Run this hardware queue, pulling any software queues mapped to it in.
745  * Note that this function currently has various problems around ordering
746  * of IO. In particular, we'd like FIFO behaviour on handling existing
747  * items on the hctx->dispatch list. Ignore that for now.
748  */
749 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
750 {
751         struct request_queue *q = hctx->queue;
752         struct request *rq;
753         LIST_HEAD(rq_list);
754         LIST_HEAD(driver_list);
755         struct list_head *dptr;
756         int queued;
757
758         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
759
760         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
761                 return;
762
763         hctx->run++;
764
765         /*
766          * Touch any software queue that has pending entries.
767          */
768         flush_busy_ctxs(hctx, &rq_list);
769
770         /*
771          * If we have previous entries on our dispatch list, grab them
772          * and stuff them at the front for more fair dispatch.
773          */
774         if (!list_empty_careful(&hctx->dispatch)) {
775                 spin_lock(&hctx->lock);
776                 if (!list_empty(&hctx->dispatch))
777                         list_splice_init(&hctx->dispatch, &rq_list);
778                 spin_unlock(&hctx->lock);
779         }
780
781         /*
782          * Start off with dptr being NULL, so we start the first request
783          * immediately, even if we have more pending.
784          */
785         dptr = NULL;
786
787         /*
788          * Now process all the entries, sending them to the driver.
789          */
790         queued = 0;
791         while (!list_empty(&rq_list)) {
792                 struct blk_mq_queue_data bd;
793                 int ret;
794
795                 rq = list_first_entry(&rq_list, struct request, queuelist);
796                 list_del_init(&rq->queuelist);
797
798                 bd.rq = rq;
799                 bd.list = dptr;
800                 bd.last = list_empty(&rq_list);
801
802                 ret = q->mq_ops->queue_rq(hctx, &bd);
803                 switch (ret) {
804                 case BLK_MQ_RQ_QUEUE_OK:
805                         queued++;
806                         continue;
807                 case BLK_MQ_RQ_QUEUE_BUSY:
808                         list_add(&rq->queuelist, &rq_list);
809                         __blk_mq_requeue_request(rq);
810                         break;
811                 default:
812                         pr_err("blk-mq: bad return on queue: %d\n", ret);
813                 case BLK_MQ_RQ_QUEUE_ERROR:
814                         rq->errors = -EIO;
815                         blk_mq_end_request(rq, rq->errors);
816                         break;
817                 }
818
819                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
820                         break;
821
822                 /*
823                  * We've done the first request. If we have more than 1
824                  * left in the list, set dptr to defer issue.
825                  */
826                 if (!dptr && rq_list.next != rq_list.prev)
827                         dptr = &driver_list;
828         }
829
830         if (!queued)
831                 hctx->dispatched[0]++;
832         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
833                 hctx->dispatched[ilog2(queued) + 1]++;
834
835         /*
836          * Any items that need requeuing? Stuff them into hctx->dispatch,
837          * that is where we will continue on next queue run.
838          */
839         if (!list_empty(&rq_list)) {
840                 spin_lock(&hctx->lock);
841                 list_splice(&rq_list, &hctx->dispatch);
842                 spin_unlock(&hctx->lock);
843                 /*
844                  * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
845                  * it's possible the queue is stopped and restarted again
846                  * before this. Queue restart will dispatch requests. And since
847                  * requests in rq_list aren't added into hctx->dispatch yet,
848                  * the requests in rq_list might get lost.
849                  *
850                  * blk_mq_run_hw_queue() already checks the STOPPED bit
851                  **/
852                 blk_mq_run_hw_queue(hctx, true);
853         }
854 }
855
856 /*
857  * It'd be great if the workqueue API had a way to pass
858  * in a mask and had some smarts for more clever placement.
859  * For now we just round-robin here, switching for every
860  * BLK_MQ_CPU_WORK_BATCH queued items.
861  */
862 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
863 {
864         if (hctx->queue->nr_hw_queues == 1)
865                 return WORK_CPU_UNBOUND;
866
867         if (--hctx->next_cpu_batch <= 0) {
868                 int cpu = hctx->next_cpu, next_cpu;
869
870                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
871                 if (next_cpu >= nr_cpu_ids)
872                         next_cpu = cpumask_first(hctx->cpumask);
873
874                 hctx->next_cpu = next_cpu;
875                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
876
877                 return cpu;
878         }
879
880         return hctx->next_cpu;
881 }
882
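/*
 * Run the hardware queue unless it is stopped or has no mapped CPUs. If
 * the caller did not ask for async execution and is on a CPU mapped to
 * this hctx, run it directly; otherwise schedule run_work through
 * kblockd on the CPU picked by blk_mq_hctx_next_cpu().
 */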
883 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
884 {
885         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
886             !blk_mq_hw_queue_mapped(hctx)))
887                 return;
888
889         if (!async) {
890                 int cpu = get_cpu();
891                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
892                         __blk_mq_run_hw_queue(hctx);
893                         put_cpu();
894                         return;
895                 }
896
897                 put_cpu();
898         }
899
900         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
901                         &hctx->run_work, 0);
902 }
903
904 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
905 {
906         struct blk_mq_hw_ctx *hctx;
907         int i;
908
909         queue_for_each_hw_ctx(q, hctx, i) {
910                 if ((!blk_mq_hctx_has_pending(hctx) &&
911                     list_empty_careful(&hctx->dispatch)) ||
912                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
913                         continue;
914
915                 blk_mq_run_hw_queue(hctx, async);
916         }
917 }
918 EXPORT_SYMBOL(blk_mq_run_hw_queues);
919
920 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
921 {
922         cancel_delayed_work(&hctx->run_work);
923         cancel_delayed_work(&hctx->delay_work);
924         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
925 }
926 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
927
928 void blk_mq_stop_hw_queues(struct request_queue *q)
929 {
930         struct blk_mq_hw_ctx *hctx;
931         int i;
932
933         queue_for_each_hw_ctx(q, hctx, i)
934                 blk_mq_stop_hw_queue(hctx);
935 }
936 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
937
938 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
939 {
940         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
941
942         blk_mq_run_hw_queue(hctx, false);
943 }
944 EXPORT_SYMBOL(blk_mq_start_hw_queue);
945
946 void blk_mq_start_hw_queues(struct request_queue *q)
947 {
948         struct blk_mq_hw_ctx *hctx;
949         int i;
950
951         queue_for_each_hw_ctx(q, hctx, i)
952                 blk_mq_start_hw_queue(hctx);
953 }
954 EXPORT_SYMBOL(blk_mq_start_hw_queues);
955
956 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
957 {
958         struct blk_mq_hw_ctx *hctx;
959         int i;
960
961         queue_for_each_hw_ctx(q, hctx, i) {
962                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
963                         continue;
964
965                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
966                 blk_mq_run_hw_queue(hctx, async);
967         }
968 }
969 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
970
971 static void blk_mq_run_work_fn(struct work_struct *work)
972 {
973         struct blk_mq_hw_ctx *hctx;
974
975         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
976
977         __blk_mq_run_hw_queue(hctx);
978 }
979
980 static void blk_mq_delay_work_fn(struct work_struct *work)
981 {
982         struct blk_mq_hw_ctx *hctx;
983
984         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
985
986         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
987                 __blk_mq_run_hw_queue(hctx);
988 }
989
990 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
991 {
992         if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
993                 return;
994
995         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
996                         &hctx->delay_work, msecs_to_jiffies(msecs));
997 }
998 EXPORT_SYMBOL(blk_mq_delay_queue);
999
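/*
 * Add the request to the software queue's rq_list, optionally at the
 * head, and mark that ctx as pending in the hardware queue's ctx_map.
 * The caller must hold ctx->lock.
 */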
1000 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
1001                                     struct request *rq, bool at_head)
1002 {
1003         struct blk_mq_ctx *ctx = rq->mq_ctx;
1004
1005         trace_block_rq_insert(hctx->queue, rq);
1006
1007         if (at_head)
1008                 list_add(&rq->queuelist, &ctx->rq_list);
1009         else
1010                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1011
1012         blk_mq_hctx_mark_pending(hctx, ctx);
1013 }
1014
1015 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1016                 bool async)
1017 {
1018         struct request_queue *q = rq->q;
1019         struct blk_mq_hw_ctx *hctx;
1020         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
1021
1022         current_ctx = blk_mq_get_ctx(q);
1023         if (!cpu_online(ctx->cpu))
1024                 rq->mq_ctx = ctx = current_ctx;
1025
1026         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1027
1028         spin_lock(&ctx->lock);
1029         __blk_mq_insert_request(hctx, rq, at_head);
1030         spin_unlock(&ctx->lock);
1031
1032         if (run_queue)
1033                 blk_mq_run_hw_queue(hctx, async);
1034
1035         blk_mq_put_ctx(current_ctx);
1036 }
1037
1038 static void blk_mq_insert_requests(struct request_queue *q,
1039                                      struct blk_mq_ctx *ctx,
1040                                      struct list_head *list,
1041                                      int depth,
1042                                      bool from_schedule)
1043
1044 {
1045         struct blk_mq_hw_ctx *hctx;
1046         struct blk_mq_ctx *current_ctx;
1047
1048         trace_block_unplug(q, depth, !from_schedule);
1049
1050         current_ctx = blk_mq_get_ctx(q);
1051
1052         if (!cpu_online(ctx->cpu))
1053                 ctx = current_ctx;
1054         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1055
1056         /*
1057          * preemption doesn't flush plug list, so it's possible ctx->cpu is
1058          * offline now
1059          */
1060         spin_lock(&ctx->lock);
1061         while (!list_empty(list)) {
1062                 struct request *rq;
1063
1064                 rq = list_first_entry(list, struct request, queuelist);
1065                 list_del_init(&rq->queuelist);
1066                 rq->mq_ctx = ctx;
1067                 __blk_mq_insert_request(hctx, rq, false);
1068         }
1069         spin_unlock(&ctx->lock);
1070
1071         blk_mq_run_hw_queue(hctx, from_schedule);
1072         blk_mq_put_ctx(current_ctx);
1073 }
1074
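/*
 * list_sort() comparator used when flushing a plug: order requests by
 * software queue context first, then by starting sector.
 */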
1075 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1076 {
1077         struct request *rqa = container_of(a, struct request, queuelist);
1078         struct request *rqb = container_of(b, struct request, queuelist);
1079
1080         return !(rqa->mq_ctx < rqb->mq_ctx ||
1081                  (rqa->mq_ctx == rqb->mq_ctx &&
1082                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1083 }
1084
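/*
 * Flush a task plug: sort the plugged requests so that requests for the
 * same software queue are adjacent, then hand each same-ctx run to
 * blk_mq_insert_requests() as one batch.
 */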
1085 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1086 {
1087         struct blk_mq_ctx *this_ctx;
1088         struct request_queue *this_q;
1089         struct request *rq;
1090         LIST_HEAD(list);
1091         LIST_HEAD(ctx_list);
1092         unsigned int depth;
1093
1094         list_splice_init(&plug->mq_list, &list);
1095
1096         list_sort(NULL, &list, plug_ctx_cmp);
1097
1098         this_q = NULL;
1099         this_ctx = NULL;
1100         depth = 0;
1101
1102         while (!list_empty(&list)) {
1103                 rq = list_entry_rq(list.next);
1104                 list_del_init(&rq->queuelist);
1105                 BUG_ON(!rq->q);
1106                 if (rq->mq_ctx != this_ctx) {
1107                         if (this_ctx) {
1108                                 blk_mq_insert_requests(this_q, this_ctx,
1109                                                         &ctx_list, depth,
1110                                                         from_schedule);
1111                         }
1112
1113                         this_ctx = rq->mq_ctx;
1114                         this_q = rq->q;
1115                         depth = 0;
1116                 }
1117
1118                 depth++;
1119                 list_add_tail(&rq->queuelist, &ctx_list);
1120         }
1121
1122         /*
1123          * If 'this_ctx' is set, we know we have entries to complete
1124          * on 'ctx_list'. Do those.
1125          */
1126         if (this_ctx) {
1127                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1128                                        from_schedule);
1129         }
1130 }
1131
1132 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1133 {
1134         init_request_from_bio(rq, bio);
1135
1136         if (blk_do_io_stat(rq))
1137                 blk_account_io_start(rq, 1);
1138 }
1139
1140 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1141 {
1142         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1143                 !blk_queue_nomerges(hctx->queue);
1144 }
1145
1146 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1147                                          struct blk_mq_ctx *ctx,
1148                                          struct request *rq, struct bio *bio)
1149 {
1150         if (!hctx_allow_merges(hctx)) {
1151                 blk_mq_bio_to_request(rq, bio);
1152                 spin_lock(&ctx->lock);
1153 insert_rq:
1154                 __blk_mq_insert_request(hctx, rq, false);
1155                 spin_unlock(&ctx->lock);
1156                 return false;
1157         } else {
1158                 struct request_queue *q = hctx->queue;
1159
1160                 spin_lock(&ctx->lock);
1161                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1162                         blk_mq_bio_to_request(rq, bio);
1163                         goto insert_rq;
1164                 }
1165
1166                 spin_unlock(&ctx->lock);
1167                 __blk_mq_free_request(hctx, ctx, rq);
1168                 return true;
1169         }
1170 }
1171
1172 struct blk_map_ctx {
1173         struct blk_mq_hw_ctx *hctx;
1174         struct blk_mq_ctx *ctx;
1175 };
1176
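/*
 * Map the bio to the current CPU's software queue and its hardware queue,
 * then allocate a request for it. A GFP_ATOMIC tag allocation is tried
 * first; if that fails, the hardware queue is run to free up tags and the
 * allocation is retried with __GFP_WAIT.
 */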
1177 static struct request *blk_mq_map_request(struct request_queue *q,
1178                                           struct bio *bio,
1179                                           struct blk_map_ctx *data)
1180 {
1181         struct blk_mq_hw_ctx *hctx;
1182         struct blk_mq_ctx *ctx;
1183         struct request *rq;
1184         int rw = bio_data_dir(bio);
1185         struct blk_mq_alloc_data alloc_data;
1186
1187         if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
1188                 bio_io_error(bio);
1189                 return NULL;
1190         }
1191
1192         ctx = blk_mq_get_ctx(q);
1193         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1194
1195         if (rw_is_sync(bio->bi_rw))
1196                 rw |= REQ_SYNC;
1197
1198         trace_block_getrq(q, bio, rw);
1199         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1200                         hctx);
1201         rq = __blk_mq_alloc_request(&alloc_data, rw);
1202         if (unlikely(!rq)) {
1203                 __blk_mq_run_hw_queue(hctx);
1204                 blk_mq_put_ctx(ctx);
1205                 trace_block_sleeprq(q, bio, rw);
1206
1207                 ctx = blk_mq_get_ctx(q);
1208                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1209                 blk_mq_set_alloc_data(&alloc_data, q,
1210                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1211                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1212                 ctx = alloc_data.ctx;
1213                 hctx = alloc_data.hctx;
1214         }
1215
1216         hctx->queued++;
1217         data->hctx = hctx;
1218         data->ctx = ctx;
1219         return rq;
1220 }
1221
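/*
 * Issue a single request directly to the driver, bypassing the software
 * queues. Returns 0 if the driver accepted it or it was ended with an
 * error, and -1 on BLK_MQ_RQ_QUEUE_BUSY so the caller can fall back to
 * normal insertion.
 */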
1222 static int blk_mq_direct_issue_request(struct request *rq)
1223 {
1224         int ret;
1225         struct request_queue *q = rq->q;
1226         struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
1227                         rq->mq_ctx->cpu);
1228         struct blk_mq_queue_data bd = {
1229                 .rq = rq,
1230                 .list = NULL,
1231                 .last = 1
1232         };
1233
1234         /*
1235          * If the driver queued it OK, we are done. For an error, kill it.
1236          * Any other return (busy) means the caller just adds it back to
1237          * the list, as it previously would have done.
1238          */
1239         ret = q->mq_ops->queue_rq(hctx, &bd);
1240         if (ret == BLK_MQ_RQ_QUEUE_OK)
1241                 return 0;
1242         else {
1243                 __blk_mq_requeue_request(rq);
1244
1245                 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1246                         rq->errors = -EIO;
1247                         blk_mq_end_request(rq, rq->errors);
1248                         return 0;
1249                 }
1250                 return -1;
1251         }
1252 }
1253
1254 /*
1255  * Multiple hardware queue variant. This will not use per-process plugs,
1256  * but will attempt to bypass the hctx queueing if we can go straight to
1257  * hardware for SYNC IO.
1258  */
1259 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1260 {
1261         const int is_sync = rw_is_sync(bio->bi_rw);
1262         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1263         struct blk_map_ctx data;
1264         struct request *rq;
1265         unsigned int request_count = 0;
1266         struct blk_plug *plug;
1267         struct request *same_queue_rq = NULL;
1268
1269         blk_queue_bounce(q, &bio);
1270
1271         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1272                 bio_io_error(bio);
1273                 return;
1274         }
1275
1276         blk_queue_split(q, &bio, q->bio_split);
1277
1278         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1279             blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1280                 return;
1281
1282         rq = blk_mq_map_request(q, bio, &data);
1283         if (unlikely(!rq))
1284                 return;
1285
1286         if (unlikely(is_flush_fua)) {
1287                 blk_mq_bio_to_request(rq, bio);
1288                 blk_insert_flush(rq);
1289                 goto run_queue;
1290         }
1291
1292         plug = current->plug;
1293         /*
1294          * If the driver supports deferred issue based on 'last', then
1295          * queue it up like normal since we can potentially save some
1296          * CPU this way.
1297          */
1298         if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1299             !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1300                 struct request *old_rq = NULL;
1301
1302                 blk_mq_bio_to_request(rq, bio);
1303
1304                 /*
1305          * we do limited plugging. If the bio can be merged, do the merge.
1306          * Otherwise the existing request in the plug list will be
1307          * issued, so the plug list will have at most one request.
1308                  */
1309                 if (plug) {
1310                         /*
1311                          * The plug list might get flushed before this. If that
1312                          * happens, same_queue_rq is invalid and the plug list is empty
1313                          **/
1314                         if (same_queue_rq && !list_empty(&plug->mq_list)) {
1315                                 old_rq = same_queue_rq;
1316                                 list_del_init(&old_rq->queuelist);
1317                         }
1318                         list_add_tail(&rq->queuelist, &plug->mq_list);
1319                 } else /* is_sync */
1320                         old_rq = rq;
1321                 blk_mq_put_ctx(data.ctx);
1322                 if (!old_rq)
1323                         return;
1324                 if (!blk_mq_direct_issue_request(old_rq))
1325                         return;
1326                 blk_mq_insert_request(old_rq, false, true, true);
1327                 return;
1328         }
1329
1330         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1331                 /*
1332                  * For a SYNC request, send it to the hardware immediately. For
1333                  * an ASYNC request, just ensure that we run it later on. The
1334                  * latter allows for merging opportunities and more efficient
1335                  * dispatching.
1336                  */
1337 run_queue:
1338                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1339         }
1340         blk_mq_put_ctx(data.ctx);
1341 }
1342
1343 /*
1344  * Single hardware queue variant. This will attempt to use any per-process
1345  * plug for merging and IO deferral.
1346  */
1347 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1348 {
1349         const int is_sync = rw_is_sync(bio->bi_rw);
1350         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1351         struct blk_plug *plug;
1352         unsigned int request_count = 0;
1353         struct blk_map_ctx data;
1354         struct request *rq;
1355
1356         blk_queue_bounce(q, &bio);
1357
1358         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1359                 bio_io_error(bio);
1360                 return;
1361         }
1362
1363         blk_queue_split(q, &bio, q->bio_split);
1364
1365         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1366             blk_attempt_plug_merge(q, bio, &request_count, NULL))
1367                 return;
1368
1369         rq = blk_mq_map_request(q, bio, &data);
1370         if (unlikely(!rq))
1371                 return;
1372
1373         if (unlikely(is_flush_fua)) {
1374                 blk_mq_bio_to_request(rq, bio);
1375                 blk_insert_flush(rq);
1376                 goto run_queue;
1377         }
1378
1379         /*
1380          * A task plug currently exists. Since this is completely lockless,
1381          * utilize that to temporarily store requests until the task is
1382          * either done or scheduled away.
1383          */
1384         plug = current->plug;
1385         if (plug) {
1386                 blk_mq_bio_to_request(rq, bio);
1387                 if (list_empty(&plug->mq_list))
1388                         trace_block_plug(q);
1389                 else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1390                         blk_flush_plug_list(plug, false);
1391                         trace_block_plug(q);
1392                 }
1393                 list_add_tail(&rq->queuelist, &plug->mq_list);
1394                 blk_mq_put_ctx(data.ctx);
1395                 return;
1396         }
1397
1398         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1399                 /*
1400                  * For a SYNC request, send it to the hardware immediately. For
1401                  * an ASYNC request, just ensure that we run it later on. The
1402                  * latter allows for merging opportunities and more efficient
1403                  * dispatching.
1404                  */
1405 run_queue:
1406                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1407         }
1408
1409         blk_mq_put_ctx(data.ctx);
1410 }
1411
1412 /*
1413  * Default mapping to a software queue, since we use one per CPU.
1414  */
1415 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1416 {
1417         return q->queue_hw_ctx[q->mq_map[cpu]];
1418 }
1419 EXPORT_SYMBOL(blk_mq_map_queue);
1420
1421 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1422                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1423 {
1424         struct page *page;
1425
1426         if (tags->rqs && set->ops->exit_request) {
1427                 int i;
1428
1429                 for (i = 0; i < tags->nr_tags; i++) {
1430                         if (!tags->rqs[i])
1431                                 continue;
1432                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1433                                                 hctx_idx, i);
1434                         tags->rqs[i] = NULL;
1435                 }
1436         }
1437
1438         while (!list_empty(&tags->page_list)) {
1439                 page = list_first_entry(&tags->page_list, struct page, lru);
1440                 list_del_init(&page->lru);
1441                 __free_pages(page, page->private);
1442         }
1443
1444         kfree(tags->rqs);
1445
1446         blk_mq_free_tags(tags);
1447 }
1448
1449 static size_t order_to_size(unsigned int order)
1450 {
1451         return (size_t)PAGE_SIZE << order;
1452 }
1453
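/*
 * Allocate the tag map for one hardware queue and pre-allocate a request
 * (plus set->cmd_size bytes of driver payload) for every tag, carved out
 * of the largest page allocations that succeed. The driver's
 * init_request() callback, if any, is invoked for each request.
 */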
1454 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1455                 unsigned int hctx_idx)
1456 {
1457         struct blk_mq_tags *tags;
1458         unsigned int i, j, entries_per_page, max_order = 4;
1459         size_t rq_size, left;
1460
1461         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1462                                 set->numa_node,
1463                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1464         if (!tags)
1465                 return NULL;
1466
1467         INIT_LIST_HEAD(&tags->page_list);
1468
1469         tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1470                                  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1471                                  set->numa_node);
1472         if (!tags->rqs) {
1473                 blk_mq_free_tags(tags);
1474                 return NULL;
1475         }
1476
1477         /*
1478          * rq_size is the size of the request plus driver payload, rounded
1479          * to the cacheline size
1480          */
1481         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1482                                 cache_line_size());
1483         left = rq_size * set->queue_depth;
1484
1485         for (i = 0; i < set->queue_depth; ) {
1486                 int this_order = max_order;
1487                 struct page *page;
1488                 int to_do;
1489                 void *p;
1490
1491                 while (left < order_to_size(this_order - 1) && this_order)
1492                         this_order--;
1493
1494                 do {
1495                         page = alloc_pages_node(set->numa_node,
1496                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1497                                 this_order);
1498                         if (page)
1499                                 break;
1500                         if (!this_order--)
1501                                 break;
1502                         if (order_to_size(this_order) < rq_size)
1503                                 break;
1504                 } while (1);
1505
1506                 if (!page)
1507                         goto fail;
1508
1509                 page->private = this_order;
1510                 list_add_tail(&page->lru, &tags->page_list);
1511
1512                 p = page_address(page);
1513                 entries_per_page = order_to_size(this_order) / rq_size;
1514                 to_do = min(entries_per_page, set->queue_depth - i);
1515                 left -= to_do * rq_size;
1516                 for (j = 0; j < to_do; j++) {
1517                         tags->rqs[i] = p;
1518                         if (set->ops->init_request) {
1519                                 if (set->ops->init_request(set->driver_data,
1520                                                 tags->rqs[i], hctx_idx, i,
1521                                                 set->numa_node)) {
1522                                         tags->rqs[i] = NULL;
1523                                         goto fail;
1524                                 }
1525                         }
1526
1527                         p += rq_size;
1528                         i++;
1529                 }
1530         }
1531         return tags;
1532
1533 fail:
1534         blk_mq_free_rq_map(set, tags, hctx_idx);
1535         return NULL;
1536 }
1537
1538 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1539 {
1540         kfree(bitmap->map);
1541 }
1542
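/*
 * Allocate the hctx's ctx_map: one blk_align_bitmap word per group of
 * bits_per_word software queues, with the last word's depth covering
 * whatever CPUs remain.
 */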
1543 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1544 {
1545         unsigned int bpw = 8, total, num_maps, i;
1546
1547         bitmap->bits_per_word = bpw;
1548
1549         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1550         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1551                                         GFP_KERNEL, node);
1552         if (!bitmap->map)
1553                 return -ENOMEM;
1554
1555         total = nr_cpu_ids;
1556         for (i = 0; i < num_maps; i++) {
1557                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1558                 total -= bitmap->map[i].depth;
1559         }
1560
1561         return 0;
1562 }
1563
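/*
 * CPU hotplug handler: when a CPU dies, splice any requests pending on
 * its software queue onto the current CPU's ctx, mark that ctx pending
 * and rerun the hardware queue it maps to.
 */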
1564 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1565 {
1566         struct request_queue *q = hctx->queue;
1567         struct blk_mq_ctx *ctx;
1568         LIST_HEAD(tmp);
1569
1570         /*
1571          * Move ctx entries to new CPU, if this one is going away.
1572          */
1573         ctx = __blk_mq_get_ctx(q, cpu);
1574
1575         spin_lock(&ctx->lock);
1576         if (!list_empty(&ctx->rq_list)) {
1577                 list_splice_init(&ctx->rq_list, &tmp);
1578                 blk_mq_hctx_clear_pending(hctx, ctx);
1579         }
1580         spin_unlock(&ctx->lock);
1581
1582         if (list_empty(&tmp))
1583                 return NOTIFY_OK;
1584
1585         ctx = blk_mq_get_ctx(q);
1586         spin_lock(&ctx->lock);
1587
1588         while (!list_empty(&tmp)) {
1589                 struct request *rq;
1590
1591                 rq = list_first_entry(&tmp, struct request, queuelist);
1592                 rq->mq_ctx = ctx;
1593                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1594         }
1595
1596         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1597         blk_mq_hctx_mark_pending(hctx, ctx);
1598
1599         spin_unlock(&ctx->lock);
1600
1601         blk_mq_run_hw_queue(hctx, true);
1602         blk_mq_put_ctx(ctx);
1603         return NOTIFY_OK;
1604 }
1605
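/*
 * In the offline path above, any requests still sitting on the dead CPU's
 * software queue are spliced onto the software queue of the CPU running the
 * notifier, that ctx is marked pending in its hardware queue's ctx_map, and
 * the hardware queue is kicked so the stranded requests get dispatched
 * rather than lost.
 */
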
1606 static int blk_mq_hctx_notify(void *data, unsigned long action,
1607                               unsigned int cpu)
1608 {
1609         struct blk_mq_hw_ctx *hctx = data;
1610
1611         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1612                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1613
1614         /*
1615          * In case of CPU online, tags may be reallocated
1616          * in blk_mq_map_swqueue() after the mapping is updated.
1617          */
1618
1619         return NOTIFY_OK;
1620 }
1621
1622 /* hctx->ctxs will be freed in the queue's release handler */
1623 static void blk_mq_exit_hctx(struct request_queue *q,
1624                 struct blk_mq_tag_set *set,
1625                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1626 {
1627         unsigned flush_start_tag = set->queue_depth;
1628
1629         blk_mq_tag_idle(hctx);
1630
1631         if (set->ops->exit_request)
1632                 set->ops->exit_request(set->driver_data,
1633                                        hctx->fq->flush_rq, hctx_idx,
1634                                        flush_start_tag + hctx_idx);
1635
1636         if (set->ops->exit_hctx)
1637                 set->ops->exit_hctx(hctx, hctx_idx);
1638
1639         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1640         blk_free_flush_queue(hctx->fq);
1641         blk_mq_free_bitmap(&hctx->ctx_map);
1642 }
1643
1644 static void blk_mq_exit_hw_queues(struct request_queue *q,
1645                 struct blk_mq_tag_set *set, int nr_queue)
1646 {
1647         struct blk_mq_hw_ctx *hctx;
1648         unsigned int i;
1649
1650         queue_for_each_hw_ctx(q, hctx, i) {
1651                 if (i == nr_queue)
1652                         break;
1653                 blk_mq_exit_hctx(q, set, hctx, i);
1654         }
1655 }
1656
1657 static void blk_mq_free_hw_queues(struct request_queue *q,
1658                 struct blk_mq_tag_set *set)
1659 {
1660         struct blk_mq_hw_ctx *hctx;
1661         unsigned int i;
1662
1663         queue_for_each_hw_ctx(q, hctx, i)
1664                 free_cpumask_var(hctx->cpumask);
1665 }
1666
1667 static int blk_mq_init_hctx(struct request_queue *q,
1668                 struct blk_mq_tag_set *set,
1669                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1670 {
1671         int node;
1672         unsigned flush_start_tag = set->queue_depth;
1673
1674         node = hctx->numa_node;
1675         if (node == NUMA_NO_NODE)
1676                 node = hctx->numa_node = set->numa_node;
1677
1678         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1679         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1680         spin_lock_init(&hctx->lock);
1681         INIT_LIST_HEAD(&hctx->dispatch);
1682         hctx->queue = q;
1683         hctx->queue_num = hctx_idx;
1684         hctx->flags = set->flags;
1685
1686         blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1687                                         blk_mq_hctx_notify, hctx);
1688         blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1689
1690         hctx->tags = set->tags[hctx_idx];
1691
1692         /*
1693          * Allocate space for all possible CPUs to avoid allocation at
1694          * runtime.
1695          */
1696         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1697                                         GFP_KERNEL, node);
1698         if (!hctx->ctxs)
1699                 goto unregister_cpu_notifier;
1700
1701         if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1702                 goto free_ctxs;
1703
1704         hctx->nr_ctx = 0;
1705
1706         if (set->ops->init_hctx &&
1707             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1708                 goto free_bitmap;
1709
1710         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1711         if (!hctx->fq)
1712                 goto exit_hctx;
1713
1714         if (set->ops->init_request &&
1715             set->ops->init_request(set->driver_data,
1716                                    hctx->fq->flush_rq, hctx_idx,
1717                                    flush_start_tag + hctx_idx, node))
1718                 goto free_fq;
1719
1720         return 0;
1721
1722  free_fq:
1723         blk_free_flush_queue(hctx->fq);	/* frees fq->flush_rq as well */
1724  exit_hctx:
1725         if (set->ops->exit_hctx)
1726                 set->ops->exit_hctx(hctx, hctx_idx);
1727  free_bitmap:
1728         blk_mq_free_bitmap(&hctx->ctx_map);
1729  free_ctxs:
1730         kfree(hctx->ctxs);
1731  unregister_cpu_notifier:
1732         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1733
1734         return -1;
1735 }
1736
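/*
 * As an illustration of the optional ->init_hctx() hook invoked above: a
 * minimal, hypothetical callback for a driver that keeps per-hardware-queue
 * state. "struct my_hctx_data", "struct my_driver" and "my_init_hctx" are
 * made-up names, not part of this file:
 *
 *	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				unsigned int hctx_idx)
 *	{
 *		struct my_driver *drv = data;	// set->driver_data
 *		struct my_hctx_data *hd;
 *
 *		hd = kzalloc_node(sizeof(*hd), GFP_KERNEL, hctx->numa_node);
 *		if (!hd)
 *			return -ENOMEM;
 *
 *		hd->drv = drv;
 *		hctx->driver_data = hd;	// freed again from ->exit_hctx()
 *		return 0;
 *	}
 */
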
1737 static int blk_mq_init_hw_queues(struct request_queue *q,
1738                 struct blk_mq_tag_set *set)
1739 {
1740         struct blk_mq_hw_ctx *hctx;
1741         unsigned int i;
1742
1743         /*
1744          * Initialize hardware queues
1745          */
1746         queue_for_each_hw_ctx(q, hctx, i) {
1747                 if (blk_mq_init_hctx(q, set, hctx, i))
1748                         break;
1749         }
1750
1751         if (i == q->nr_hw_queues)
1752                 return 0;
1753
1754         /*
1755          * Init failed
1756          */
1757         blk_mq_exit_hw_queues(q, set, i);
1758
1759         return 1;
1760 }
1761
1762 static void blk_mq_init_cpu_queues(struct request_queue *q,
1763                                    unsigned int nr_hw_queues)
1764 {
1765         unsigned int i;
1766
1767         for_each_possible_cpu(i) {
1768                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1769                 struct blk_mq_hw_ctx *hctx;
1770
1771                 memset(__ctx, 0, sizeof(*__ctx));
1772                 __ctx->cpu = i;
1773                 spin_lock_init(&__ctx->lock);
1774                 INIT_LIST_HEAD(&__ctx->rq_list);
1775                 __ctx->queue = q;
1776
1777                 /* If the CPU isn't online, it is mapped to the first hctx */
1778                 if (!cpu_online(i))
1779                         continue;
1780
1781                 hctx = q->mq_ops->map_queue(q, i);
1782
1783                 /*
1784                  * Set local node, IFF we have more than one hw queue. If
1785                  * not, we remain on the home node of the device
1786                  */
1787                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1788                         hctx->numa_node = cpu_to_node(i);
1789         }
1790 }
1791
1792 static void blk_mq_map_swqueue(struct request_queue *q)
1793 {
1794         unsigned int i;
1795         struct blk_mq_hw_ctx *hctx;
1796         struct blk_mq_ctx *ctx;
1797         struct blk_mq_tag_set *set = q->tag_set;
1798
1799         queue_for_each_hw_ctx(q, hctx, i) {
1800                 cpumask_clear(hctx->cpumask);
1801                 hctx->nr_ctx = 0;
1802         }
1803
1804         /*
1805          * Map software to hardware queues
1806          */
1807         queue_for_each_ctx(q, ctx, i) {
1808                 /* If the CPU isn't online, it is mapped to the first hctx */
1809                 if (!cpu_online(i))
1810                         continue;
1811
1812                 hctx = q->mq_ops->map_queue(q, i);
1813                 cpumask_set_cpu(i, hctx->cpumask);
1814                 cpumask_set_cpu(i, hctx->tags->cpumask);
1815                 ctx->index_hw = hctx->nr_ctx;
1816                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1817         }
1818
1819         queue_for_each_hw_ctx(q, hctx, i) {
1820                 struct blk_mq_ctxmap *map = &hctx->ctx_map;
1821
1822                 /*
1823                  * If no software queues are mapped to this hardware queue,
1824                  * disable it and free the request entries.
1825                  */
1826                 if (!hctx->nr_ctx) {
1827                         if (set->tags[i]) {
1828                                 blk_mq_free_rq_map(set, set->tags[i], i);
1829                                 set->tags[i] = NULL;
1830                         }
1831                         hctx->tags = NULL;
1832                         continue;
1833                 }
1834
1835                 /* an unmapped hw queue can be remapped after the CPU topology changes */
1836                 if (!set->tags[i])
1837                         set->tags[i] = blk_mq_init_rq_map(set, i);
1838                 hctx->tags = set->tags[i];
1839                 WARN_ON(!hctx->tags);
1840
1841                 /*
1842                  * Set the map size to the number of mapped software queues.
1843                  * This is more accurate and more efficient than looping
1844                  * over all possibly mapped software queues.
1845                  */
1846                 map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
1847
1848                 /*
1849                  * Initialize batch roundrobin counts
1850                  */
1851                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1852                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1853         }
1854 }
1855
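/*
 * Worked example of the mapping above: with 8 online CPUs, 2 hardware
 * queues and a map_queue implementation that spreads CPUs roughly evenly
 * (as the stock blk_mq_map_queue helper does), each hctx ends up with
 * nr_ctx == 4, each mapped ctx gets index_hw 0..3 within its hctx, and
 * with bits_per_word == 8 the ctx_map size becomes DIV_ROUND_UP(4, 8) == 1.
 */
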
1856 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1857 {
1858         struct blk_mq_hw_ctx *hctx;
1859         struct request_queue *q;
1860         bool shared;
1861         int i;
1862
1863         if (set->tag_list.next == set->tag_list.prev)
1864                 shared = false;
1865         else
1866                 shared = true;
1867
1868         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1869                 blk_mq_freeze_queue(q);
1870
1871                 queue_for_each_hw_ctx(q, hctx, i) {
1872                         if (shared)
1873                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1874                         else
1875                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1876                 }
1877                 blk_mq_unfreeze_queue(q);
1878         }
1879 }
1880
1881 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1882 {
1883         struct blk_mq_tag_set *set = q->tag_set;
1884
1885         mutex_lock(&set->tag_list_lock);
1886         list_del_init(&q->tag_set_list);
1887         blk_mq_update_tag_set_depth(set);
1888         mutex_unlock(&set->tag_list_lock);
1889 }
1890
1891 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1892                                      struct request_queue *q)
1893 {
1894         q->tag_set = set;
1895
1896         mutex_lock(&set->tag_list_lock);
1897         list_add_tail(&q->tag_set_list, &set->tag_list);
1898         blk_mq_update_tag_set_depth(set);
1899         mutex_unlock(&set->tag_list_lock);
1900 }
1901
1902 /*
1903  * This is the actual release handler for blk-mq, but it is run from the
1904  * request queue's release handler to avoid use-after-free problems:
1905  * q->mq_kobj shouldn't have been introduced, but we can't group the
1906  * ctx/hctx kobjects without it.
1907  */
1908 void blk_mq_release(struct request_queue *q)
1909 {
1910         struct blk_mq_hw_ctx *hctx;
1911         unsigned int i;
1912
1913         /* hctx kobj stays in hctx */
1914         queue_for_each_hw_ctx(q, hctx, i) {
1915                 if (!hctx)
1916                         continue;
1917                 kfree(hctx->ctxs);
1918                 kfree(hctx);
1919         }
1920
1921         kfree(q->queue_hw_ctx);
1922
1923         /* ctx kobj stays in queue_ctx */
1924         free_percpu(q->queue_ctx);
1925 }
1926
1927 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1928 {
1929         struct request_queue *uninit_q, *q;
1930
1931         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1932         if (!uninit_q)
1933                 return ERR_PTR(-ENOMEM);
1934
1935         q = blk_mq_init_allocated_queue(set, uninit_q);
1936         if (IS_ERR(q))
1937                 blk_cleanup_queue(uninit_q);
1938
1939         return q;
1940 }
1941 EXPORT_SYMBOL(blk_mq_init_queue);
1942
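/*
 * A hedged sketch of typical driver usage around blk_mq_init_queue(),
 * assuming a tag set that has already been allocated; "my_dev" and its
 * tag_set member are illustrative names, not part of this file:
 *
 *	q = blk_mq_init_queue(&my_dev->tag_set);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	q->queuedata = my_dev;
 *	...
 *	// on teardown, once the disk is gone:
 *	blk_cleanup_queue(q);
 *	blk_mq_free_tag_set(&my_dev->tag_set);
 */
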
1943 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
1944                                                   struct request_queue *q)
1945 {
1946         struct blk_mq_hw_ctx **hctxs;
1947         struct blk_mq_ctx __percpu *ctx;
1948         unsigned int *map;
1949         int i;
1950
1951         ctx = alloc_percpu(struct blk_mq_ctx);
1952         if (!ctx)
1953                 return ERR_PTR(-ENOMEM);
1954
1955         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1956                         set->numa_node);
1957
1958         if (!hctxs)
1959                 goto err_percpu;
1960
1961         map = blk_mq_make_queue_map(set);
1962         if (!map)
1963                 goto err_map;
1964
1965         for (i = 0; i < set->nr_hw_queues; i++) {
1966                 int node = blk_mq_hw_queue_to_node(map, i);
1967
1968                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1969                                         GFP_KERNEL, node);
1970                 if (!hctxs[i])
1971                         goto err_hctxs;
1972
1973                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1974                                                 node))
1975                         goto err_hctxs;
1976
1977                 atomic_set(&hctxs[i]->nr_active, 0);
1978                 hctxs[i]->numa_node = node;
1979                 hctxs[i]->queue_num = i;
1980         }
1981
1982         /*
1983          * Init percpu_ref in atomic mode so that it's faster to shutdown.
1984          * See blk_register_queue() for details.
1985          */
1986         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1987                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1988                 goto err_hctxs;
1989
1990         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1991         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
1992
1993         q->nr_queues = nr_cpu_ids;
1994         q->nr_hw_queues = set->nr_hw_queues;
1995         q->mq_map = map;
1996
1997         q->queue_ctx = ctx;
1998         q->queue_hw_ctx = hctxs;
1999
2000         q->mq_ops = set->ops;
2001         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2002
2003         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2004                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2005
2006         q->sg_reserved_size = INT_MAX;
2007
2008         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
2009         INIT_LIST_HEAD(&q->requeue_list);
2010         spin_lock_init(&q->requeue_lock);
2011
2012         if (q->nr_hw_queues > 1)
2013                 blk_queue_make_request(q, blk_mq_make_request);
2014         else
2015                 blk_queue_make_request(q, blk_sq_make_request);
2016
2017         /*
2018          * Do this after blk_queue_make_request() overrides it...
2019          */
2020         q->nr_requests = set->queue_depth;
2021
2022         if (set->ops->complete)
2023                 blk_queue_softirq_done(q, set->ops->complete);
2024
2025         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2026
2027         if (blk_mq_init_hw_queues(q, set))
2028                 goto err_hctxs;
2029
2030         mutex_lock(&all_q_mutex);
2031         list_add_tail(&q->all_q_node, &all_q_list);
2032         mutex_unlock(&all_q_mutex);
2033
2034         blk_mq_add_queue_tag_set(set, q);
2035
2036         blk_mq_map_swqueue(q);
2037
2038         return q;
2039
2040 err_hctxs:
2041         kfree(map);
2042         for (i = 0; i < set->nr_hw_queues; i++) {
2043                 if (!hctxs[i])
2044                         break;
2045                 free_cpumask_var(hctxs[i]->cpumask);
2046                 kfree(hctxs[i]);
2047         }
2048 err_map:
2049         kfree(hctxs);
2050 err_percpu:
2051         free_percpu(ctx);
2052         return ERR_PTR(-ENOMEM);
2053 }
2054 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2055
2056 void blk_mq_free_queue(struct request_queue *q)
2057 {
2058         struct blk_mq_tag_set   *set = q->tag_set;
2059
2060         blk_mq_del_queue_tag_set(q);
2061
2062         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2063         blk_mq_free_hw_queues(q, set);
2064
2065         percpu_ref_exit(&q->mq_usage_counter);
2066
2067         kfree(q->mq_map);
2068
2069         q->mq_map = NULL;
2070
2071         mutex_lock(&all_q_mutex);
2072         list_del_init(&q->all_q_node);
2073         mutex_unlock(&all_q_mutex);
2074 }
2075
2076 /* Basically redo blk_mq_init_queue with queue frozen */
2077 static void blk_mq_queue_reinit(struct request_queue *q)
2078 {
2079         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2080
2081         blk_mq_sysfs_unregister(q);
2082
2083         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
2084
2085         /*
2086          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2087          * we should change hctx numa_node according to the new topology (this
2088          * involves freeing and re-allocating memory; is it worth doing?)
2089          */
2090
2091         blk_mq_map_swqueue(q);
2092
2093         blk_mq_sysfs_register(q);
2094 }
2095
2096 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2097                                       unsigned long action, void *hcpu)
2098 {
2099         struct request_queue *q;
2100
2101         /*
2102          * Before new mappings are established, a hot-added CPU might already
2103          * start handling requests. This doesn't break anything, as we map
2104          * offline CPUs to the first hardware queue. We will re-init the queue
2105          * below to get optimal settings.
2106          */
2107         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
2108             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
2109                 return NOTIFY_OK;
2110
2111         mutex_lock(&all_q_mutex);
2112
2113         /*
2114          * We need to freeze and reinit all existing queues.  Freezing
2115          * involves a synchronous wait for an RCU grace period, and doing it
2116          * one by one may take a long time.  Start freezing all queues in
2117          * one swoop and then wait for the completions so that freezing can
2118          * take place in parallel.
2119          */
2120         list_for_each_entry(q, &all_q_list, all_q_node)
2121                 blk_mq_freeze_queue_start(q);
2122         list_for_each_entry(q, &all_q_list, all_q_node) {
2123                 blk_mq_freeze_queue_wait(q);
2124
2125                 /*
2126                  * timeout handler can't touch hw queue during the
2127                  * reinitialization
2128                  */
2129                 del_timer_sync(&q->timeout);
2130         }
2131
2132         list_for_each_entry(q, &all_q_list, all_q_node)
2133                 blk_mq_queue_reinit(q);
2134
2135         list_for_each_entry(q, &all_q_list, all_q_node)
2136                 blk_mq_unfreeze_queue(q);
2137
2138         mutex_unlock(&all_q_mutex);
2139         return NOTIFY_OK;
2140 }
2141
2142 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2143 {
2144         int i;
2145
2146         for (i = 0; i < set->nr_hw_queues; i++) {
2147                 set->tags[i] = blk_mq_init_rq_map(set, i);
2148                 if (!set->tags[i])
2149                         goto out_unwind;
2150         }
2151
2152         return 0;
2153
2154 out_unwind:
2155         while (--i >= 0)
2156                 blk_mq_free_rq_map(set, set->tags[i], i);
2157
2158         return -ENOMEM;
2159 }
2160
2161 /*
2162  * Allocate the request maps associated with this tag_set. Note that this
2163  * may reduce the depth asked for, if memory is tight. set->queue_depth
2164  * will be updated to reflect the allocated depth.
2165  */
2166 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2167 {
2168         unsigned int depth;
2169         int err;
2170
2171         depth = set->queue_depth;
2172         do {
2173                 err = __blk_mq_alloc_rq_maps(set);
2174                 if (!err)
2175                         break;
2176
2177                 set->queue_depth >>= 1;
2178                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2179                         err = -ENOMEM;
2180                         break;
2181                 }
2182         } while (set->queue_depth);
2183
2184         if (!set->queue_depth || err) {
2185                 pr_err("blk-mq: failed to allocate request map\n");
2186                 return -ENOMEM;
2187         }
2188
2189         if (depth != set->queue_depth)
2190                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2191                                                 depth, set->queue_depth);
2192
2193         return 0;
2194 }
2195
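/*
 * Worked example of the fallback above: if a driver asks for
 * queue_depth == 256 and the per-queue request allocations fail twice,
 * the depth is retried at 128 and then 64; if 64 succeeds,
 * set->queue_depth is left at 64 and the "reduced tag depth" message
 * reports 256 -> 64.
 */
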
2196 struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
2197 {
2198         return tags->cpumask;
2199 }
2200 EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
2201
2202 /*
2203  * Alloc a tag set to be associated with one or more request queues.
2204  * May fail with -EINVAL for various error conditions. May adjust the
2205  * requested depth down, if it is too large. In that case, the
2206  * adjusted value will be stored in set->queue_depth.
2207  */
2208 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2209 {
2210         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2211
2212         if (!set->nr_hw_queues)
2213                 return -EINVAL;
2214         if (!set->queue_depth)
2215                 return -EINVAL;
2216         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2217                 return -EINVAL;
2218
2219         if (!set->ops->queue_rq || !set->ops->map_queue)
2220                 return -EINVAL;
2221
2222         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2223                 pr_info("blk-mq: reduced tag depth to %u\n",
2224                         BLK_MQ_MAX_DEPTH);
2225                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2226         }
2227
2228         /*
2229          * If a crashdump is active, then we are potentially in a very
2230          * memory constrained environment. Limit us to 1 queue and
2231          * 64 tags to prevent using too much memory.
2232          */
2233         if (is_kdump_kernel()) {
2234                 set->nr_hw_queues = 1;
2235                 set->queue_depth = min(64U, set->queue_depth);
2236         }
2237
2238         set->tags = kmalloc_node(set->nr_hw_queues *
2239                                  sizeof(struct blk_mq_tags *),
2240                                  GFP_KERNEL, set->numa_node);
2241         if (!set->tags)
2242                 return -ENOMEM;
2243
2244         if (blk_mq_alloc_rq_maps(set))
2245                 goto enomem;
2246
2247         mutex_init(&set->tag_list_lock);
2248         INIT_LIST_HEAD(&set->tag_list);
2249
2250         return 0;
2251 enomem:
2252         kfree(set->tags);
2253         set->tags = NULL;
2254         return -ENOMEM;
2255 }
2256 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2257
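/*
 * A hedged sketch of the fields a driver typically fills in before calling
 * blk_mq_alloc_tag_set(); "my_mq_ops", "my_dev", "struct my_cmd" and the
 * values shown are illustrative only, and blk_mq_map_queue is the stock
 * CPU-to-queue mapping helper most drivers used with this interface:
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *		.init_request	= my_init_request,
 *	};
 *
 *	my_dev->tag_set.ops		= &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues	= 1;
 *	my_dev->tag_set.queue_depth	= 64;
 *	my_dev->tag_set.numa_node	= NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size	= sizeof(struct my_cmd);
 *	my_dev->tag_set.driver_data	= my_dev;
 *
 *	ret = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (ret)
 *		return ret;
 */
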
2258 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2259 {
2260         int i;
2261
2262         for (i = 0; i < set->nr_hw_queues; i++) {
2263                 if (set->tags[i]) {
2264                         blk_mq_free_rq_map(set, set->tags[i], i);
2265                         free_cpumask_var(set->tags[i]->cpumask);
2266                 }
2267         }
2268
2269         kfree(set->tags);
2270         set->tags = NULL;
2271 }
2272 EXPORT_SYMBOL(blk_mq_free_tag_set);
2273
2274 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2275 {
2276         struct blk_mq_tag_set *set = q->tag_set;
2277         struct blk_mq_hw_ctx *hctx;
2278         int i, ret;
2279
2280         if (!set || nr > set->queue_depth)
2281                 return -EINVAL;
2282
2283         ret = 0;
2284         queue_for_each_hw_ctx(q, hctx, i) {
2285                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2286                 if (ret)
2287                         break;
2288         }
2289
2290         if (!ret)
2291                 q->nr_requests = nr;
2292
2293         return ret;
2294 }
2295
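/*
 * This is, as far as the sysfs plumbing goes, the path reached when the
 * queue's nr_requests attribute is rewritten for a blk-mq device: the new
 * value may shrink the tag depth per hardware queue, but it can never
 * exceed the depth the tag set was originally allocated with
 * (set->queue_depth).
 */
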
2296 void blk_mq_disable_hotplug(void)
2297 {
2298         mutex_lock(&all_q_mutex);
2299 }
2300
2301 void blk_mq_enable_hotplug(void)
2302 {
2303         mutex_unlock(&all_q_mutex);
2304 }
2305
2306 static int __init blk_mq_init(void)
2307 {
2308         blk_mq_cpu_init();
2309
2310         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2311
2312         return 0;
2313 }
2314 subsys_initcall(blk_mq_init);