blk-mq: include errors in did_work calculation
index 5a1ff4894285aacb40c50a9349196217c5462796..e6ac774b0e41a884428faef164571ebe22280f76 100644 (file)
@@ -68,7 +68,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
 }
 
-void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
 
@@ -78,7 +78,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
                blk_mq_run_hw_queues(q, false);
        }
 }
-EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
+EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
 void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -108,7 +108,7 @@ void blk_freeze_queue(struct request_queue *q)
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
-       blk_mq_freeze_queue_start(q);
+       blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
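
For context: blk_freeze_queue() above simply combines the asynchronous start with the wait. Drivers normally reach this machinery through the exported blk_mq_freeze_queue()/blk_mq_unfreeze_queue() pair; a minimal sketch of that usage, with a hypothetical driver and resize helper:

        #include <linux/blkdev.h>
        #include <linux/blk-mq.h>
        #include <linux/genhd.h>

        /* Hypothetical helper: quiesce all I/O while the device size changes. */
        static void mydrv_resize(struct gendisk *disk, sector_t new_sectors)
        {
                struct request_queue *q = disk->queue;

                blk_mq_freeze_queue(q);         /* drain in-flight requests, block new ones */
                set_capacity(disk, new_sectors);
                blk_mq_unfreeze_queue(q);       /* resume normal dispatch */
        }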
 
@@ -488,7 +488,7 @@ void blk_mq_start_request(struct request *rq)
        trace_block_rq_issue(q, rq);
 
        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
-               blk_stat_set_issue_time(&rq->issue_stat);
+               blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
                rq->rq_flags |= RQF_STATS;
                wbt_issue(q->rq_wb, &rq->issue_stat);
        }
@@ -523,6 +523,15 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
+/*
+ * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
+ * flag isn't set yet, so there may be a race with the timeout handler.
+ * In practice the race cannot happen, though: rq->deadline has just
+ * been set in .queue_rq(), and rq->timeout should be large enough to
+ * cover the window between blk_mq_start_request() being called from
+ * .queue_rq() and REQ_ATOM_STARTED being cleared here in
+ * __blk_mq_requeue_request().
+ */
 static void __blk_mq_requeue_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
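
The comment covers the window between blk_mq_start_request() in a driver's .queue_rq() and the requeue below when the driver reports a busy queue. A sketch of a hypothetical .queue_rq() that takes that path (the device, its ring-full test, and the submit helper are made up):

        static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
        {
                struct request *rq = bd->rq;
                struct mydrv_dev *dev = hctx->queue->queuedata;   /* hypothetical */

                blk_mq_start_request(rq);       /* sets rq->deadline and REQ_ATOM_STARTED */

                if (mydrv_ring_full(dev))       /* hypothetical busy condition */
                        return BLK_MQ_RQ_QUEUE_BUSY;    /* blk-mq requeues rq via the path below */

                mydrv_submit(dev, rq);          /* hypothetical hardware submission */
                return BLK_MQ_RQ_QUEUE_OK;
        }
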
@@ -663,7 +672,7 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
         * just be ignored. This can happen due to the bitflag ordering.
         * Timeout first checks if STARTED is set, and if it is, assumes
         * the request is active. But if we race with completion, then
-        * we both flags will get cleared. So check here again, and ignore
+        * both flags will get cleared. So check here again, and ignore
         * a timeout event with a request that isn't active.
         */
        if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
@@ -693,18 +702,22 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 {
        struct blk_mq_timeout_data *data = priv;
 
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-               /*
-                * If a request wasn't started before the queue was
-                * marked dying, kill it here or it'll go unnoticed.
-                */
-               if (unlikely(blk_queue_dying(rq->q))) {
-                       rq->errors = -EIO;
-                       blk_mq_end_request(rq, rq->errors);
-               }
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                return;
-       }
 
+       /*
+        * The rq being checked may already have been freed and reallocated
+        * here; we avoid that race by checking rq->deadline and the
+        * REQ_ATOM_COMPLETE flag together:
+        *
+        * - if rq->deadline is observed as the new value because of reuse,
+        *   the rq won't be timed out, as the new deadline hasn't expired.
+        * - if rq->deadline is observed as the previous value, the
+        *   REQ_ATOM_COMPLETE flag can't have been cleared on the reuse
+        *   path, because a barrier sits between setting rq->deadline and
+        *   clearing the flag in blk_mq_start_request(), so this rq won't
+        *   be timed out either.
+        */
        if (time_after_eq(jiffies, rq->deadline)) {
                if (!blk_mark_rq_complete(rq))
                        blk_mq_rq_timed_out(rq, reserved);
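
The ordering that the comment depends on can be illustrated with a simplified pair of functions; this is a conceptual sketch of the barrier pairing, not the literal blk_mq_start_request()/blk_mq_check_expired() code:

        /* Reuse side: publish the new deadline before re-enabling timeouts. */
        static void reuse_publish_sketch(struct request *rq)
        {
                rq->deadline = jiffies + rq->timeout;                   /* new deadline */
                smp_wmb();                                              /* order it before the flag */
                clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);        /* timeouts allowed again */
        }

        /* Timeout side: either the new deadline (not yet expired) or the
         * still-set COMPLETE bit (test_and_set fails) prevents a bogus
         * timeout on a reallocated request.
         */
        static bool timeout_claim_sketch(struct request *rq)
        {
                if (!time_after_eq(jiffies, rq->deadline))
                        return false;
                return !test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
        }
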
@@ -733,7 +746,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
         * percpu_ref_tryget directly, because we need to be able to
         * obtain a reference even in the short window between the queue
         * starting to freeze, by dropping the first reference in
-        * blk_mq_freeze_queue_start, and the moment the last request is
+        * blk_freeze_queue_start, and the moment the last request is
         * consumed, marked by the instant q_usage_counter reaches
         * zero.
         */
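
The reference the comment talks about is taken with percpu_ref_tryget() on q->q_usage_counter and dropped again with blk_queue_exit(); condensed, the timeout worker follows this shape (a sketch, with the deadline bookkeeping left out):

        static void timeout_work_sketch(struct request_queue *q)
        {
                struct blk_mq_timeout_data data = { .next = 0, .next_set = 0 };

                /*
                 * tryget only fails once q_usage_counter has hit zero, so it
                 * still succeeds in the window after blk_freeze_queue_start()
                 * dropped the initial reference but before the last in-flight
                 * request has completed.
                 */
                if (!percpu_ref_tryget(&q->q_usage_counter))
                        return;

                blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

                blk_queue_exit(q);      /* put the reference taken above */
        }
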
@@ -974,7 +987,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
        struct request *rq;
        LIST_HEAD(driver_list);
        struct list_head *dptr;
-       int queued, ret = BLK_MQ_RQ_QUEUE_OK;
+       int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
 
        /*
         * Start off with dptr being NULL, so we start the first request
@@ -985,7 +998,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
        /*
         * Now process all the entries, sending them to the driver.
         */
-       queued = 0;
+       errors = queued = 0;
        while (!list_empty(list)) {
                struct blk_mq_queue_data bd;
 
@@ -1042,6 +1055,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                case BLK_MQ_RQ_QUEUE_ERROR:
+                       errors++;
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                        break;
@@ -1093,7 +1107,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                        blk_mq_run_hw_queue(hctx, true);
        }
 
-       return queued != 0;
+       return (queued + errors) != 0;
 }
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
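
This return-value change is the subject of the patch: requests failed with -EIO still came off the dispatch list, so the caller should see the pass as having made progress ("did work") rather than backing off. A condensed sketch of the kind of caller loop that relies on this, resembling the scheduler dispatch path (details simplified):

        static void sched_dispatch_sketch(struct blk_mq_hw_ctx *hctx,
                                          struct elevator_queue *e)
        {
                LIST_HEAD(rq_list);

                /* Keep pulling requests from the scheduler while the previous
                 * batch made progress; a batch that only produced error
                 * completions would otherwise report no progress and stop
                 * this loop even though the list had been consumed.
                 */
                do {
                        struct request *rq = e->type->ops.mq.dispatch_request(hctx);

                        if (!rq)
                                break;
                        list_add_tail(&rq->queuelist, &rq_list);
                } while (blk_mq_dispatch_rq_list(hctx, &rq_list));
        }
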
@@ -1430,7 +1444,7 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
        return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
                                      bool may_sleep)
 {
        struct request_queue *q = rq->q;
@@ -1462,8 +1476,6 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
                return;
        }
 
-       __blk_mq_requeue_request(rq);
-
        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
                *cookie = BLK_QC_T_NONE;
                rq->errors = -EIO;
@@ -1471,17 +1483,32 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
                return;
        }
 
+       __blk_mq_requeue_request(rq);
 insert:
        blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
 
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+               struct request *rq, blk_qc_t *cookie)
+{
+       if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+               rcu_read_lock();
+               __blk_mq_try_issue_directly(rq, cookie, false);
+               rcu_read_unlock();
+       } else {
+               unsigned int srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+               __blk_mq_try_issue_directly(rq, cookie, true);
+               srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+       }
+}
+
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
        const int is_sync = op_is_sync(bio->bi_opf);
        const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct blk_mq_alloc_data data = { .flags = 0 };
        struct request *rq;
-       unsigned int request_count = 0, srcu_idx;
+       unsigned int request_count = 0;
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
        blk_qc_t cookie;
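
The new wrapper picks plain RCU when the driver's .queue_rq() never blocks, and SRCU (hctx->queue_rq_srcu) when BLK_MQ_F_BLOCKING is set, so a sleeping .queue_rq() stays safe on the direct-issue path. A driver opts in through its tag set; a hypothetical example (all mydrv_* names are made up):

        static struct blk_mq_ops mydrv_mq_ops = {
                .queue_rq       = mydrv_queue_rq,       /* assumed to block, hence BLK_MQ_F_BLOCKING */
        };

        static int mydrv_init_tags(struct mydrv_dev *dev)       /* hypothetical */
        {
                dev->tag_set.ops                = &mydrv_mq_ops;
                dev->tag_set.nr_hw_queues       = 1;
                dev->tag_set.queue_depth        = 128;
                dev->tag_set.numa_node          = NUMA_NO_NODE;
                dev->tag_set.cmd_size           = sizeof(struct mydrv_cmd);     /* hypothetical */
                dev->tag_set.flags              = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
                dev->tag_set.driver_data        = dev;

                return blk_mq_alloc_tag_set(&dev->tag_set);
        }
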
@@ -1517,16 +1544,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        cookie = request_to_qc_t(data.hctx, rq);
 
+       plug = current->plug;
        if (unlikely(is_flush_fua)) {
-               if (q->elevator)
-                       goto elv_insert;
                blk_mq_bio_to_request(rq, bio);
-               blk_insert_flush(rq);
-               goto run_queue;
-       }
-
-       plug = current->plug;
-       if (plug && q->nr_hw_queues == 1) {
+               if (q->elevator) {
+                       blk_mq_sched_insert_request(rq, false, true, true,
+                                       true);
+               } else {
+                       blk_insert_flush(rq);
+                       blk_mq_run_hw_queue(data.hctx, true);
+               }
+       } else if (plug && q->nr_hw_queues == 1) {
                struct request *last = NULL;
 
                blk_mq_bio_to_request(rq, bio);
@@ -1545,8 +1573,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                else
                        last = list_entry_rq(plug->mq_list.prev);
 
-               blk_mq_put_ctx(data.ctx);
-
                if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
                    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
                        blk_flush_plug_list(plug, false);
@@ -1554,66 +1580,36 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                }
 
                list_add_tail(&rq->queuelist, &plug->mq_list);
-               goto done;
-       } else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
-               struct request *old_rq = NULL;
-
+       } else if (plug && !blk_queue_nomerges(q)) {
                blk_mq_bio_to_request(rq, bio);
 
                /*
                 * We do limited plugging. If the bio can be merged, do that.
                 * Otherwise the existing request in the plug list will be
                 * issued. So the plug list will have one request at most
+                * The plug list might get flushed before this. If that happens,
+                * the plug list is empty, and same_queue_rq is invalid.
                 */
-               if (plug) {
-                       /*
-                        * The plug list might get flushed before this. If that
-                        * happens, same_queue_rq is invalid and plug list is
-                        * empty
-                        */
-                       if (same_queue_rq && !list_empty(&plug->mq_list)) {
-                               old_rq = same_queue_rq;
-                               list_del_init(&old_rq->queuelist);
-                       }
-                       list_add_tail(&rq->queuelist, &plug->mq_list);
-               } else /* is_sync */
-                       old_rq = rq;
-               blk_mq_put_ctx(data.ctx);
-               if (!old_rq)
-                       goto done;
-
-               if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
-                       rcu_read_lock();
-                       blk_mq_try_issue_directly(old_rq, &cookie, false);
-                       rcu_read_unlock();
-               } else {
-                       srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-                       blk_mq_try_issue_directly(old_rq, &cookie, true);
-                       srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
-               }
-               goto done;
-       }
+               if (list_empty(&plug->mq_list))
+                       same_queue_rq = NULL;
+               if (same_queue_rq)
+                       list_del_init(&same_queue_rq->queuelist);
+               list_add_tail(&rq->queuelist, &plug->mq_list);
 
-       if (q->elevator) {
-elv_insert:
-               blk_mq_put_ctx(data.ctx);
+               if (same_queue_rq)
+                       blk_mq_try_issue_directly(data.hctx, same_queue_rq,
+                                       &cookie);
+       } else if (q->nr_hw_queues > 1 && is_sync) {
                blk_mq_bio_to_request(rq, bio);
-               blk_mq_sched_insert_request(rq, false, true,
-                                               !is_sync || is_flush_fua, true);
-               goto done;
-       }
-       if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
-               /*
-                * For a SYNC request, send it to the hardware immediately. For
-                * an ASYNC request, just ensure that we run it later on. The
-                * latter allows for merging opportunities and more efficient
-                * dispatching.
-                */
-run_queue:
-               blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
+               blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+       } else if (q->elevator) {
+               blk_mq_bio_to_request(rq, bio);
+               blk_mq_sched_insert_request(rq, false, true, true, true);
+       } else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+               blk_mq_run_hw_queue(data.hctx, true);
        }
+
        blk_mq_put_ctx(data.ctx);
-done:
        return cookie;
 }
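
current->plug, consulted near the top of the rewritten function, is installed by the submitter; the usual submission-side pattern looks roughly like this (the bio preparation is elided):

        #include <linux/blkdev.h>

        /* Batch several bios under one plug so blk-mq can merge and flush them
         * as a list instead of dispatching each bio immediately.
         */
        static void submit_batch_sketch(struct bio **bios, int nr)
        {
                struct blk_plug plug;
                int i;

                blk_start_plug(&plug);          /* sets current->plug */
                for (i = 0; i < nr; i++)
                        submit_bio(bios[i]);    /* may land on plug->mq_list via the path above */
                blk_finish_plug(&plug);         /* flushes the plugged requests */
        }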
 
@@ -2238,10 +2234,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        /* mark the queue as mq asap */
        q->mq_ops = set->ops;
 
-       q->stats = blk_alloc_queue_stats();
-       if (!q->stats)
-               goto err_exit;
-
        q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
                                             blk_stat_rq_ddir, 2, q);
        if (!q->poll_cb)
@@ -2380,7 +2372,7 @@ static void blk_mq_queue_reinit_work(void)
         * take place in parallel.
         */
        list_for_each_entry(q, &all_q_list, all_q_node)
-               blk_mq_freeze_queue_start(q);
+               blk_freeze_queue_start(q);
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_freeze_queue_wait(q);
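
The two-pass loop above starts every freeze before waiting on any of them, so all queues drain in parallel rather than one after another. The same pattern, sketched as a driver might apply it to an array of queues it owns:

        static void freeze_all_sketch(struct request_queue **queues, int nr)
        {
                int i;

                for (i = 0; i < nr; i++)
                        blk_freeze_queue_start(queues[i]);      /* async: drop the ref, start draining */

                for (i = 0; i < nr; i++)
                        blk_mq_freeze_queue_wait(queues[i]);    /* wait for q_usage_counter to reach zero */
        }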