block: Check locking assumptions at runtime
diff --git a/block/blk-core.c b/block/blk-core.c
index c7068520794bd0ba060b905f850efaae6a8cbd36..5f87788249ce3e1a37a9e67c5524aee6ff08c0d0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -129,11 +129,70 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+static const struct {
+       int             errno;
+       const char      *name;
+} blk_errors[] = {
+       [BLK_STS_OK]            = { 0,          "" },
+       [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
+       [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
+       [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
+       [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
+       [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
+       [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
+       [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
+       [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
+       [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
+       [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
+
+       /* device mapper special case, should not leak out: */
+       [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
+
+       /* everything else not covered above: */
+       [BLK_STS_IOERR]         = { -EIO,       "I/O" },
+};
+
+blk_status_t errno_to_blk_status(int errno)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
+               if (blk_errors[i].errno == errno)
+                       return (__force blk_status_t)i;
+       }
+
+       return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(errno_to_blk_status);
+
+int blk_status_to_errno(blk_status_t status)
+{
+       int idx = (__force int)status;
+
+       if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+               return -EIO;
+       return blk_errors[idx].errno;
+}
+EXPORT_SYMBOL_GPL(blk_status_to_errno);
+
+static void print_req_error(struct request *req, blk_status_t status)
+{
+       int idx = (__force int)status;
+
+       if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+               return;
+
+       printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
+                          __func__, blk_errors[idx].name, req->rq_disk ?
+                          req->rq_disk->disk_name : "?",
+                          (unsigned long long)blk_rq_pos(req));
+}
+
 static void req_bio_endio(struct request *rq, struct bio *bio,
-                         unsigned int nbytes, int error)
+                         unsigned int nbytes, blk_status_t error)
 {
        if (error)
-               bio->bi_error = error;
+               bio->bi_status = error;
 
        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);
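
Taken together, the table and the two helpers above give a single translation point between errno values and the new blk_status_t codes. A minimal sketch (not part of this diff; example_complete() is an invented name) of a blk-mq driver that still tracks errors as errnos internally:

    static void example_complete(struct request *rq, int error)
    {
            /* anything not listed in blk_errors[] collapses to BLK_STS_IOERR */
            blk_status_t status = errno_to_blk_status(error);

            blk_mq_end_request(rq, status);
    }

Note that the round trip is lossy by design: blk_status_to_errno(errno_to_blk_status(-EXDEV)) comes back as -EIO.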
@@ -177,10 +236,12 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time. Queue lock must be held.
+ *   restarted around the specified time.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
+       lockdep_assert_held(q->queue_lock);
+
        if (likely(!blk_queue_dead(q)))
                queue_delayed_work(kblockd_workqueue, &q->delay_work,
                                   msecs_to_jiffies(msecs));
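
This pattern repeats throughout the patch: the former "Queue lock must be held." sentence in the kerneldoc becomes lockdep_assert_held(), which costs nothing on production configs and complains at runtime on lockdep-enabled kernels. A sketch of a conforming caller (example_defer() is invented for illustration):

    static void example_defer(struct request_queue *q)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            blk_delay_queue(q, 100);        /* re-run the queue in ~100 ms */
            spin_unlock_irqrestore(q->queue_lock, flags);
    }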
@@ -198,6 +259,8 @@ EXPORT_SYMBOL(blk_delay_queue);
  **/
 void blk_start_queue_async(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        blk_run_queue_async(q);
 }
@@ -210,10 +273,11 @@ EXPORT_SYMBOL(blk_start_queue_async);
  * Description:
  *   blk_start_queue() will clear the stop flag on the queue, and call
  *   the request_fn for the queue if it was in a stopped state when
- *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ *   entered. Also see blk_stop_queue().
  **/
 void blk_start_queue(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
@@ -233,10 +297,12 @@ EXPORT_SYMBOL(blk_start_queue);
  *   or if it simply chooses not to queue more I/O at one point, it can
  *   call this function to prevent the request_fn from being called until
  *   the driver has signalled it's ready to go again. This happens by calling
- *   blk_start_queue() to restart queue operations. Queue lock must be held.
+ *   blk_start_queue() to restart queue operations.
  **/
 void blk_stop_queue(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+
        cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -289,6 +355,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 inline void __blk_run_queue_uncond(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+
        if (unlikely(blk_queue_dead(q)))
                return;
 
@@ -310,11 +378,12 @@ EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
  * @q: The queue to run
  *
  * Description:
- *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    See @blk_run_queue.
  */
 void __blk_run_queue(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+
        if (unlikely(blk_queue_stopped(q)))
                return;
 
@@ -328,10 +397,17 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us. The caller must hold the queue lock.
+ *    of us.
+ *
+ * Note:
+ *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
+ *    has canceled q->delay_work, callers must hold the queue lock to avoid
+ *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
  */
 void blk_run_queue_async(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+
        if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
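
The new Note: spells out why this assertion is more than documentation: blk_cleanup_queue() marks the queue dead under the queue lock before cancelling q->delay_work, so a caller holding the lock either observes the dead flag and does nothing, or finishes before the cancel runs. A sketch of a safe caller (example_kick_queue() is hypothetical):

    static void example_kick_queue(struct request_queue *q)
    {
            spin_lock_irq(q->queue_lock);
            blk_run_queue_async(q); /* the dead-queue check runs under the lock */
            spin_unlock_irq(q->queue_lock);
    }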
@@ -648,13 +724,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
        if (!rl->rq_pool)
                return -ENOMEM;
 
+       if (rl != &q->root_rl)
+               WARN_ON_ONCE(!blk_get_queue(q));
+
        return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-       if (rl->rq_pool)
+       if (rl->rq_pool) {
                mempool_destroy(rl->rq_pool);
+               if (rl != &q->root_rl)
+                       blk_put_queue(q);
+       }
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
@@ -726,7 +808,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        if (q->id < 0)
                goto fail_q;
 
-       q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+       q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (!q->bio_split)
                goto fail_id;
 
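
bioset_create() gained a flags argument in this series; BIOSET_NEED_BVECS asks for a bvec pool alongside the bio pool, which a queue's split bioset requires. A minimal sketch of a driver-private bioset set up the same way (example_init_bioset() is invented):

    static int example_init_bioset(struct bio_set **bs)
    {
            *bs = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
            return *bs ? 0 : -ENOMEM;
    }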
@@ -1071,6 +1153,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
        int may_queue;
        req_flags_t rq_flags = RQF_ALLOCED;
 
+       lockdep_assert_held(q->queue_lock);
+
        if (unlikely(blk_queue_dying(q)))
                return ERR_PTR(-ENODEV);
 
@@ -1244,12 +1328,19 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
        struct request_list *rl;
        struct request *rq;
 
+       lockdep_assert_held(q->queue_lock);
+
        rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
        rq = __get_request(rl, op, bio, gfp_mask);
        if (!IS_ERR(rq))
                return rq;
 
+       if (op & REQ_NOWAIT) {
+               blk_put_rl(rl);
+               return ERR_PTR(-EAGAIN);
+       }
+
        if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
                return rq;
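
With REQ_NOWAIT, exhausting the request list now fails fast with -EAGAIN instead of sleeping on the mempool. Seen from a submitter (example_submit_nowait() is invented; the failure surfaces as BLK_STS_AGAIN via bio_wouldblock_error(), sketched further down):

    static void example_submit_nowait(struct bio *bio)
    {
            bio->bi_opf |= REQ_NOWAIT;
            submit_bio(bio);        /* completion may carry bi_status == BLK_STS_AGAIN */
    }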
@@ -1277,8 +1368,8 @@ retry:
        goto retry;
 }
 
-static struct request *blk_old_get_request(struct request_queue *q, int rw,
-               gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q,
+                                          unsigned int op, gfp_t gfp_mask)
 {
        struct request *rq;
 
@@ -1286,7 +1377,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
        create_io_context(gfp_mask, q->node);
 
        spin_lock_irq(q->queue_lock);
-       rq = get_request(q, rw, NULL, gfp_mask);
+       rq = get_request(q, op, NULL, gfp_mask);
        if (IS_ERR(rq)) {
                spin_unlock_irq(q->queue_lock);
                return rq;
@@ -1299,14 +1390,24 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
        return rq;
 }
 
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+                               gfp_t gfp_mask)
 {
-       if (q->mq_ops)
-               return blk_mq_alloc_request(q, rw,
+       struct request *req;
+
+       if (q->mq_ops) {
+               req = blk_mq_alloc_request(q, op,
                        (gfp_mask & __GFP_DIRECT_RECLAIM) ?
                                0 : BLK_MQ_REQ_NOWAIT);
-       else
-               return blk_old_get_request(q, rw, gfp_mask);
+               if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+                       q->mq_ops->initialize_rq_fn(req);
+       } else {
+               req = blk_old_get_request(q, op, gfp_mask);
+               if (!IS_ERR(req) && q->initialize_rq_fn)
+                       q->initialize_rq_fn(req);
+       }
+
+       return req;
 }
 EXPORT_SYMBOL(blk_get_request);
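
initialize_rq_fn gives a driver one hook, covering both the blk-mq and legacy paths, to prepare per-request private data for requests handed out by blk_get_request(). A sketch assuming a driver PDU (struct example_cmd and example_initialize_rq() are invented here):

    struct example_cmd {
            int retries;
    };

    static void example_initialize_rq(struct request *rq)
    {
            struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

            memset(cmd, 0, sizeof(*cmd));   /* fresh state per passthrough request */
    }

The hook would be wired up as .initialize_rq_fn in the driver's blk_mq_ops (or on the request_queue for the legacy path).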
 
@@ -1322,6 +1423,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+       lockdep_assert_held(q->queue_lock);
+
        blk_delete_timer(rq);
        blk_clear_rq_complete(rq);
        trace_block_rq_requeue(q, rq);
@@ -1396,9 +1499,6 @@ static void blk_pm_put_request(struct request *rq)
 static inline void blk_pm_put_request(struct request *rq) {}
 #endif
 
-/*
- * queue lock must be held
- */
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
        req_flags_t rq_flags = req->rq_flags;
@@ -1411,6 +1511,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                return;
        }
 
+       lockdep_assert_held(q->queue_lock);
+
        blk_pm_put_request(req);
 
        elv_completed_request(q, req);
@@ -1659,10 +1761,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
         */
        blk_queue_bounce(q, &bio);
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }
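
This hunk shows the mechanical half of the conversion: a bio now carries a blk_status_t in ->bi_status rather than an errno in ->bi_error. The failure idiom, as a sketch (helper name invented):

    static void example_fail_bio(struct bio *bio)
    {
            bio->bi_status = BLK_STS_IOERR;
            bio_endio(bio);
    }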
@@ -1720,7 +1822,10 @@ get_rq:
        req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
        if (IS_ERR(req)) {
                __wbt_done(q->rq_wb, wb_acct);
-               bio->bi_error = PTR_ERR(req);
+               if (PTR_ERR(req) == -ENOMEM)
+                       bio->bi_status = BLK_STS_RESOURCE;
+               else
+                       bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                goto out_unlock;
        }
@@ -1875,7 +1980,7 @@ generic_make_request_checks(struct bio *bio)
 {
        struct request_queue *q;
        int nr_sectors = bio_sectors(bio);
-       int err = -EIO;
+       blk_status_t status = BLK_STS_IOERR;
        char b[BDEVNAME_SIZE];
        struct hd_struct *part;
 
@@ -1894,6 +1999,14 @@ generic_make_request_checks(struct bio *bio)
                goto end_io;
        }
 
+       /*
+        * For a REQ_NOWAIT request, fail with -EOPNOTSUPP if the
+        * queue is not a request-based queue.
+        */
+
+       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+               goto not_supported;
+
        part = bio->bi_bdev->bd_part;
        if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
@@ -1918,7 +2031,7 @@ generic_make_request_checks(struct bio *bio)
            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
                bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
                if (!nr_sectors) {
-                       err = 0;
+                       status = BLK_STS_OK;
                        goto end_io;
                }
        }
@@ -1970,9 +2083,9 @@ generic_make_request_checks(struct bio *bio)
        return true;
 
 not_supported:
-       err = -EOPNOTSUPP;
+       status = BLK_STS_NOTSUPP;
 end_io:
-       bio->bi_error = err;
+       bio->bi_status = status;
        bio_endio(bio);
        return false;
 }
@@ -2051,7 +2164,7 @@ blk_qc_t generic_make_request(struct bio *bio)
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               if (likely(blk_queue_enter(q, false) == 0)) {
+               if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
                        struct bio_list lower, same;
 
                        /* Create a fresh bio_list for all subordinate requests */
@@ -2076,7 +2189,11 @@ blk_qc_t generic_make_request(struct bio *bio)
                        bio_list_merge(&bio_list_on_stack[0], &same);
                        bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
                } else {
-                       bio_io_error(bio);
+                       if (unlikely(!blk_queue_dying(q) &&
+                                       (bio->bi_opf & REQ_NOWAIT)))
+                               bio_wouldblock_error(bio);
+                       else
+                               bio_io_error(bio);
                }
                bio = bio_list_pop(&bio_list_on_stack[0]);
        } while (bio);
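
bio_wouldblock_error() is introduced elsewhere in this series (in <linux/bio.h>); assuming it follows the bi_status convention above, its effect is equivalent to:

    static inline void bio_wouldblock_error(struct bio *bio)
    {
            bio->bi_status = BLK_STS_AGAIN;
            bio_endio(bio);
    }

The blk_queue_dying() test matters: a dying queue should still report a hard error rather than ask the caller to retry.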
@@ -2177,29 +2294,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
        unsigned long flags;
        int where = ELEVATOR_INSERT_BACK;
 
        if (blk_cloned_rq_check_limits(q, rq))
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (rq->rq_disk &&
            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (q->mq_ops) {
                if (blk_queue_io_stat(q))
                        blk_account_io_start(rq, true);
                blk_mq_sched_insert_request(rq, false, true, false, false);
-               return 0;
+               return BLK_STS_OK;
        }
 
        spin_lock_irqsave(q->queue_lock, flags);
        if (unlikely(blk_queue_dying(q))) {
                spin_unlock_irqrestore(q->queue_lock, flags);
-               return -ENODEV;
+               return BLK_STS_IOERR;
        }
 
        /*
@@ -2216,7 +2333,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
-       return 0;
+       return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
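
Callers of blk_insert_cloned_request() (essentially device mapper's request-based path) now test against BLK_STS_* rather than 0/-errno. A hypothetical caller, also exercising the conversion helper from the top of the file:

    static blk_status_t example_dispatch_clone(struct request_queue *q,
                                               struct request *clone)
    {
            blk_status_t ret = blk_insert_cloned_request(q, clone);

            if (ret != BLK_STS_OK)
                    pr_warn("clone dispatch failed: %d\n",
                            blk_status_to_errno(ret));
            return ret;
    }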
 
@@ -2232,9 +2349,6 @@ EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
  *
  * Return:
  *     The number of bytes to fail.
- *
- * Context:
- *     queue_lock must be held.
  */
 unsigned int blk_rq_err_bytes(const struct request *rq)
 {
@@ -2374,15 +2488,14 @@ void blk_account_io_start(struct request *rq, bool new_io)
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_peek_request(struct request_queue *q)
 {
        struct request *rq;
        int ret;
 
+       lockdep_assert_held(q->queue_lock);
+
        while ((rq = __elv_next_request(q)) != NULL) {
 
                rq = blk_pm_peek_request(q, rq);
@@ -2450,15 +2563,14 @@ struct request *blk_peek_request(struct request_queue *q)
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
-                       int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
-
                        rq->rq_flags |= RQF_QUIET;
                        /*
                         * Mark this request as started so we don't trigger
                         * any debug logic in the end I/O path.
                         */
                        blk_start_request(rq);
-                       __blk_end_request_all(rq, err);
+                       __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
+                                       BLK_STS_TARGET : BLK_STS_IOERR);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
@@ -2499,12 +2611,11 @@ void blk_dequeue_request(struct request *rq)
  *
  *     Block internal functions which don't want to start timer should
  *     call blk_dequeue_request().
- *
- * Context:
- *     queue_lock must be held.
  */
 void blk_start_request(struct request *req)
 {
+       lockdep_assert_held(req->q->queue_lock);
+
        blk_dequeue_request(req);
 
        if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
@@ -2529,14 +2640,13 @@ EXPORT_SYMBOL(blk_start_request);
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_fetch_request(struct request_queue *q)
 {
        struct request *rq;
 
+       lockdep_assert_held(q->queue_lock);
+
        rq = blk_peek_request(q);
        if (rq)
                blk_start_request(rq);
@@ -2547,7 +2657,7 @@ EXPORT_SYMBOL(blk_fetch_request);
 /**
  * blk_update_request - Special helper function for request stacking drivers
  * @req:      the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete @req
  *
  * Description:
@@ -2566,49 +2676,19 @@ EXPORT_SYMBOL(blk_fetch_request);
  *     %false - this request doesn't have any more data
  *     %true  - this request has more data
  **/
-bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+bool blk_update_request(struct request *req, blk_status_t error,
+               unsigned int nr_bytes)
 {
        int total_bytes;
 
-       trace_block_rq_complete(req, error, nr_bytes);
+       trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
 
        if (!req->bio)
                return false;
 
-       if (error && !blk_rq_is_passthrough(req) &&
-           !(req->rq_flags & RQF_QUIET)) {
-               char *error_type;
-
-               switch (error) {
-               case -ENOLINK:
-                       error_type = "recoverable transport";
-                       break;
-               case -EREMOTEIO:
-                       error_type = "critical target";
-                       break;
-               case -EBADE:
-                       error_type = "critical nexus";
-                       break;
-               case -ETIMEDOUT:
-                       error_type = "timeout";
-                       break;
-               case -ENOSPC:
-                       error_type = "critical space allocation";
-                       break;
-               case -ENODATA:
-                       error_type = "critical medium";
-                       break;
-               case -EIO:
-               default:
-                       error_type = "I/O";
-                       break;
-               }
-               printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
-                                  __func__, error_type, req->rq_disk ?
-                                  req->rq_disk->disk_name : "?",
-                                  (unsigned long long)blk_rq_pos(req));
-
-       }
+       if (unlikely(error && !blk_rq_is_passthrough(req) &&
+                    !(req->rq_flags & RQF_QUIET)))
+               print_req_error(req, error);
 
        blk_account_io_completion(req, nr_bytes);
 
@@ -2674,7 +2754,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-static bool blk_update_bidi_request(struct request *rq, int error,
+static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
                                    unsigned int nr_bytes,
                                    unsigned int bidi_bytes)
 {
@@ -2712,13 +2792,12 @@ void blk_unprep_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_unprep_request);
 
-/*
- * queue lock must be held
- */
-void blk_finish_request(struct request *req, int error)
+void blk_finish_request(struct request *req, blk_status_t error)
 {
        struct request_queue *q = req->q;
 
+       lockdep_assert_held(req->q->queue_lock);
+
        if (req->rq_flags & RQF_STATS)
                blk_stat_add(req);
 
@@ -2752,7 +2831,7 @@ EXPORT_SYMBOL(blk_finish_request);
 /**
  * blk_end_bidi_request - Complete a bidi request
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2766,7 +2845,7 @@ EXPORT_SYMBOL(blk_finish_request);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool blk_end_bidi_request(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
                                 unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        struct request_queue *q = rq->q;
@@ -2785,7 +2864,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
 /**
  * __blk_end_bidi_request - Complete a bidi request with queue lock held
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2797,9 +2876,11 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool __blk_end_bidi_request(struct request *rq, int error,
+static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
                                   unsigned int nr_bytes, unsigned int bidi_bytes)
 {
+       lockdep_assert_held(rq->q->queue_lock);
+
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                return true;
 
@@ -2811,7 +2892,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2822,7 +2903,8 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, blk_status_t error,
+               unsigned int nr_bytes)
 {
        return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
@@ -2831,12 +2913,12 @@ EXPORT_SYMBOL(blk_end_request);
 /**
  * blk_end_request_all - Helper function for drivers to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq.
  */
-void blk_end_request_all(struct request *rq, int error)
+void blk_end_request_all(struct request *rq, blk_status_t error)
 {
        bool pending;
        unsigned int bidi_bytes = 0;
@@ -2852,7 +2934,7 @@ EXPORT_SYMBOL(blk_end_request_all);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2862,8 +2944,11 @@ EXPORT_SYMBOL(blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool __blk_end_request(struct request *rq, blk_status_t error,
+               unsigned int nr_bytes)
 {
+       lockdep_assert_held(rq->q->queue_lock);
+
        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(__blk_end_request);
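
For legacy drivers the completion helpers keep their shape; only the error type changes, and the __-prefixed variants now assert the queue lock they always required. A hypothetical interrupt handler using the locked variants end to end:

    static irqreturn_t example_irq(int irq, void *data)
    {
            struct request_queue *q = data;
            struct request *rq;

            spin_lock(q->queue_lock);
            rq = blk_fetch_request(q);      /* peek + start; also asserts the lock */
            if (rq)
                    __blk_end_request_all(rq, BLK_STS_OK);
            spin_unlock(q->queue_lock);

            return IRQ_HANDLED;
    }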
@@ -2871,16 +2956,18 @@ EXPORT_SYMBOL(__blk_end_request);
 /**
  * __blk_end_request_all - Helper function for drivers to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq.  Must be called with queue lock held.
  */
-void __blk_end_request_all(struct request *rq, int error)
+void __blk_end_request_all(struct request *rq, blk_status_t error)
 {
        bool pending;
        unsigned int bidi_bytes = 0;
 
+       lockdep_assert_held(rq->q->queue_lock);
+
        if (unlikely(blk_bidi_rq(rq)))
                bidi_bytes = blk_rq_bytes(rq->next_rq);
 
@@ -2892,7 +2979,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Complete the current consecutively mapped chunk from @rq.  Must
@@ -2902,7 +2989,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  */
-bool __blk_end_request_cur(struct request *rq, int error)
+bool __blk_end_request_cur(struct request *rq, blk_status_t error)
 {
        return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
@@ -3145,6 +3232,8 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
                            bool from_schedule)
        __releases(q->queue_lock)
 {
+       lockdep_assert_held(q->queue_lock);
+
        trace_block_unplug(q, depth, !from_schedule);
 
        if (from_schedule)
@@ -3243,7 +3332,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 * Short-circuit if @q is dead
                 */
                if (unlikely(blk_queue_dying(q))) {
-                       __blk_end_request_all(rq, -ENODEV);
+                       __blk_end_request_all(rq, BLK_STS_IOERR);
                        continue;
                }