git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/block/null_blk.c
null_blk: don't pass always-0 req->errors to blk_mq_complete_request
[karo-tx-linux.git] / drivers / block / null_blk.c
index c0e14e54909b41b8f8e2f916be0d1169464ff643..0ca4aa34edb9cf6450eb83ab4ed3a511ccadae36 100644 (file)
@@ -117,6 +117,10 @@ static bool use_lightnvm;
 module_param(use_lightnvm, bool, S_IRUGO);
 MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
 
+static bool blocking;
+module_param(blocking, bool, S_IRUGO);
+MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -277,7 +281,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode)  {
                case NULL_Q_MQ:
-                       blk_mq_complete_request(cmd->rq, cmd->rq->errors);
+                       blk_mq_complete_request(cmd->rq, 0);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
@@ -357,6 +361,8 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 
+       might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
        if (irqmode == NULL_IRQ_TIMER) {
                hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                cmd->timer.function = null_cmd_timer_expired;
@@ -392,7 +398,7 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
        return 0;
 }
 
-static struct blk_mq_ops null_mq_ops = {
+static const struct blk_mq_ops null_mq_ops = {
        .queue_rq       = null_queue_rq,
        .init_hctx      = null_init_hctx,
        .complete       = null_softirq_done_fn,
@@ -420,7 +426,8 @@ static void null_lnvm_end_io(struct request *rq, int error)
 {
        struct nvm_rq *rqd = rq->end_io_data;
 
-       nvm_end_io(rqd, error);
+       rqd->error = error;
+       nvm_end_io(rqd);
 
        blk_put_request(rq);
 }
@@ -431,19 +438,12 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
        struct request *rq;
        struct bio *bio = rqd->bio;
 
-       rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
+       rq = blk_mq_alloc_request(q,
+               op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return -ENOMEM;
 
-       rq->cmd_type = REQ_TYPE_DRV_PRIV;
-       rq->__sector = bio->bi_iter.bi_sector;
-       rq->ioprio = bio_prio(bio);
-
-       if (bio_has_data(bio))
-               rq->nr_phys_segments = bio_phys_segments(q, bio);
-
-       rq->__data_len = bio->bi_iter.bi_size;
-       rq->bio = rq->biotail = bio;
+       blk_init_request_from_bio(rq, bio);
 
        rq->end_io_data = rqd;
 
@@ -460,7 +460,6 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
 
        id->ver_id = 0x1;
        id->vmnt = 0;
-       id->cgrps = 1;
        id->cap = 0x2;
        id->dom = 0x1;
 
@@ -479,7 +478,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
 
        sector_div(size, bs); /* convert size to pages */
        size >>= 8; /* concert size to pgs pr blk */
-       grp = &id->groups[0];
+       grp = &id->grp;
        grp->mtype = 0;
        grp->fmtype = 0;
        grp->num_ch = 1;
@@ -724,6 +723,9 @@ static int null_add_dev(void)
                nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
                nullb->tag_set.driver_data = nullb;
 
+               if (blocking)
+                       nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;
+
                rv = blk_mq_alloc_tag_set(&nullb->tag_set);
                if (rv)
                        goto out_cleanup_queues;