git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/md/dm-rq.c
dm rq: change ->rq_end_io calling conventions
[karo-tx-linux.git] / drivers / md / dm-rq.c
index 28955b94d2b26f47d7c54217d84c2a8a11af692a..920e854caba9849ceeb4bd1bcb6c6a8c6b28c067 100644 (file)
@@ -280,14 +280,14 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq);
        else
-               dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
+               dm_mq_delay_requeue_request(rq, delay_requeue ? 100/*ms*/ : 0);
 
        rq_completed(md, rw, false);
 }
 
 static void dm_done(struct request *clone, int error, bool mapped)
 {
-       int r = error;
+       int r = DM_ENDIO_DONE;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;
 
@@ -298,20 +298,28 @@ static void dm_done(struct request *clone, int error, bool mapped)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }
 
-       if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
-                    !clone->q->limits.max_write_same_sectors))
-               disable_write_same(tio->md);
+       if (unlikely(error == -EREMOTEIO)) {
+               if (req_op(clone) == REQ_OP_WRITE_SAME &&
+                   !clone->q->limits.max_write_same_sectors)
+                       disable_write_same(tio->md);
+               if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+                   !clone->q->limits.max_write_zeroes_sectors)
+                       disable_write_zeroes(tio->md);
+       }
 
-       if (r <= 0)
+       switch (r) {
+       case DM_ENDIO_DONE:
                /* The target wants to complete the I/O */
-               dm_end_request(clone, r);
-       else if (r == DM_ENDIO_INCOMPLETE)
+               dm_end_request(clone, error);
+               break;
+       case DM_ENDIO_INCOMPLETE:
                /* The target will handle the I/O */
                return;
-       else if (r == DM_ENDIO_REQUEUE)
+       case DM_ENDIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
-       else {
+               break;
+       default:
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
@@ -358,7 +366,7 @@ static void dm_complete_request(struct request *rq, int error)
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
-               blk_mq_complete_request(rq, error);
+               blk_mq_complete_request(rq);
 }
 
 /*
@@ -755,13 +763,14 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
+               blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }
 
        return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static struct blk_mq_ops dm_mq_ops = {
+static const struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
@@ -809,10 +818,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
        dm_init_md_queue(md);
 
        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
-       blk_mq_register_dev(disk_to_dev(md->disk), q);
+       err = blk_mq_register_dev(disk_to_dev(md->disk), q);
+       if (err)
+               goto out_cleanup_queue;
 
        return 0;
 
+out_cleanup_queue:
+       blk_cleanup_queue(q);
 out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
 out_kfree_tag_set: