rbd: queue_depth map option
author Ilya Dryomov <idryomov@gmail.com>
Tue, 23 Jun 2015 13:21:19 +0000 (16:21 +0300)
committer Ilya Dryomov <idryomov@gmail.com>
Thu, 25 Jun 2015 15:30:55 +0000 (18:30 +0300)
nr_requests (/sys/block/rbd<id>/queue/nr_requests) is pretty much
irrelevant in the blk-mq case because each driver sets its own max depth
that it can handle, and that's the number of tags that get preallocated
on setup.  Users can't increase the queue depth beyond that value by
writing to nr_requests.
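
For context, a blk-mq driver advertises its depth through struct blk_mq_tag_set
before the tags are allocated.  A minimal sketch of that generic pattern (the
example_* names are illustrative, not rbd code) shows why the preallocated tag
count, rather than nr_requests, bounds how many requests can be in flight:

    #include <linux/blk-mq.h>
    #include <linux/numa.h>
    #include <linux/string.h>

    static struct blk_mq_ops example_mq_ops;        /* would supply .queue_rq etc. */
    static struct blk_mq_tag_set example_tag_set;

    static int example_setup_tags(void)
    {
            memset(&example_tag_set, 0, sizeof(example_tag_set));
            example_tag_set.ops = &example_mq_ops;
            example_tag_set.queue_depth = 128;      /* tags preallocated from this */
            example_tag_set.numa_node = NUMA_NO_NODE;
            example_tag_set.nr_hw_queues = 1;

            /*
             * blk_mq_alloc_tag_set() preallocates queue_depth tags per hw
             * queue; writing a larger value to nr_requests later cannot
             * grow this pool.
             */
            return blk_mq_alloc_tag_set(&example_tag_set);
    }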

For rbd we are happy with the default BLKDEV_MAX_RQ (128) for most
cases but we want to give users the opportunity to increase it.
Introduce a new per-device queue_depth option to do just that:

    $ sudo rbd map -o queue_depth=1024 ...
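
Once the image is mapped, the effect is visible through the same sysfs
attribute mentioned above, which should then report the requested depth
(rbd0 here is just an example device name):

    $ cat /sys/block/rbd0/queue/nr_requests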

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Alex Elder <elder@linaro.org>
drivers/block/rbd.c

index e502bce02d2ca8353a74249e1ab85b1ef8c02766..b316ee48a30b2ee84814946cf88fae3519788026 100644
@@ -728,6 +728,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
  * (Per device) rbd map options
  */
 enum {
+       Opt_queue_depth,
        Opt_last_int,
        /* int args above */
        Opt_last_string,
@@ -738,6 +739,7 @@ enum {
 };
 
 static match_table_t rbd_opts_tokens = {
+       {Opt_queue_depth, "queue_depth=%d"},
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
@@ -748,9 +750,11 @@ static match_table_t rbd_opts_tokens = {
 };
 
 struct rbd_options {
+       int     queue_depth;
        bool    read_only;
 };
 
+#define RBD_QUEUE_DEPTH_DEFAULT        BLKDEV_MAX_RQ
 #define RBD_READ_ONLY_DEFAULT  false
 
 static int parse_rbd_opts_token(char *c, void *private)
@@ -774,6 +778,13 @@ static int parse_rbd_opts_token(char *c, void *private)
        }
 
        switch (token) {
+       case Opt_queue_depth:
+               if (intval < 1) {
+                       pr_err("queue_depth out of range\n");
+                       return -EINVAL;
+               }
+               rbd_opts->queue_depth = intval;
+               break;
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
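
The hunk shows only the new case; for orientation, parse_rbd_opts_token first
matches the option string against rbd_opts_tokens and, for integer tokens,
extracts the value that the switch then range-checks.  A rough sketch of that
surrounding code (using the linux/parser.h helpers, not the verbatim source):

    static int parse_rbd_opts_token(char *c, void *private)
    {
            struct rbd_options *rbd_opts = private;
            substring_t argstr[MAX_OPT_ARGS];
            int token, intval, ret;

            token = match_token(c, rbd_opts_tokens, argstr);
            if (token < Opt_last_int) {
                    /* int args carry a %d payload, e.g. queue_depth=1024 */
                    ret = match_int(&argstr[0], &intval);
                    if (ret < 0) {
                            pr_err("bad option arg (not int) at '%s'\n", c);
                            return ret;
                    }
            }

            switch (token) {
            /* ... cases shown in the hunk above ... */
            }

            return 0;
    }
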
@@ -3761,10 +3772,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 
        memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
        rbd_dev->tag_set.ops = &rbd_mq_ops;
-       rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
+       rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
        rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
-       rbd_dev->tag_set.flags =
-               BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+       rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        rbd_dev->tag_set.nr_hw_queues = 1;
        rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
 
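With the tag set filled in, rbd_init_disk goes on to allocate the tags and the
request queue from it; in outline (a sketch, error labels and the local q
variable illustrative), this is where the configured depth takes effect:

    struct request_queue *q;

    err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
    if (err)
            goto out_disk;

    q = blk_mq_init_queue(&rbd_dev->tag_set);
    if (IS_ERR(q)) {
            err = PTR_ERR(q);
            goto out_tag_set;
    }
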
@@ -4948,6 +4958,7 @@ static int rbd_add_parse_args(const char *buf,
                goto out_mem;
 
        rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
+       rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
 
        copts = ceph_parse_options(options, mon_addrs,
                                        mon_addrs + mon_addrs_size - 1,
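
Finally, this is also where the new option reaches the parser at all:
ceph_parse_options() takes a callback for option strings libceph itself does
not recognize, and rbd passes parse_rbd_opts_token with rbd_opts as the
private pointer, which is how queue_depth=%d lands in the switch shown
earlier.  Sketched with the argument list abbreviated:

    /* sketch of the callback wiring, not the full call */
    copts = ceph_parse_options(options, ...,
                               parse_rbd_opts_token, rbd_opts);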