blk-mq: respect rq_affinity
author    Christoph Hellwig <hch@lst.de>
          Fri, 25 Apr 2014 09:32:53 +0000 (02:32 -0700)
committer Jens Axboe <axboe@fb.com>
          Fri, 25 Apr 2014 14:24:07 +0000 (08:24 -0600)
The blk-mq code is using its own version of the I/O completion affinity
tunables, which causes a few issues:

 - the rq_affinity sysfs file doesn't work for blk-mq devices, even
   though it is still present, thus breaking existing tuning setups.
 - the rq_affinity = 1 mode, which is the default for legacy
   request-based drivers, isn't implemented at all.
 - blk-mq drivers don't implement any completion affinity with the default
   flag settings.

This patch removes the blk-mq ipi_redirect flag and sysfs file, as well
as the internal BLK_MQ_F_SHOULD_IPI flag, and replaces them with code that
respects the queue-wide rq_affinity flags and also implements the
rq_affinity = 1 mode.

This means I/O completion affinity can now only be tuned block-queue wide
instead of per context, which seems more sensible to me anyway.
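
For context: rq_affinity = 0 leaves the completion on whichever CPU the
IRQ lands on, rq_affinity = 1 allows completion on any CPU that shares a
cache with the submitter, and rq_affinity = 2 forces completion on the
exact submitting CPU. These values are translated into the two queue
flags tested below by the legacy sysfs store handler in
block/blk-sysfs.c; a rough sketch of that handler (reconstructed from
the 3.15-era tree, not part of this patch):

	static ssize_t
	queue_rq_affinity_store(struct request_queue *q, const char *page,
				size_t count)
	{
		unsigned long val;
		ssize_t ret = queue_var_store(&val, page, count);

		if (ret < 0)
			return ret;

		spin_lock_irq(q->queue_lock);
		if (val == 2) {
			/* rq_affinity = 2: force the submitting CPU */
			queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
			queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
		} else if (val == 1) {
			/* rq_affinity = 1: a cache-sharing CPU is enough */
			queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
			queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
		} else {
			/* rq_affinity = 0: no completion steering */
			queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
			queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
		}
		spin_unlock_irq(q->queue_lock);
		return ret;
	}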

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-mq.h
include/linux/blk-mq.h

index 9176a69848575f42bf50632362dc213a37ab4792..8145b5b25b4b302419cadee9a61def400188c09d 100644 (file)
@@ -203,42 +203,6 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
-static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
-{
-       ssize_t ret;
-
-       spin_lock(&hctx->lock);
-       ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
-       spin_unlock(&hctx->lock);
-
-       return ret;
-}
-
-static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
-                                        const char *page, size_t len)
-{
-       struct blk_mq_ctx *ctx;
-       unsigned long ret;
-       unsigned int i;
-
-       if (kstrtoul(page, 10, &ret)) {
-               pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
-               return -EINVAL;
-       }
-
-       spin_lock(&hctx->lock);
-       if (ret)
-               hctx->flags |= BLK_MQ_F_SHOULD_IPI;
-       else
-               hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
-       spin_unlock(&hctx->lock);
-
-       hctx_for_each_ctx(hctx, ctx, i)
-               ctx->ipi_redirect = !!ret;
-
-       return len;
-}
-
 static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
 {
        return blk_mq_tag_sysfs_show(hctx->tags, page);
@@ -307,11 +271,6 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
 };
-static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
-       .attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
-       .show = blk_mq_hw_sysfs_ipi_show,
-       .store = blk_mq_hw_sysfs_ipi_store,
-};
 static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
@@ -326,7 +285,6 @@ static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
-       &blk_mq_hw_sysfs_ipi.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        NULL,
index a84112c94e746075d5c6dafc90a0a7236a5305a1..f2e92eb92803b16f66e348a60d0b229205b9039c 100644 (file)
@@ -326,15 +326,19 @@ static void __blk_mq_complete_request_remote(void *data)
 void __blk_mq_complete_request(struct request *rq)
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
+       bool shared = false;
        int cpu;
 
-       if (!ctx->ipi_redirect) {
+       if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }
 
        cpu = get_cpu();
-       if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+       if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+               shared = cpus_share_cache(cpu, ctx->cpu);
+
+       if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
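
Putting the hunk in context, the completion path after this patch reads
roughly as follows. Only the lines changed above are verbatim; the tail
of the function (the remote-IPI call and put_cpu()) lies outside the
hunk and is reconstructed from the surrounding 3.15-era code, so treat
it as a sketch:

	void __blk_mq_complete_request(struct request *rq)
	{
		struct blk_mq_ctx *ctx = rq->mq_ctx;
		bool shared = false;
		int cpu;

		/* rq_affinity = 0: run the completion right here */
		if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
			rq->q->softirq_done_fn(rq);
			return;
		}

		cpu = get_cpu();

		/* rq_affinity = 1: a cache-sharing CPU counts as local */
		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
			shared = cpus_share_cache(cpu, ctx->cpu);

		if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
			/* bounce the completion to the submitting CPU */
			rq->csd.func = __blk_mq_complete_request_remote;
			rq->csd.info = rq;
			rq->csd.flags = 0;
			__smp_call_function_single(ctx->cpu, &rq->csd, 0);
		} else {
			rq->q->softirq_done_fn(rq);
		}
		put_cpu();
	}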
index b41a784de50dfbfff4c504c671bbe3e2ff815f26..1ae364ceaf8bbaae6f33c07cb3292c8341807ac2 100644 (file)
@@ -11,7 +11,6 @@ struct blk_mq_ctx {
 
        unsigned int            cpu;
        unsigned int            index_hw;
-       unsigned int            ipi_redirect;
 
        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
index ab469d525894a765aaecfd90f999f6f11de611d1..3b561d651a0229326b41b275ab79e46f377e312a 100644 (file)
@@ -122,7 +122,6 @@ enum {
 
        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_SHOULD_SORT    = 1 << 1,
-       BLK_MQ_F_SHOULD_IPI     = 1 << 2,
 
        BLK_MQ_S_STOPPED        = 0,