mmc: queue: Factor out mmc_queue_alloc_bounce_sgs()
author     Adrian Hunter <adrian.hunter@intel.com>
           Tue, 29 Nov 2016 10:09:12 +0000 (12:09 +0200)
committer  Ulf Hansson <ulf.hansson@linaro.org>
           Mon, 5 Dec 2016 09:31:05 +0000 (10:31 +0100)
In preparation for supporting a queue of requests, factor out
mmc_queue_alloc_bounce_sgs().

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Harjani Ritesh <riteshh@codeaurora.org>
[Ulf: Fixed compiler warning]
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
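
The helper introduced below allocates its scatterlists with mmc_alloc_sg(), which reports failure through an int out-parameter; the helper returns on the first non-zero error and leaves freeing of anything already allocated to the caller's cleanup_queue path. For reference, here is a minimal, self-contained user-space sketch of that same out-parameter allocation pattern. The fake_* names are hypothetical stand-ins for illustration only, not the kernel API.

/*
 * Sketch of the "allocate, report status via *err" pattern used by
 * mmc_alloc_sg() and mmc_queue_alloc_bounce_sgs(). All identifiers
 * here are invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_sg { unsigned int nents; };

/* Stand-in for mmc_alloc_sg(): allocate, report status through *err. */
static struct fake_sg *fake_alloc_sg(unsigned int nents, int *err)
{
	struct fake_sg *sg = malloc(sizeof(*sg));

	if (!sg) {
		*err = -1;	/* would be -ENOMEM in the kernel */
		return NULL;
	}
	sg->nents = nents;
	*err = 0;
	return sg;
}

/* Mirrors the shape of the factored-out helper: bail out on the first error. */
static int fake_alloc_bounce_sgs(struct fake_sg **cur, struct fake_sg **bounce,
				 unsigned int bouncesz)
{
	int ret;

	*cur = fake_alloc_sg(1, &ret);
	if (ret)
		return ret;

	*bounce = fake_alloc_sg(bouncesz / 512, &ret);
	return ret;
}

int main(void)
{
	struct fake_sg *cur = NULL, *bounce = NULL;

	/* Caller checks the single return value, as mmc_init_queue() now does. */
	if (fake_alloc_bounce_sgs(&cur, &bounce, 65536))
		fprintf(stderr, "allocation failed\n");
	else
		printf("sg nents: %u, bounce nents: %u\n",
		       cur->nents, bounce->nents);

	free(cur);
	free(bounce);
	return 0;
}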
drivers/mmc/card/queue.c

index cca143afd12ff6eece3584ccc17ddf5b1dce9f29..46b7b1f0cade256b43b693a76463e8880bc8caac 100644
@@ -211,6 +211,30 @@ static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
 
        return true;
 }
+
+static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
+                                     unsigned int bouncesz)
+{
+       struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+       struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+       int ret;
+
+       mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+       if (ret)
+               return ret;
+
+       mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+       if (ret)
+               return ret;
+
+       mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+       if (ret)
+               return ret;
+
+       mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+
+       return ret;
+}
 #endif
 
 /**
@@ -227,6 +251,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 {
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
+       bool bounce = false;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
@@ -269,28 +294,15 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);
 
-                       mqrq_cur->sg = mmc_alloc_sg(1, &ret);
-                       if (ret)
-                               goto cleanup_queue;
-
-                       mqrq_cur->bounce_sg =
-                               mmc_alloc_sg(bouncesz / 512, &ret);
-                       if (ret)
-                               goto cleanup_queue;
-
-                       mqrq_prev->sg = mmc_alloc_sg(1, &ret);
-                       if (ret)
-                               goto cleanup_queue;
-
-                       mqrq_prev->bounce_sg =
-                               mmc_alloc_sg(bouncesz / 512, &ret);
+                       ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
                        if (ret)
                                goto cleanup_queue;
+                       bounce = true;
                }
        }
 #endif
 
-       if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+       if (!bounce) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));