git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
block: don't bother with bounce limits for make_request drivers
author: Christoph Hellwig <hch@lst.de>
Mon, 19 Jun 2017 07:26:23 +0000 (09:26 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Tue, 27 Jun 2017 18:13:45 +0000 (12:13 -0600)
We only call blk_queue_bounce for request-based drivers, so stop messing
with it for make_request based drivers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c
block/blk-settings.c
drivers/block/brd.c
drivers/block/drbd/drbd_main.c
drivers/block/rsxx/dev.c
drivers/nvdimm/blk.c
drivers/nvdimm/btt.c
drivers/nvdimm/pmem.c

index af393d5a96807c6c59ce45a031c14042b72b5e6a..8699c423fa6e18b5c410deebef7c2d498a179247 100644 (file)
@@ -989,6 +989,11 @@ int blk_init_allocated_queue(struct request_queue *q)
         */
        blk_queue_make_request(q, blk_queue_bio);
 
+       /*
+        * by default assume old behaviour and bounce for any highmem page
+        */
+       blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+
        q->sg_reserved_size = INT_MAX;
 
        /* Protect q->elevator from elevator_change */
index 05dfa3f270ae6a9f3cbb767db61adfb393080bd6..41e3aeb51c9a9c8332418f0d76063cbebfa34261 100644 (file)
@@ -2349,6 +2349,11 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
        blk_queue_make_request(q, blk_mq_make_request);
 
+       /*
+        * by default assume old behaviour and bounce for any highmem page
+        */
+       blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+
        /*
         * Do this after blk_queue_make_request() overrides it...
         */
index 4fa81ed383cab4ec1f543fa33e0536cbac8d7540..be1f115b538b3f874a8db5fd4eef000621821ad6 100644 (file)
@@ -172,11 +172,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
        q->nr_batching = BLK_BATCH_REQ;
 
        blk_set_default_limits(&q->limits);
-
-       /*
-        * by default assume old behaviour and bounce for any highmem page
-        */
-       blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
 EXPORT_SYMBOL(blk_queue_make_request);
 
index 57b574f2f66a9d75a86f56a187e0fc9273b7c593..6112e99bedf7bde65a0d698e69317165ecbc937c 100644 (file)
@@ -418,7 +418,6 @@ static struct brd_device *brd_alloc(int i)
 
        blk_queue_make_request(brd->brd_queue, brd_make_request);
        blk_queue_max_hw_sectors(brd->brd_queue, 1024);
-       blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
        /* This is so fdisk will align partitions on 4k, because of
         * direct_access API needing 4k alignment, returning a PFN
index 90680034ef57f94d56d4a4fa533cc3be9e0cd596..5fb99e06ebe44238054f35bf3521da9301881028 100644 (file)
@@ -2850,7 +2850,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
        /* Setting the max_hw_sectors to an odd value of 8kibyte here
           This triggers a max_bio_size message upon first attach or connect */
        blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
-       blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        q->queue_lock = &resource->req_lock;
 
        device->md_io.page = alloc_page(GFP_KERNEL);
index 4e8bdfa0aa317fcaa618f15b1de988a041e94c1b..7f4acebf46571d5cb5d8cbb2ce281ab5dbe14e30 100644 (file)
@@ -284,7 +284,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
        }
 
        blk_queue_make_request(card->queue, rsxx_make_request);
-       blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
        blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
        blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
index 79eb9fb358d5315c677d580572866518b0c74f57..f12d23c49771ca95d8fdf31949ec5be2805986c6 100644 (file)
@@ -273,7 +273,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 
        blk_queue_make_request(q, nd_blk_make_request);
        blk_queue_max_hw_sectors(q, UINT_MAX);
-       blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        q->queuedata = nsblk;
index 31b2d14e210d5ee139c0e934ce3a9d4de021a9a0..b6ba0618ea46736fefb053bc067adb1e4f7d726f 100644 (file)
@@ -1297,7 +1297,6 @@ static int btt_blk_init(struct btt *btt)
        blk_queue_make_request(btt->btt_queue, btt_make_request);
        blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
        blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
-       blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
        btt->btt_queue->queuedata = btt;
 
index 7bd383aeea14df8c95254aa3b440a81319d72461..6b577afb1d4494d33a1cc54baa2dadf47b700fda 100644 (file)
@@ -343,7 +343,6 @@ static int pmem_attach_disk(struct device *dev,
        blk_queue_make_request(q, pmem_make_request);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_max_hw_sectors(q, UINT_MAX);
-       blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;