git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'device-mapper/for-next'
author    Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 5 Nov 2015 01:22:28 +0000 (12:22 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 5 Nov 2015 01:22:28 +0000 (12:22 +1100)
35 files changed:
Documentation/device-mapper/delay.txt
drivers/md/dm-bufio.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-policy-cleaner.c
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-policy-smq.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-delay.c
drivers/md/dm-era-target.c
drivers/md/dm-exception-store.c
drivers/md/dm-flakey.c
drivers/md/dm-io.c
drivers/md/dm-linear.c
drivers/md/dm-log-userspace-base.c
drivers/md/dm-log-writes.c
drivers/md/dm-mpath.c
drivers/md/dm-region-hash.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-switch.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-verity.c
drivers/md/dm.c
drivers/md/persistent-data/dm-array.c
drivers/md/persistent-data/dm-block-manager.c
drivers/md/persistent-data/dm-block-manager.h
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/md/persistent-data/dm-space-map-common.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/persistent-data/dm-transaction-manager.h
include/linux/device-mapper.h
include/uapi/linux/dm-ioctl.h

diff --git a/Documentation/device-mapper/delay.txt b/Documentation/device-mapper/delay.txt
index 15adc55359e524dc43f0dfd4d9cdc6a436233724..a07b5927f4a88be767c146bd4d5ee259eadf3cf2 100644
@@ -8,6 +8,7 @@ Parameters:
     <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
 
 With separate write parameters, the first set is only used for reads.
+Offsets are specified in sectors.
 Delays are specified in milliseconds.
 
 Example scripts
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 83cc52eaf56d00a77debb4cbdc3f0ef62aaecb97..2dd33085b331da5bee79392dcf30f7e2c734f70b 100644
@@ -1598,11 +1598,11 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 
        c->bdev = bdev;
        c->block_size = block_size;
-       c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
-       c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
-                                 ffs(block_size) - 1 - PAGE_SHIFT : 0;
-       c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
-                                 PAGE_SHIFT - (ffs(block_size) - 1) : 0);
+       c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
+       c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
+                                 __ffs(block_size) - PAGE_SHIFT : 0;
+       c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
+                                 PAGE_SHIFT - __ffs(block_size) : 0);
 
        c->aux_size = aux_size;
        c->alloc_callback = alloc_callback;
@@ -1861,12 +1861,8 @@ static void __exit dm_bufio_exit(void)
        cancel_delayed_work_sync(&dm_bufio_work);
        destroy_workqueue(dm_bufio_wq);
 
-       for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
-               struct kmem_cache *kc = dm_bufio_caches[i];
-
-               if (kc)
-                       kmem_cache_destroy(kc);
-       }
+       for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
+               kmem_cache_destroy(dm_bufio_caches[i]);
 
        for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
                kfree(dm_bufio_cache_names[i]);
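
The ffs()-to-__ffs() substitutions above (and the analogous ones in the cache policies, exception store, region hash and snapshot store below) rely on the two helpers differing only in the base of the returned bit index. A small userspace sketch of that equivalence, using __builtin_ctz() as a stand-in for the kernel's __ffs():

#include <assert.h>
#include <stdio.h>
#include <strings.h>            /* ffs(): 1-based index of lowest set bit */

int main(void)
{
	unsigned shift;

	for (shift = 0; shift < 31; shift++) {
		unsigned block_size = 1u << shift;

		/* For any nonzero value, ffs(x) - 1 == __ffs(x); the kernel's
		 * __ffs() behaves like __builtin_ctz() here. */
		assert((unsigned)(ffs(block_size) - 1) ==
		       (unsigned)__builtin_ctz(block_size));
	}
	printf("ffs(x) - 1 matches __ffs(x) for all powers of two\n");
	return 0;
}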
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 0a17d1b91a811d712a350c399d94cdc50322f0d0..f6543f3a970f8daf44f3523e0209fd85a3c13f0b 100644
@@ -260,7 +260,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
                }
        }
 
-       return dm_bm_unlock(b);
+       dm_bm_unlock(b);
+
+       return 0;
 }
 
 static void __setup_mapping_info(struct dm_cache_metadata *cmd)
@@ -465,7 +467,9 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
        dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
        sb_flags = le32_to_cpu(disk_super->flags);
        cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
-       return dm_bm_unlock(sblock);
+       dm_bm_unlock(sblock);
+
+       return 0;
 
 bad:
        dm_bm_unlock(sblock);
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index 8a096456579bead67b182f27e65956341a7c8d73..14aaaf059f06a6a8197a8d3eb875abaf671b6305 100644
@@ -83,7 +83,7 @@ static struct list_head *list_pop(struct list_head *q)
 static int alloc_hash(struct hash *hash, unsigned elts)
 {
        hash->nr_buckets = next_power(elts >> 4, 16);
-       hash->hash_bits = ffs(hash->nr_buckets) - 1;
+       hash->hash_bits = __ffs(hash->nr_buckets);
        hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
 
        return hash->table ? 0 : -ENOMEM;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index aa1b41ca40f778dcb4e6c0e393ab4ee33d25d388..ddb26980cd669ec81bf00aa1370e71bd6e30e107 100644
@@ -1410,7 +1410,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
        mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
 
        mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
-       mq->hash_bits = ffs(mq->nr_buckets) - 1;
+       mq->hash_bits = __ffs(mq->nr_buckets);
        mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
        if (!mq->table)
                goto bad_alloc_table;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 1ffbeb1b3ea6088fd07bed9c68352821223c0b84..28d4586748d0033e1a2940bd8688b72ba34e84e5 100644
@@ -566,7 +566,7 @@ static int h_init(struct hash_table *ht, struct entry_space *es, unsigned nr_ent
 
        ht->es = es;
        nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
-       ht->hash_bits = ffs(nr_buckets) - 1;
+       ht->hash_bits = __ffs(nr_buckets);
 
        ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
        if (!ht->buckets)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index dd90d1236f4a46be1cf28d7ef8ebc5d64235adf7..2fd4c82961441e08b2b20d8db98bfb2bad86e05e 100644
@@ -2309,8 +2309,7 @@ static void destroy(struct cache *cache)
 {
        unsigned i;
 
-       if (cache->migration_pool)
-               mempool_destroy(cache->migration_pool);
+       mempool_destroy(cache->migration_pool);
 
        if (cache->all_io_ds)
                dm_deferred_set_destroy(cache->all_io_ds);
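
This hunk, and the matching ones in dm-crypt, dm-io, dm-log-userspace, dm-region-hash and dm.c, drop the caller-side NULL checks because mempool_destroy() (like kmem_cache_destroy() and kfree()) accepts NULL and returns quietly. A userspace sketch of that NULL-tolerant destructor idiom, with made-up names (my_pool, my_pool_destroy):

#include <stdlib.h>

struct my_pool {
	void *elements;
};

/* Tolerate NULL so error paths can call this unconditionally,
 * mirroring how mempool_destroy()/kmem_cache_destroy() behave. */
static void my_pool_destroy(struct my_pool *pool)
{
	if (!pool)
		return;
	free(pool->elements);
	free(pool);
}

int main(void)
{
	my_pool_destroy(NULL);  /* harmless no-op: no "if (pool)" needed at the call site */
	return 0;
}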
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4b3b6f8aff0cb4112a7bafa2c128027f804468b6..3729b394432c9d66c7c219a8a52ddbf9be987869 100644
@@ -1544,10 +1544,8 @@ static void crypt_dtr(struct dm_target *ti)
        if (cc->bs)
                bioset_free(cc->bs);
 
-       if (cc->page_pool)
-               mempool_destroy(cc->page_pool);
-       if (cc->req_pool)
-               mempool_destroy(cc->req_pool);
+       mempool_destroy(cc->page_pool);
+       mempool_destroy(cc->req_pool);
 
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index b34f6e27293dcc9e408289c7955af921b4cb8990..b4c356a21123051d1716ca25badb13cfe78b4e26 100644
@@ -122,6 +122,7 @@ static void flush_expired_bios(struct work_struct *work)
  *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
  *
  * With separate write parameters, the first set is only used for reads.
+ * Offsets are specified in sectors.
  * Delays are specified in milliseconds.
  */
 static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
@@ -132,7 +133,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        int ret;
 
        if (argc != 3 && argc != 6) {
-               ti->error = "requires exactly 3 or 6 arguments";
+               ti->error = "Requires exactly 3 or 6 arguments";
                return -EINVAL;
        }
 
@@ -237,7 +238,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
        unsigned long expires = 0;
 
        if (!delay || !atomic_read(&dc->may_delay))
-               return 1;
+               return DM_MAPIO_REMAPPED;
 
        delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
 
@@ -257,7 +258,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
 
        queue_timeout(dc, expires);
 
-       return 0;
+       return DM_MAPIO_SUBMITTED;
 }
 
 static void delay_presuspend(struct dm_target *ti)
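
As the one-for-one substitution above implies, DM_MAPIO_SUBMITTED is 0 and DM_MAPIO_REMAPPED is 1 in include/linux/device-mapper.h, so delay_bio()'s behaviour is unchanged; only the return path becomes self-documenting. A schematic bio-based map hook showing how the two values are normally used (hypothetical names, not from this series):

/* Sketch only: "example_map", "example_c" and the helpers are hypothetical. */
static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_c *ec = ti->private;

	if (example_should_delay(ec, bio)) {	/* hypothetical policy check */
		example_queue_bio(ec, bio);	/* target keeps the bio for later */
		return DM_MAPIO_SUBMITTED;
	}

	bio->bi_bdev = ec->dev->bdev;		/* plain passthrough */
	return DM_MAPIO_REMAPPED;		/* DM core submits the remapped bio */
}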
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 0119ebfb3d49b4652bd71548af42037aac22953a..665bf32856182e73bb390e3446b8280982bae020 100644
@@ -343,7 +343,9 @@ static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
                }
        }
 
-       return dm_bm_unlock(b);
+       dm_bm_unlock(b);
+
+       return 0;
 }
 
 /*----------------------------------------------------------------*/
@@ -582,7 +584,9 @@ static int open_metadata(struct era_metadata *md)
        md->metadata_snap = le64_to_cpu(disk->metadata_snap);
        md->archived_writesets = true;
 
-       return dm_bm_unlock(sblock);
+       dm_bm_unlock(sblock);
+
+       return 0;
 
 bad:
        dm_bm_unlock(sblock);
@@ -1046,12 +1050,7 @@ static int metadata_take_snap(struct era_metadata *md)
 
        md->metadata_snap = dm_block_location(clone);
 
-       r = dm_tm_unlock(md->tm, clone);
-       if (r) {
-               DMERR("%s: couldn't unlock clone", __func__);
-               md->metadata_snap = SUPERBLOCK_LOCATION;
-               return r;
-       }
+       dm_tm_unlock(md->tm, clone);
 
        return 0;
 }
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 192bb8beeb6b59e296d9a2e06c0ef8c0a9be8aeb..3997f34cfebc62114f33818dd18b1fe8bd08cbb6 100644
@@ -183,7 +183,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
 
        store->chunk_size = chunk_size;
        store->chunk_mask = chunk_size - 1;
-       store->chunk_shift = ffs(chunk_size) - 1;
+       store->chunk_shift = __ffs(chunk_size);
 
        return 0;
 }
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 645e8b4f808eee3f1f63e2432bf12cf8cbcde961..09e2afcafd2ddc2575d552e14eb4f469505300f2 100644
@@ -373,20 +373,20 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
        }
 }
 
-static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+static int flakey_prepare_ioctl(struct dm_target *ti,
+               struct block_device **bdev, fmode_t *mode)
 {
        struct flakey_c *fc = ti->private;
-       struct dm_dev *dev = fc->dev;
-       int r = 0;
+
+       *bdev = fc->dev->bdev;
 
        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (fc->start ||
-           ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
-               r = scsi_verify_blk_ioctl(NULL, cmd);
-
-       return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+           ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+               return 1;
+       return 0;
 }
 
 static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -405,7 +405,7 @@ static struct target_type flakey_target = {
        .map    = flakey_map,
        .end_io = flakey_end_io,
        .status = flakey_status,
-       .ioctl  = flakey_ioctl,
+       .prepare_ioctl = flakey_prepare_ioctl,
        .iterate_devices = flakey_iterate_devices,
 };
 
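The linear, log-writes, multipath, switch and verity targets below get the same treatment. As an illustrative sketch of the new contract (not part of the series): prepare_ioctl() only reports the underlying device, returning 0 when the ioctl can be forwarded as-is, 1 when DM core must first validate it because the target does not map the whole device, or a negative errno:

/* Hypothetical target context; a real target fills this from its ctr(). */
struct example_c {
	struct dm_dev *dev;
	sector_t start;
};

static int example_prepare_ioctl(struct dm_target *ti,
		struct block_device **bdev, fmode_t *mode)
{
	struct example_c *ec = ti->private;

	*bdev = ec->dev->bdev;		/* where DM core should send the ioctl */

	/* Only pass ioctls through unchecked if the device sizes match exactly. */
	if (ec->start ||
	    ti->len != i_size_read(ec->dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;		/* core must run extra validation first */

	return 0;
}
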
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 6f8e83b2a6f801b3524a7b58598ed79e7bc82bae..81c5e1a1f36389e0d743eedfb6ae895de88340be 100644
@@ -65,8 +65,7 @@ struct dm_io_client *dm_io_client_create(void)
        return client;
 
    bad:
-       if (client->pool)
-               mempool_destroy(client->pool);
+       mempool_destroy(client->pool);
        kfree(client);
        return ERR_PTR(-ENOMEM);
 }
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 436f5c9b6aea56dd6652696cc3eac489690c1e8e..05c35aacb3aaeadb4cfddd73a14fed73a7c1bbe0 100644
@@ -39,20 +39,20 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        lc = kmalloc(sizeof(*lc), GFP_KERNEL);
        if (lc == NULL) {
-               ti->error = "dm-linear: Cannot allocate linear context";
+               ti->error = "Cannot allocate linear context";
                return -ENOMEM;
        }
 
        ret = -EINVAL;
        if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
-               ti->error = "dm-linear: Invalid device sector";
+               ti->error = "Invalid device sector";
                goto bad;
        }
        lc->start = tmp;
 
        ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
        if (ret) {
-               ti->error = "dm-linear: Device lookup failed";
+               ti->error = "Device lookup failed";
                goto bad;
        }
 
@@ -116,21 +116,21 @@ static void linear_status(struct dm_target *ti, status_type_t type,
        }
 }
 
-static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
-                       unsigned long arg)
+static int linear_prepare_ioctl(struct dm_target *ti,
+               struct block_device **bdev, fmode_t *mode)
 {
        struct linear_c *lc = (struct linear_c *) ti->private;
        struct dm_dev *dev = lc->dev;
-       int r = 0;
+
+       *bdev = dev->bdev;
 
        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (lc->start ||
            ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
-               r = scsi_verify_blk_ioctl(NULL, cmd);
-
-       return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+               return 1;
+       return 0;
 }
 
 static int linear_iterate_devices(struct dm_target *ti,
@@ -149,7 +149,7 @@ static struct target_type linear_target = {
        .dtr    = linear_dtr,
        .map    = linear_map,
        .status = linear_status,
-       .ioctl  = linear_ioctl,
+       .prepare_ioctl = linear_prepare_ioctl,
        .iterate_devices = linear_iterate_devices,
 };
 
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 058256d2eeea8b6094f50190b9a0116137b238b4..53b7b06d0aa8bcad21aa6ccd795cac920b9e02b0 100644
@@ -313,8 +313,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 out:
        kfree(devices_rdata);
        if (r) {
-               if (lc->flush_entry_pool)
-                       mempool_destroy(lc->flush_entry_pool);
+               mempool_destroy(lc->flush_entry_pool);
                kfree(lc);
                kfree(ctr_str);
        } else {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index b2912dbac8bce7356f06eebc8063317c6bc411b0..624589d51c2cb67828567ad34b02d283e5965956 100644
@@ -714,20 +714,19 @@ static void log_writes_status(struct dm_target *ti, status_type_t type,
        }
 }
 
-static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
-                           unsigned long arg)
+static int log_writes_prepare_ioctl(struct dm_target *ti,
+               struct block_device **bdev, fmode_t *mode)
 {
        struct log_writes_c *lc = ti->private;
        struct dm_dev *dev = lc->dev;
-       int r = 0;
 
+       *bdev = dev->bdev;
        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
-               r = scsi_verify_blk_ioctl(NULL, cmd);
-
-       return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+               return 1;
+       return 0;
 }
 
 static int log_writes_iterate_devices(struct dm_target *ti,
@@ -782,7 +781,7 @@ static struct target_type log_writes_target = {
        .map    = log_writes_map,
        .end_io = normal_end_io,
        .status = log_writes_status,
-       .ioctl  = log_writes_ioctl,
+       .prepare_ioctl = log_writes_prepare_ioctl,
        .message = log_writes_message,
        .iterate_devices = log_writes_iterate_devices,
        .io_hints = log_writes_io_hints,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 5a67671a3973b576a9bdcc8dabc576448336b9ba..aaa6caa46a9f2dbceaf3d9c1aac92037b3b731b2 100644
@@ -1533,18 +1533,14 @@ out:
        return r;
 }
 
-static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
-                          unsigned long arg)
+static int multipath_prepare_ioctl(struct dm_target *ti,
+               struct block_device **bdev, fmode_t *mode)
 {
        struct multipath *m = ti->private;
        struct pgpath *pgpath;
-       struct block_device *bdev;
-       fmode_t mode;
        unsigned long flags;
        int r;
 
-       bdev = NULL;
-       mode = 0;
        r = 0;
 
        spin_lock_irqsave(&m->lock, flags);
@@ -1555,26 +1551,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
        pgpath = m->current_pgpath;
 
        if (pgpath) {
-               bdev = pgpath->path.dev->bdev;
-               mode = pgpath->path.dev->mode;
+               *bdev = pgpath->path.dev->bdev;
+               *mode = pgpath->path.dev->mode;
        }
 
        if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
                r = -ENOTCONN;
-       else if (!bdev)
+       else if (!*bdev)
                r = -EIO;
 
        spin_unlock_irqrestore(&m->lock, flags);
 
-       /*
-        * Only pass ioctls through if the device sizes match exactly.
-        */
-       if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
-               int err = scsi_verify_blk_ioctl(NULL, cmd);
-               if (err)
-                       r = err;
-       }
-
        if (r == -ENOTCONN && !fatal_signal_pending(current)) {
                spin_lock_irqsave(&m->lock, flags);
                if (!m->current_pg) {
@@ -1587,7 +1574,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                dm_table_run_md_queue_async(m->ti->table);
        }
 
-       return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+       /*
+        * Only pass ioctls through if the device sizes match exactly.
+        */
+       if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+               return 1;
+       return r;
 }
 
 static int multipath_iterate_devices(struct dm_target *ti,
@@ -1690,7 +1682,7 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
        .name = "multipath",
-       .version = {1, 9, 0},
+       .version = {1, 10, 0},
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
@@ -1703,7 +1695,7 @@ static struct target_type multipath_target = {
        .resume = multipath_resume,
        .status = multipath_status,
        .message = multipath_message,
-       .ioctl  = multipath_ioctl,
+       .prepare_ioctl = multipath_prepare_ioctl,
        .iterate_devices = multipath_iterate_devices,
        .busy = multipath_busy,
 };
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index b929fd5f4984bb67fbb62474e24e5af425758770..74cb7b991d41d80384e3aaf34baf030579cc93d6 100644
@@ -193,7 +193,7 @@ struct dm_region_hash *dm_region_hash_create(
        rh->max_recovery = max_recovery;
        rh->log = log;
        rh->region_size = region_size;
-       rh->region_shift = ffs(region_size) - 1;
+       rh->region_shift = __ffs(region_size);
        rwlock_init(&rh->hash_lock);
        rh->mask = nr_buckets - 1;
        rh->nr_buckets = nr_buckets;
@@ -249,9 +249,7 @@ void dm_region_hash_destroy(struct dm_region_hash *rh)
        if (rh->log)
                dm_dirty_log_destroy(rh->log);
 
-       if (rh->region_pool)
-               mempool_destroy(rh->region_pool);
-
+       mempool_destroy(rh->region_pool);
        vfree(rh->buckets);
        kfree(rh);
 }
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 117a05e40090a9b78829ed415446d906c8268701..3164b8bce2948591999f429e4633235c1f7b1062 100644
@@ -322,7 +322,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
                    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
                                            bdev) >> 9);
                ps->store->chunk_mask = ps->store->chunk_size - 1;
-               ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
+               ps->store->chunk_shift = __ffs(ps->store->chunk_size);
                chunk_size_supplied = 0;
        }
 
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 50fca469cafd92b3dac8c4455391cf24419de7ac..871c18fe000dfae2ef016ad64a5d8c4e0fc04e66 100644
@@ -99,11 +99,11 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
        if (sector_div(nr_regions, sctx->region_size))
                nr_regions++;
 
-       sctx->nr_regions = nr_regions;
-       if (sctx->nr_regions != nr_regions || sctx->nr_regions >= ULONG_MAX) {
+       if (nr_regions >= ULONG_MAX) {
                ti->error = "Region table too large";
                return -EINVAL;
        }
+       sctx->nr_regions = nr_regions;
 
        nr_slots = nr_regions;
        if (sector_div(nr_slots, sctx->region_entries_per_slot))
@@ -511,27 +511,24 @@ static void switch_status(struct dm_target *ti, status_type_t type,
  *
  * Passthrough all ioctls to the path for sector 0
  */
-static int switch_ioctl(struct dm_target *ti, unsigned cmd,
-                       unsigned long arg)
+static int switch_prepare_ioctl(struct dm_target *ti,
+               struct block_device **bdev, fmode_t *mode)
 {
        struct switch_ctx *sctx = ti->private;
-       struct block_device *bdev;
-       fmode_t mode;
        unsigned path_nr;
-       int r = 0;
 
        path_nr = switch_get_path_nr(sctx, 0);
 
-       bdev = sctx->path_list[path_nr].dmdev->bdev;
-       mode = sctx->path_list[path_nr].dmdev->mode;
+       *bdev = sctx->path_list[path_nr].dmdev->bdev;
+       *mode = sctx->path_list[path_nr].dmdev->mode;
 
        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
-       if (ti->len + sctx->path_list[path_nr].start != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
-               r = scsi_verify_blk_ioctl(NULL, cmd);
-
-       return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+       if (ti->len + sctx->path_list[path_nr].start !=
+           i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+               return 1;
+       return 0;
 }
 
 static int switch_iterate_devices(struct dm_target *ti,
@@ -560,7 +557,7 @@ static struct target_type switch_target = {
        .map = switch_map,
        .message = switch_message,
        .status = switch_status,
-       .ioctl = switch_ioctl,
+       .prepare_ioctl = switch_prepare_ioctl,
        .iterate_devices = switch_iterate_devices,
 };
 
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6ba47cfb1443748ccf092819a6a5ef160bb856fd..1fa45695b68a4eb6ed3db388d30f6e88f06fee21 100644
@@ -396,7 +396,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
                }
        }
 
-       return dm_bm_unlock(b);
+       dm_bm_unlock(b);
+
+       return 0;
 }
 
 static void __setup_btree_details(struct dm_pool_metadata *pmd)
@@ -650,7 +652,9 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
        }
 
        __setup_btree_details(pmd);
-       return dm_bm_unlock(sblock);
+       dm_bm_unlock(sblock);
+
+       return 0;
 
 bad_cleanup_data_sm:
        dm_sm_destroy(pmd->data_sm);
@@ -1297,7 +1301,9 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
        dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
        dm_sm_dec_block(pmd->metadata_sm, held_root);
 
-       return dm_tm_unlock(pmd->tm, copy);
+       dm_tm_unlock(pmd->tm, copy);
+
+       return 0;
 }
 
 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
@@ -1327,7 +1333,9 @@ static int __get_metadata_snap(struct dm_pool_metadata *pmd,
        disk_super = dm_block_data(sblock);
        *result = le64_to_cpu(disk_super->held_root);
 
-       return dm_bm_unlock(sblock);
+       dm_bm_unlock(sblock);
+
+       return 0;
 }
 
 int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index edc624bccf9aa841dc76073208735bd2455e12a8..ccf41886ebcf49790fe7fddb4aa7f9e581f0bbe3 100644
@@ -631,18 +631,17 @@ static void verity_status(struct dm_target *ti, status_type_t type,
        }
 }
 
-static int verity_ioctl(struct dm_target *ti, unsigned cmd,
-                       unsigned long arg)
+static int verity_prepare_ioctl(struct dm_target *ti,
+               struct block_device **bdev, fmode_t *mode)
 {
        struct dm_verity *v = ti->private;
-       int r = 0;
+
+       *bdev = v->data_dev->bdev;
 
        if (v->data_start ||
            ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
-               r = scsi_verify_blk_ioctl(NULL, cmd);
-
-       return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
-                                    cmd, arg);
+               return 1;
+       return 0;
 }
 
 static int verity_iterate_devices(struct dm_target *ti,
@@ -965,7 +964,7 @@ static struct target_type verity_target = {
        .dtr            = verity_dtr,
        .map            = verity_map,
        .status         = verity_status,
-       .ioctl          = verity_ioctl,
+       .prepare_ioctl  = verity_prepare_ioctl,
        .iterate_devices = verity_iterate_devices,
        .io_hints       = verity_io_hints,
 };
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 485760ebba762f41f4cf526cd880da93848bc47a..32440ad5f6844a7eeef767aed2be4c7727fc036a 100644
@@ -24,6 +24,7 @@
 #include <linux/ktime.h>
 #include <linux/elevator.h> /* for rq_end_sector() */
 #include <linux/blk-mq.h>
+#include <linux/pr.h>
 
 #include <trace/events/block.h>
 
@@ -555,18 +556,16 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        return dm_get_geometry(md, geo);
 }
 
-static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
-                       unsigned int cmd, unsigned long arg)
+static int dm_get_live_table_for_ioctl(struct mapped_device *md,
+               struct dm_target **tgt, struct block_device **bdev,
+               fmode_t *mode, int *srcu_idx)
 {
-       struct mapped_device *md = bdev->bd_disk->private_data;
-       int srcu_idx;
        struct dm_table *map;
-       struct dm_target *tgt;
-       int r = -ENOTTY;
+       int r;
 
 retry:
-       map = dm_get_live_table(md, &srcu_idx);
-
+       r = -ENOTTY;
+       map = dm_get_live_table(md, srcu_idx);
        if (!map || !dm_table_get_size(map))
                goto out;
 
@@ -574,8 +573,9 @@ retry:
        if (dm_table_get_num_targets(map) != 1)
                goto out;
 
-       tgt = dm_table_get_target(map, 0);
-       if (!tgt->type->ioctl)
+       *tgt = dm_table_get_target(map, 0);
+
+       if (!(*tgt)->type->prepare_ioctl)
                goto out;
 
        if (dm_suspended_md(md)) {
@@ -583,16 +583,46 @@ retry:
                goto out;
        }
 
-       r = tgt->type->ioctl(tgt, cmd, arg);
+       r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
+       if (r < 0)
+               goto out;
 
-out:
-       dm_put_live_table(md, srcu_idx);
+       return r;
 
+out:
+       dm_put_live_table(md, *srcu_idx);
        if (r == -ENOTCONN) {
                msleep(10);
                goto retry;
        }
+       return r;
+}
+
+static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
+                       unsigned int cmd, unsigned long arg)
+{
+       struct mapped_device *md = bdev->bd_disk->private_data;
+       struct dm_target *tgt;
+       int srcu_idx, r;
+
+       r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+       if (r < 0)
+               return r;
 
+       if (r > 0) {
+               /*
+                * Target determined this ioctl is being issued against
+                * a logical partition of the parent bdev; so extra
+                * validation is needed.
+                */
+               r = scsi_verify_blk_ioctl(NULL, cmd);
+               if (r)
+                       goto out;
+       }
+
+       r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+out:
+       dm_put_live_table(md, srcu_idx);
        return r;
 }
 
@@ -1734,8 +1764,6 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
 
        map = dm_get_live_table(md, &srcu_idx);
 
-       blk_queue_split(q, &bio, q->bio_split);
-
        generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
 
        /* if we're suspended, we have to queue this io for later */
@@ -2198,6 +2226,13 @@ static void dm_init_md_queue(struct mapped_device *md)
         * This queue is new, so no concurrency on the queue_flags.
         */
        queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+       /*
+        * Initialize data that will only be used by a non-blk-mq DM queue
+        * - must do so here (in alloc_dev callchain) before queue is used
+        */
+       md->queue->queuedata = md;
+       md->queue->backing_dev_info.congested_data = md;
 }
 
 static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2208,10 +2243,7 @@ static void dm_init_old_md_queue(struct mapped_device *md)
        /*
         * Initialize aspects of queue that aren't relevant for blk-mq
         */
-       md->queue->queuedata = md;
        md->queue->backing_dev_info.congested_fn = dm_any_congested;
-       md->queue->backing_dev_info.congested_data = md;
-
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }
 
@@ -2221,10 +2253,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
                destroy_workqueue(md->wq);
        if (md->kworker_task)
                kthread_stop(md->kworker_task);
-       if (md->io_pool)
-               mempool_destroy(md->io_pool);
-       if (md->rq_pool)
-               mempool_destroy(md->rq_pool);
+       mempool_destroy(md->io_pool);
+       mempool_destroy(md->rq_pool);
        if (md->bs)
                bioset_free(md->bs);
 
@@ -2759,6 +2789,12 @@ int dm_setup_md_queue(struct mapped_device *md)
        case DM_TYPE_BIO_BASED:
                dm_init_old_md_queue(md);
                blk_queue_make_request(md->queue, dm_make_request);
+               /*
+                * DM handles splitting bios as needed.  Free the bio_split bioset
+                * since it won't be used (saves 1 process per bio-based DM device).
+                */
+               bioset_free(md->queue->bio_split);
+               md->queue->bio_split = NULL;
                break;
        }
 
@@ -3505,11 +3541,8 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
        if (!pools)
                return;
 
-       if (pools->io_pool)
-               mempool_destroy(pools->io_pool);
-
-       if (pools->rq_pool)
-               mempool_destroy(pools->rq_pool);
+       mempool_destroy(pools->io_pool);
+       mempool_destroy(pools->rq_pool);
 
        if (pools->bs)
                bioset_free(pools->bs);
@@ -3517,11 +3550,133 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
        kfree(pools);
 }
 
+static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+               u32 flags)
+{
+       struct mapped_device *md = bdev->bd_disk->private_data;
+       const struct pr_ops *ops;
+       struct dm_target *tgt;
+       fmode_t mode;
+       int srcu_idx, r;
+
+       r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+       if (r < 0)
+               return r;
+
+       ops = bdev->bd_disk->fops->pr_ops;
+       if (ops && ops->pr_register)
+               r = ops->pr_register(bdev, old_key, new_key, flags);
+       else
+               r = -EOPNOTSUPP;
+
+       dm_put_live_table(md, srcu_idx);
+       return r;
+}
+
+static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
+               u32 flags)
+{
+       struct mapped_device *md = bdev->bd_disk->private_data;
+       const struct pr_ops *ops;
+       struct dm_target *tgt;
+       fmode_t mode;
+       int srcu_idx, r;
+
+       r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+       if (r < 0)
+               return r;
+
+       ops = bdev->bd_disk->fops->pr_ops;
+       if (ops && ops->pr_reserve)
+               r = ops->pr_reserve(bdev, key, type, flags);
+       else
+               r = -EOPNOTSUPP;
+
+       dm_put_live_table(md, srcu_idx);
+       return r;
+}
+
+static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+       struct mapped_device *md = bdev->bd_disk->private_data;
+       const struct pr_ops *ops;
+       struct dm_target *tgt;
+       fmode_t mode;
+       int srcu_idx, r;
+
+       r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+       if (r < 0)
+               return r;
+
+       ops = bdev->bd_disk->fops->pr_ops;
+       if (ops && ops->pr_release)
+               r = ops->pr_release(bdev, key, type);
+       else
+               r = -EOPNOTSUPP;
+
+       dm_put_live_table(md, srcu_idx);
+       return r;
+}
+
+static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
+               enum pr_type type, bool abort)
+{
+       struct mapped_device *md = bdev->bd_disk->private_data;
+       const struct pr_ops *ops;
+       struct dm_target *tgt;
+       fmode_t mode;
+       int srcu_idx, r;
+
+       r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+       if (r < 0)
+               return r;
+
+       ops = bdev->bd_disk->fops->pr_ops;
+       if (ops && ops->pr_preempt)
+               r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
+       else
+               r = -EOPNOTSUPP;
+
+       dm_put_live_table(md, srcu_idx);
+       return r;
+}
+
+static int dm_pr_clear(struct block_device *bdev, u64 key)
+{
+       struct mapped_device *md = bdev->bd_disk->private_data;
+       const struct pr_ops *ops;
+       struct dm_target *tgt;
+       fmode_t mode;
+       int srcu_idx, r;
+
+       r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+       if (r < 0)
+               return r;
+
+       ops = bdev->bd_disk->fops->pr_ops;
+       if (ops && ops->pr_clear)
+               r = ops->pr_clear(bdev, key);
+       else
+               r = -EOPNOTSUPP;
+
+       dm_put_live_table(md, srcu_idx);
+       return r;
+}
+
+static const struct pr_ops dm_pr_ops = {
+       .pr_register    = dm_pr_register,
+       .pr_reserve     = dm_pr_reserve,
+       .pr_release     = dm_pr_release,
+       .pr_preempt     = dm_pr_preempt,
+       .pr_clear       = dm_pr_clear,
+};
+
 static const struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .ioctl = dm_blk_ioctl,
        .getgeo = dm_blk_getgeo,
+       .pr_ops = &dm_pr_ops,
        .owner = THIS_MODULE
 };
 
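The pr_ops table added above forwards each persistent-reservation operation to the single underlying target's device. A hedged userspace sketch of what that enables, assuming the generic PR ioctl interface from include/uapi/linux/pr.h (merged in the same cycle) and an illustrative /dev/mapper/mpatha node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg = {
		.old_key = 0,
		.new_key = 0x123abcULL,		/* arbitrary example key */
		.flags   = 0,
	};
	int fd = open("/dev/mapper/mpatha", O_RDWR);	/* illustrative device node */

	if (fd < 0 || ioctl(fd, IOC_PR_REGISTER, &reg) < 0) {
		perror("PR register through device-mapper");
		return 1;
	}

	printf("reservation key registered via the dm pr_ops path\n");
	close(fd);
	return 0;
}
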
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index e64b61ad0ef34fc42fdf8b8066bbe71965108493..431a03067d646e43a6fe8c5d99871321c5dba91e 100644
@@ -233,9 +233,9 @@ static int get_ablock(struct dm_array_info *info, dm_block_t b,
 /*
  * Unlocks an array block.
  */
-static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
 {
-       return dm_tm_unlock(info->btree_info.tm, block);
+       dm_tm_unlock(info->btree_info.tm, block);
 }
 
 /*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 88dbe7b97c2c3ca7ace7cbee4b75f80496a0c62a..f2393ba838eb57cfa977228da492814d7dda42af 100644
@@ -578,7 +578,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
 }
 EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
 
-int dm_bm_unlock(struct dm_block *b)
+void dm_bm_unlock(struct dm_block *b)
 {
        struct buffer_aux *aux;
        aux = dm_bufio_get_aux_data(to_buffer(b));
@@ -590,8 +590,6 @@ int dm_bm_unlock(struct dm_block *b)
                bl_up_read(&aux->lock);
 
        dm_bufio_release(to_buffer(b));
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(dm_bm_unlock);
 
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 84330f59886dbc272e894e044e2236296a5119f7..3627d1b7667ad50a935faba440fc3fb62e81876c 100644
@@ -94,7 +94,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
                          struct dm_block_validator *v,
                          struct dm_block **result);
 
-int dm_bm_unlock(struct dm_block *b);
+void dm_bm_unlock(struct dm_block *b);
 
 /*
  * It's a common idiom to have a superblock that should be committed last.
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 8731b6ea026bd9b8cfbe2a21bbc06366e509181c..a240990a7f3339047e45a4b45087a8a3d0ca30fe 100644
@@ -52,7 +52,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
                  struct dm_btree_value_type *vt);
 
 int new_block(struct dm_btree_info *info, struct dm_block **result);
-int unlock_block(struct dm_btree_info *info, struct dm_block *b);
+void unlock_block(struct dm_btree_info *info, struct dm_block *b);
 
 /*
  * Spines keep track of the rolling locks.  There are 2 variants, read-only
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 2e4c4cb79e4d939f0ed2638986d2ccc3ddd8b972..21ea537bd55e9984f7cfe5b908a3d6bcad9038e9 100644
@@ -165,9 +165,9 @@ static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt
        return 0;
 }
 
-static int exit_child(struct dm_btree_info *info, struct child *c)
+static void exit_child(struct dm_btree_info *info, struct child *c)
 {
-       return dm_tm_unlock(info->tm, c->block);
+       dm_tm_unlock(info->tm, c->block);
 }
 
 static void shift(struct btree_node *left, struct btree_node *right, int count)
@@ -249,13 +249,10 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
 
        __rebalance2(info, parent, &left, &right);
 
-       r = exit_child(info, &left);
-       if (r) {
-               exit_child(info, &right);
-               return r;
-       }
+       exit_child(info, &left);
+       exit_child(info, &right);
 
-       return exit_child(info, &right);
+       return 0;
 }
 
 /*
@@ -394,22 +391,9 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
 
        __rebalance3(info, parent, &left, &center, &right);
 
-       r = exit_child(info, &left);
-       if (r) {
-               exit_child(info, &center);
-               exit_child(info, &right);
-               return r;
-       }
-
-       r = exit_child(info, &center);
-       if (r) {
-               exit_child(info, &right);
-               return r;
-       }
-
-       r = exit_child(info, &right);
-       if (r)
-               return r;
+       exit_child(info, &left);
+       exit_child(info, &center);
+       exit_child(info, &right);
 
        return 0;
 }
@@ -433,9 +417,7 @@ static int rebalance_children(struct shadow_spine *s,
 
                memcpy(n, dm_block_data(child),
                       dm_bm_block_size(dm_tm_get_bm(info->tm)));
-               r = dm_tm_unlock(info->tm, child);
-               if (r)
-                       return r;
+               dm_tm_unlock(info->tm, child);
 
                dm_tm_dec(info->tm, dm_block_location(child));
                return 0;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 0dee514ba4c5f9e8d34d16e9d239ef333395c7d4..b27b8091a1ca4f649240e074a7bee564285dd30d 100644
@@ -117,9 +117,9 @@ int new_block(struct dm_btree_info *info, struct dm_block **result)
        return dm_tm_new_block(info->tm, &btree_node_validator, result);
 }
 
-int unlock_block(struct dm_btree_info *info, struct dm_block *b)
+void unlock_block(struct dm_btree_info *info, struct dm_block *b)
 {
-       return dm_tm_unlock(info->tm, b);
+       dm_tm_unlock(info->tm, b);
 }
 
 /*----------------------------------------------------------------*/
@@ -137,9 +137,7 @@ int exit_ro_spine(struct ro_spine *s)
        int r = 0, i;
 
        for (i = 0; i < s->count; i++) {
-               int r2 = unlock_block(s->info, s->nodes[i]);
-               if (r2 < 0)
-                       r = r2;
+               unlock_block(s->info, s->nodes[i]);
        }
 
        return r;
@@ -150,9 +148,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
        int r;
 
        if (s->count == 2) {
-               r = unlock_block(s->info, s->nodes[0]);
-               if (r < 0)
-                       return r;
+               unlock_block(s->info, s->nodes[0]);
                s->nodes[0] = s->nodes[1];
                s->count--;
        }
@@ -194,9 +190,7 @@ int exit_shadow_spine(struct shadow_spine *s)
        int r = 0, i;
 
        for (i = 0; i < s->count; i++) {
-               int r2 = unlock_block(s->info, s->nodes[i]);
-               if (r2 < 0)
-                       r = r2;
+               unlock_block(s->info, s->nodes[i]);
        }
 
        return r;
@@ -208,9 +202,7 @@ int shadow_step(struct shadow_spine *s, dm_block_t b,
        int r;
 
        if (s->count == 2) {
-               r = unlock_block(s->info, s->nodes[0]);
-               if (r < 0)
-                       return r;
+               unlock_block(s->info, s->nodes[0]);
                s->nodes[0] = s->nodes[1];
                s->count--;
        }
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 0e09aef43998ac250cc296122247875eed23ceb2..c573402033b2dd9e89dc11ad2f6e1b7e8bbb4263 100644
@@ -141,7 +141,9 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
        n->header.value_size = cpu_to_le32(info->value_type.size);
 
        *root = dm_block_location(b);
-       return unlock_block(info, b);
+       unlock_block(info, b);
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(dm_btree_empty);
 
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index aacbe70c2c2eb64449bbb9a87264326258f49ac8..306d2e4502c480aef245224a5235c37dc8b94d0a 100644
@@ -259,9 +259,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
 
                idx.blocknr = cpu_to_le64(dm_block_location(b));
 
-               r = dm_tm_unlock(ll->tm, b);
-               if (r < 0)
-                       return r;
+               dm_tm_unlock(ll->tm, b);
 
                idx.nr_free = cpu_to_le32(ll->entries_per_block);
                idx.none_free_before = 0;
@@ -293,7 +291,9 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
 
        *result = sm_lookup_bitmap(dm_bitmap_data(blk), b);
 
-       return dm_tm_unlock(ll->tm, blk);
+       dm_tm_unlock(ll->tm, blk);
+
+       return 0;
 }
 
 static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
@@ -373,9 +373,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
                        return r;
                }
 
-               r = dm_tm_unlock(ll->tm, blk);
-               if (r < 0)
-                       return r;
+               dm_tm_unlock(ll->tm, blk);
 
                *result = i * ll->entries_per_block + (dm_block_t) position;
                return 0;
@@ -429,9 +427,7 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
        if (ref_count <= 2) {
                sm_set_bitmap(bm_le, bit, ref_count);
 
-               r = dm_tm_unlock(ll->tm, nb);
-               if (r < 0)
-                       return r;
+               dm_tm_unlock(ll->tm, nb);
 
                if (old > 2) {
                        r = dm_btree_remove(&ll->ref_count_info,
@@ -445,9 +441,7 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
                __le32 le_rc = cpu_to_le32(ref_count);
 
                sm_set_bitmap(bm_le, bit, 3);
-               r = dm_tm_unlock(ll->tm, nb);
-               if (r < 0)
-                       return r;
+               dm_tm_unlock(ll->tm, nb);
 
                __dm_bless_for_disk(&le_rc);
                r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
@@ -556,7 +550,9 @@ static int metadata_ll_init_index(struct ll_disk *ll)
        memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
        ll->bitmap_root = dm_block_location(b);
 
-       return dm_tm_unlock(ll->tm, b);
+       dm_tm_unlock(ll->tm, b);
+
+       return 0;
 }
 
 static int metadata_ll_open(struct ll_disk *ll)
@@ -570,7 +566,9 @@ static int metadata_ll_open(struct ll_disk *ll)
                return r;
 
        memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
-       return dm_tm_unlock(ll->tm, block);
+       dm_tm_unlock(ll->tm, block);
+
+       return 0;
 }
 
 static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
@@ -590,7 +588,9 @@ static int metadata_ll_commit(struct ll_disk *ll)
        memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
        ll->bitmap_root = dm_block_location(b);
 
-       return dm_tm_unlock(ll->tm, b);
+       dm_tm_unlock(ll->tm, b);
+
+       return 0;
 }
 
 int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 9cb797d800cf8de36479130708a4a58e12e64a23..abe2c5dd0993b6083f2e9cebe54ebb1d8de541ac 100644
@@ -342,9 +342,9 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
 }
 EXPORT_SYMBOL_GPL(dm_tm_read_lock);
 
-int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
+void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
 {
-       return dm_bm_unlock(b);
+       dm_bm_unlock(b);
 }
 EXPORT_SYMBOL_GPL(dm_tm_unlock);
 
diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
index 2e0d4d66fb1bafe15d3d70e8d41635320cc36308..f3a18be68f305a58df3a0330409ce0e403c8499b 100644
@@ -94,7 +94,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
                    struct dm_block_validator *v,
                    struct dm_block **result);
 
-int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
+void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
 
 /*
  * Functions for altering the reference count of a block directly.
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 76d23fa8c7d3a21ba294e62be223f5a6992132a8..ec1c61c87d8974897af2be54af23588f14746777 100644
@@ -79,8 +79,8 @@ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
 
 typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
 
-typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
-                           unsigned long arg);
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
+                           struct block_device **bdev, fmode_t *mode);
 
 /*
  * These iteration functions are typically used to check (and combine)
@@ -156,7 +156,7 @@ struct target_type {
        dm_resume_fn resume;
        dm_status_fn status;
        dm_message_fn message;
-       dm_ioctl_fn ioctl;
+       dm_prepare_ioctl_fn prepare_ioctl;
        dm_busy_fn busy;
        dm_iterate_devices_fn iterate_devices;
        dm_io_hints_fn io_hints;
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index d34611e35a30f10995d19cc4ae80aeb4a9547211..30afd0a23c4b0d3b0c70b5d2778f3988f01d03a2 100644
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY    _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR       4
-#define DM_VERSION_MINOR       33
+#define DM_VERSION_MINOR       34
 #define DM_VERSION_PATCHLEVEL  0
-#define DM_VERSION_EXTRA       "-ioctl (2015-8-18)"
+#define DM_VERSION_EXTRA       "-ioctl (2015-10-28)"
 
 /* Status bits */
 #define DM_READONLY_FLAG       (1 << 0) /* In/Out */