dm: do not allocate any mempools for blk-mq request-based DM
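The dm.c/dm.h counterpart that defines dm_alloc_bio_mempools() and dm_alloc_rq_mempools() is not part of this blobdiff. As a rough sketch only, assuming the request-based allocator returns an ERR_PTR() on failure (the IS_ERR() check in dm_table_alloc_md_mempools() below depends on that) and allocates nothing at all for DM_TYPE_MQ_REQUEST_BASED because blk-mq preallocates per-request memory, it could look roughly like the following; MIN_IOS, _rq_cache and _rq_tio_cache are illustrative stand-ins for dm.c internals:

/* Illustrative sketch only; the real definitions live in drivers/md/dm.c. */
struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
					    unsigned type)
{
	struct dm_md_mempools *pools;

	if (type == DM_TYPE_MQ_REQUEST_BASED)
		return NULL;	/* blk-mq: per-request memory comes from the block layer */

	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	if (!pools)
		return ERR_PTR(-ENOMEM);

	/* Legacy request path: clone requests and per-request target io state. */
	pools->rq_pool = mempool_create_slab_pool(MIN_IOS, _rq_cache);
	if (!pools->rq_pool)
		goto err;

	pools->io_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!pools->io_pool)
		goto err;

	return pools;
err:
	dm_free_md_mempools(pools);
	return ERR_PTR(-ENOMEM);
}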
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6554d9148927771296e52cc2882f9e6f7fa68145..85e1d39e9a38a568b7c05c1e4d49eda2e9b118fe 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -18,6 +18,8 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/mount.h>
 
 #define DM_MSG_PREFIX "table"
 
@@ -372,23 +374,18 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
        int r;
        dev_t uninitialized_var(dev);
        struct dm_dev_internal *dd;
-       unsigned int major, minor;
        struct dm_table *t = ti->table;
-       char dummy;
+       struct block_device *bdev;
 
        BUG_ON(!t);
 
-       if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
-               /* Extract the major/minor numbers */
-               dev = MKDEV(major, minor);
-               if (MAJOR(dev) != major || MINOR(dev) != minor)
-                       return -EOVERFLOW;
+       /* convert the path to a device */
+       bdev = lookup_bdev(path);
+       if (IS_ERR(bdev)) {
+               dev = name_to_dev_t(path);
+               if (!dev)
+                       return -ENODEV;
        } else {
-               /* convert the path to a device */
-               struct block_device *bdev = lookup_bdev(path);
-
-               if (IS_ERR(bdev))
-                       return PTR_ERR(bdev);
                dev = bdev->bd_dev;
                bdput(bdev);
        }
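With the lookup_bdev() plus name_to_dev_t() fallback above, a table line may name a device either by a path to an existing node or by a "major:minor" string such as "8:17", which previously went through the removed sscanf() branch. A hypothetical target constructor fragment, with made-up names, showing how dm_get_device() is typically called:

#include <linux/device-mapper.h>

/*
 * Hypothetical .ctr fragment (names made up).  lookup_bdev() resolves a
 * path to an existing block device node; when that fails, name_to_dev_t()
 * still understands forms such as "8:17", even before a /dev node exists.
 */
static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_dev *dev;
	int r;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Device lookup failed";
		return r;
	}

	ti->private = dev;
	return 0;
}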
@@ -823,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
+static bool __table_type_request_based(unsigned table_type)
+{
+       return (table_type == DM_TYPE_REQUEST_BASED ||
+               table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
 static int dm_table_set_type(struct dm_table *t)
 {
        unsigned i;
@@ -855,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t)
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
-               if (live_md_type == DM_TYPE_REQUEST_BASED ||
-                   live_md_type == DM_TYPE_MQ_REQUEST_BASED)
+               if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
@@ -906,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t)
                        }
                t->type = DM_TYPE_MQ_REQUEST_BASED;
 
-       } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
+       } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
                /* inherit live MD type */
                t->type = live_md_type;
 
@@ -928,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 
 bool dm_table_request_based(struct dm_table *t)
 {
-       unsigned table_type = dm_table_get_type(t);
-
-       return (table_type == DM_TYPE_REQUEST_BASED ||
-               table_type == DM_TYPE_MQ_REQUEST_BASED);
+       return __table_type_request_based(dm_table_get_type(t));
 }
 
 bool dm_table_mq_request_based(struct dm_table *t)
@@ -939,27 +938,34 @@ bool dm_table_mq_request_based(struct dm_table *t)
        return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
 }
 
-static int dm_table_alloc_md_mempools(struct dm_table *t)
+static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
        unsigned type = dm_table_get_type(t);
        unsigned per_bio_data_size = 0;
-       struct dm_target *tgt;
        unsigned i;
 
-       if (unlikely(type == DM_TYPE_NONE)) {
+       switch (type) {
+       case DM_TYPE_BIO_BASED:
+               for (i = 0; i < t->num_targets; i++) {
+                       struct dm_target *tgt = t->targets + i;
+
+                       per_bio_data_size = max(per_bio_data_size,
+                                               tgt->per_bio_data_size);
+               }
+               t->mempools = dm_alloc_bio_mempools(t->integrity_supported,
+                                                   per_bio_data_size);
+               break;
+       case DM_TYPE_REQUEST_BASED:
+       case DM_TYPE_MQ_REQUEST_BASED:
+               t->mempools = dm_alloc_rq_mempools(md, type);
+               break;
+       default:
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }
 
-       if (type == DM_TYPE_BIO_BASED)
-               for (i = 0; i < t->num_targets; i++) {
-                       tgt = t->targets + i;
-                       per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
-               }
-
-       t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
-       if (!t->mempools)
-               return -ENOMEM;
+       if (IS_ERR(t->mempools))
+               return PTR_ERR(t->mempools);
 
        return 0;
 }
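Note the error-handling convention this hunk switches to: the allocators are expected to return an ERR_PTR()-encoded errno on failure, while NULL does not trip IS_ERR() and therefore counts as success, which is what lets a blk-mq request-based table end up with no mempools at all. A minimal, hypothetical illustration of that idiom:

#include <linux/err.h>

/*
 * Hypothetical helper, illustrating the convention assumed above:
 * ERR_PTR(-ENOMEM) propagates as an error, while NULL or a valid
 * pointer both pass the IS_ERR() check and are treated as success.
 */
static int example_check_pools(struct dm_md_mempools *pools)
{
	if (IS_ERR(pools))
		return PTR_ERR(pools);	/* e.g. -ENOMEM */

	return 0;	/* NULL ("no mempools needed") is still success */
}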
@@ -1127,7 +1133,7 @@ int dm_table_complete(struct dm_table *t)
                return r;
        }
 
-       r = dm_table_alloc_md_mempools(t);
+       r = dm_table_alloc_md_mempools(t, t->md);
        if (r)
                DMERR("unable to allocate mempools");
 
@@ -1339,14 +1345,14 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
                        continue;
 
                if (ti->flush_supported)
-                       return 1;
+                       return true;
 
                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_flush_capable, &flush))
-                       return 1;
+                       return true;
        }
 
-       return 0;
+       return false;
 }
 
 static bool dm_table_discard_zeroes_data(struct dm_table *t)
@@ -1359,10 +1365,10 @@ static bool dm_table_discard_zeroes_data(struct dm_table *t)
                ti = dm_table_get_target(t, i++);
 
                if (ti->discard_zeroes_data_unsupported)
-                       return 0;
+                       return false;
        }
 
-       return 1;
+       return true;
 }
 
 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
@@ -1408,10 +1414,10 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
 
                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, func, NULL))
-                       return 0;
+                       return false;
        }
 
-       return 1;
+       return true;
 }
 
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1468,14 +1474,14 @@ static bool dm_table_supports_discards(struct dm_table *t)
                        continue;
 
                if (ti->discards_supported)
-                       return 1;
+                       return true;
 
                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_discard_capable, NULL))
-                       return 1;
+                       return true;
        }
 
-       return 0;
+       return false;
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1677,20 +1683,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
        return r;
 }
 
-int dm_table_any_busy_target(struct dm_table *t)
-{
-       unsigned i;
-       struct dm_target *ti;
-
-       for (i = 0; i < t->num_targets; i++) {
-               ti = t->targets + i;
-               if (ti->type->busy && ti->type->busy(ti))
-                       return 1;
-       }
-
-       return 0;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
        return t->md;
@@ -1709,9 +1701,13 @@ void dm_table_run_md_queue_async(struct dm_table *t)
        md = dm_table_get_md(t);
        queue = dm_get_md_queue(md);
        if (queue) {
-               spin_lock_irqsave(queue->queue_lock, flags);
-               blk_run_queue_async(queue);
-               spin_unlock_irqrestore(queue->queue_lock, flags);
+               if (queue->mq_ops)
+                       blk_mq_run_hw_queues(queue, true);
+               else {
+                       spin_lock_irqsave(queue->queue_lock, flags);
+                       blk_run_queue_async(queue);
+                       spin_unlock_irqrestore(queue->queue_lock, flags);
+               }
        }
 }
 EXPORT_SYMBOL(dm_table_run_md_queue_async);