dm, dax: Make sure dm_dax_flush() is called if device supports it
author Vivek Goyal <vgoyal@redhat.com>
Wed, 26 Jul 2017 13:35:09 +0000 (09:35 -0400)
committer Mike Snitzer <snitzer@redhat.com>
Wed, 26 Jul 2017 19:55:44 +0000 (15:55 -0400)
Currently dm_dax_flush() is not being called, even if the underlying dax
device supports write cache, because DAXDEV_WRITE_CACHE is not being
propagated up to the DM dax device.

If the underlying dax device supports write cache, set
DAXDEV_WRITE_CACHE on the DM dax device.  This will cause dm_dax_flush()
to be called.
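
As a rough illustration of the fix, here is a minimal userspace C sketch of
the flag propagation described above; the fake_* types and helpers are
simplified stand-ins for illustration only, not the kernel's dax/DM code.

/*
 * Model of the propagation this patch implements: if the underlying
 * device has its write-cache flag set, set the same flag on the
 * stacked (DM) device so that flushes are issued.
 */
#include <stdbool.h>
#include <stdio.h>

#define WRITE_CACHE_FLAG (1UL << 0)

struct fake_dax_device {
	unsigned long flags;
};

static bool fake_write_cache_enabled(const struct fake_dax_device *dev)
{
	return dev->flags & WRITE_CACHE_FLAG;
}

static void fake_set_write_cache(struct fake_dax_device *dev, bool wc)
{
	if (wc)
		dev->flags |= WRITE_CACHE_FLAG;
	else
		dev->flags &= ~WRITE_CACHE_FLAG;
}

int main(void)
{
	struct fake_dax_device underlying = { .flags = WRITE_CACHE_FLAG };
	struct fake_dax_device dm_dev = { .flags = 0 };

	/* Propagate the flag up, as dm_table_set_restrictions() now does. */
	if (fake_write_cache_enabled(&underlying))
		fake_set_write_cache(&dm_dev, true);

	printf("dm device write cache: %s\n",
	       fake_write_cache_enabled(&dm_dev) ? "enabled" : "disabled");
	return 0;
}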

Fixes: abebfbe2f7 ("dm: add ->flush() dax operation support")
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/dax/super.c
drivers/md/dm-table.c
include/linux/dax.h

diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ce9e563e6e1d47715a776405c3b86b283900a283..938eb4868f7f78c7264cae58b15a0dcc0565d5c4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc)
 }
 EXPORT_SYMBOL_GPL(dax_write_cache);
 
+bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+       return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+
 bool dax_alive(struct dax_device *dax_dev)
 {
        lockdep_assert_held(&dax_srcu);
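
The hunk above adds the dax_write_cache_enabled() getter. Below is a hedged
userspace sketch of why that state matters, using simplified fake_* stand-ins
rather than the real dax API: a flush is only issued when the device reports
a write cache, so a stacked device that never gets the flag silently skips
flushing, which is the behaviour this patch fixes.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_dax_device {
	bool write_cache;
};

static bool fake_write_cache_enabled(const struct fake_dax_device *dev)
{
	return dev->write_cache;
}

/* Stand-in for a cache-line writeback such as dax_flush(). */
static void fake_flush_range(struct fake_dax_device *dev, void *addr, size_t size)
{
	(void)dev;
	printf("flushing %zu bytes at %p\n", size, addr);
}

static void maybe_flush(struct fake_dax_device *dev, void *addr, size_t size)
{
	/* If the flag was never propagated to the DM device, this test
	 * fails and the flush is skipped. */
	if (fake_write_cache_enabled(dev))
		fake_flush_range(dev, addr, size);
}

int main(void)
{
	char buf[64];
	struct fake_dax_device dm_dev = { .write_cache = true };

	maybe_flush(&dm_dev, buf, sizeof(buf));
	return 0;
}
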
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a39bcd9b982a443cdfdac2013fcbf5ce3561ac4e..28a4071cdf85e8ae15e8741d8770199a689de6b2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -20,6 +20,7 @@
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
 #include <linux/mount.h>
+#include <linux/dax.h>
 
 #define DM_MSG_PREFIX "table"
 
@@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
        return false;
 }
 
+static int device_dax_write_cache_enabled(struct dm_target *ti,
+                                         struct dm_dev *dev, sector_t start,
+                                         sector_t len, void *data)
+{
+       struct dax_device *dax_dev = dev->dax_dev;
+
+       if (!dax_dev)
+               return false;
+
+       if (dax_write_cache_enabled(dax_dev))
+               return true;
+       return false;
+}
+
+static int dm_table_supports_dax_write_cache(struct dm_table *t)
+{
+       struct dm_target *ti;
+       unsigned i;
+
+       for (i = 0; i < dm_table_get_num_targets(t); i++) {
+               ti = dm_table_get_target(t, i);
+
+               if (ti->type->iterate_devices &&
+                   ti->type->iterate_devices(ti,
+                               device_dax_write_cache_enabled, NULL))
+                       return true;
+       }
+
+       return false;
+}
+
 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
                            sector_t start, sector_t len, void *data)
 {
@@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        }
        blk_queue_write_cache(q, wc, fua);
 
+       if (dm_table_supports_dax_write_cache(t))
+               dax_write_cache(t->md->dax_dev, true);
+
        /* Ensure that all underlying devices are non-rotational. */
        if (dm_table_all_devices_attribute(t, device_is_nonrot))
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
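
The dm-table.c hunks above walk every target and ask iterate_devices whether
any underlying device has the write cache enabled. The following is a small
userspace sketch of that "any device satisfies a predicate" pattern; the
fake_* types and function names are simplified stand-ins, not DM's real
interfaces.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_dev {
	bool dax_write_cache;
};

struct fake_target {
	struct fake_dev *devs;
	size_t num_devs;
};

typedef bool (*dev_predicate)(const struct fake_dev *dev);

static bool dev_write_cache_enabled(const struct fake_dev *dev)
{
	return dev->dax_write_cache;
}

/* Returns true if the predicate holds for any device of the target. */
static bool target_iterate_devices(const struct fake_target *ti, dev_predicate fn)
{
	for (size_t i = 0; i < ti->num_devs; i++)
		if (fn(&ti->devs[i]))
			return true;
	return false;
}

/* Returns true if any target in the table has a matching device. */
static bool table_supports(const struct fake_target *targets, size_t num_targets,
			   dev_predicate fn)
{
	for (size_t i = 0; i < num_targets; i++)
		if (target_iterate_devices(&targets[i], fn))
			return true;
	return false;
}

int main(void)
{
	struct fake_dev devs[] = { { .dax_write_cache = false },
				   { .dax_write_cache = true } };
	struct fake_target targets[] = { { .devs = devs, .num_devs = 2 } };

	if (table_supports(targets, 1, dev_write_cache_enabled))
		printf("propagate write-cache flag to the DM dax device\n");
	return 0;
}
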
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 79481187573288505a8136e8dffbf810cf9088a2..df97b7af7e2c7263c4ae9fb1608b19aad1ae02ba 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -87,6 +87,7 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t size);
 void dax_write_cache(struct dax_device *dax_dev, bool wc);
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
 
 /*
  * We use lowest available bit in exceptional entry for locking, one bit for