Merge tag 'for-4.12/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 6 May 2017 02:31:06 +0000 (19:31 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 6 May 2017 02:31:06 +0000 (19:31 -0700)
Pull device mapper fixes from Mike Snitzer:

 - DM cache metadata fixes to short-circuit operations that require the
   metadata not be in 'fail_io' mode. Otherwise crashes are possible.

 - a DM cache fix to address the inability to adapt to continuous IO
   that happened to also reflect a changing working set (which required
   old blocks be demoted before the new working set could be promoted)

 - a DM cache smq policy cleanup that fell out from reviewing the above

 - fix the Kconfig help text for CONFIG_DM_INTEGRITY

* tag 'for-4.12/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache metadata: fail operations if fail_io mode has been established
  dm integrity: improve the Kconfig help text for DM_INTEGRITY
  dm cache policy smq: cleanup free_target_met() and clean_target_met()
  dm cache policy smq: allow demotions to happen even during continuous IO

drivers/md/Kconfig
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-policy-smq.c

diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 349ff88134016661b09a119bf9fa57062b52846d..906103c168ea30ca3062b7cd78d2423587f54ba4 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -503,13 +503,24 @@ config DM_LOG_WRITES
          If unsure, say N.
 
 config DM_INTEGRITY
-       tristate "Integrity target"
+       tristate "Integrity target support"
        depends on BLK_DEV_DM
        select BLK_DEV_INTEGRITY
        select DM_BUFIO
        select CRYPTO
        select ASYNC_XOR
        ---help---
-          This is the integrity target.
+         This device-mapper target emulates a block device that has
+         additional per-sector tags that can be used for storing
+         integrity information.
+
+         This integrity target is used with the dm-crypt target to
+         provide authenticated disk encryption or it can be used
+         standalone.
+
+         To compile this code as a module, choose M here: the module will
+         be called dm-integrity.
+
+         If unsure, say N.
 
 endif # MD
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 8568dbd50ba402f23e0d5870e6c7d07737e530be..4a4e9c75fc4cddca59c30df2f18ebaa6f0baaa61 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1624,17 +1624,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
 
 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
 {
-       int r;
+       int r = -EINVAL;
        flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
                                 clear_clean_shutdown);
 
        WRITE_LOCK(cmd);
+       if (cmd->fail_io)
+               goto out;
+
        r = __commit_transaction(cmd, mutator);
        if (r)
                goto out;
 
        r = __begin_transaction(cmd);
-
 out:
        WRITE_UNLOCK(cmd);
        return r;
@@ -1646,7 +1648,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
        int r = -EINVAL;
 
        READ_LOCK(cmd);
-       r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+       if (!cmd->fail_io)
+               r = dm_sm_get_nr_free(cmd->metadata_sm, result);
        READ_UNLOCK(cmd);
 
        return r;
@@ -1658,7 +1661,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
        int r = -EINVAL;
 
        READ_LOCK(cmd);
-       r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+       if (!cmd->fail_io)
+               r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
        READ_UNLOCK(cmd);
 
        return r;
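
The hunks above all apply the same guard: default the return code to -EINVAL and only touch the metadata while fail_io has not been established. Below is a minimal userspace sketch of that pattern, assuming a pthread rwlock in place of the kernel's READ_LOCK/WRITE_LOCK macros; the struct and helper names are illustrative stand-ins, not the dm_cache_metadata API.

/* Minimal userspace sketch of the fail_io guard pattern above.
 * The types and helpers are illustrative stand-ins, not the kernel API. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_metadata {
        pthread_rwlock_t lock;
        bool fail_io;          /* set once the metadata device has failed */
        unsigned nr_free;      /* stand-in for the space-map counters */
};

/* Mirrors the shape of dm_cache_get_free_metadata_block_count(): start
 * from -EINVAL and only read the metadata when fail_io is not set. */
static int get_free_block_count(struct fake_metadata *md, unsigned *result)
{
        int r = -EINVAL;

        pthread_rwlock_rdlock(&md->lock);
        if (!md->fail_io) {
                *result = md->nr_free;
                r = 0;
        }
        pthread_rwlock_unlock(&md->lock);

        return r;
}

int main(void)
{
        struct fake_metadata md = { .fail_io = false, .nr_free = 42 };
        unsigned free_blocks;

        pthread_rwlock_init(&md.lock, NULL);

        printf("before failure: r=%d\n", get_free_block_count(&md, &free_blocks));
        md.fail_io = true;
        printf("after failure:  r=%d (-EINVAL=%d)\n",
               get_free_block_count(&md, &free_blocks), -EINVAL);

        pthread_rwlock_destroy(&md.lock);
        return 0;
}

Once fail_io is set, every caller gets -EINVAL back instead of dereferencing metadata structures that may already have been torn down, which is the crash the changelog refers to.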
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e0c40aec5e964eb1428f53ee5e84814d7ba9b75a..72479bd61e118e51c4bada036901ebe1bc4b1091 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,28 +1120,30 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
         * Cache entries may not be populated.  So we cannot rely on the
         * size of the clean queue.
         */
-       unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+       unsigned nr_clean;
 
-       if (idle)
+       if (idle) {
                /*
                 * We'd like to clean everything.
                 */
                return q_size(&mq->dirty) == 0u;
-       else
-               return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-                      percent_to_target(mq, CLEAN_TARGET);
+       }
+
+       nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+       return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
+               percent_to_target(mq, CLEAN_TARGET);
 }
 
 static bool free_target_met(struct smq_policy *mq, bool idle)
 {
-       unsigned nr_free = from_cblock(mq->cache_size) -
-                          mq->cache_alloc.nr_allocated;
+       unsigned nr_free;
 
-       if (idle)
-               return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
-                      percent_to_target(mq, FREE_TARGET);
-       else
+       if (!idle)
                return true;
+
+       nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+       return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+               percent_to_target(mq, FREE_TARGET);
 }
 
 /*----------------------------------------------------------------*/
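
The cleanup above only delays computing nr_clean/nr_free until after the early return; the comparison against the percentage target is unchanged. A self-contained sketch of that arithmetic, using an assumed 25% target and made-up block counts rather than the driver's real CLEAN_TARGET constant:

/* Standalone sketch of the "target met" arithmetic after the cleanup.
 * The 25% target and the numbers in main() are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define CLEAN_TARGET_PERCENT 25u   /* assumed value for illustration */

static unsigned percent_to_target(unsigned cache_size, unsigned percent)
{
        return cache_size * percent / 100u;
}

/* Only compute nr_clean when it is actually needed, as the refactored
 * clean_target_met() now does. */
static bool clean_target_met(unsigned cache_size, unsigned nr_dirty,
                             unsigned writebacks_queued, bool idle)
{
        unsigned nr_clean;

        if (idle)
                return nr_dirty == 0u;   /* when idle we want everything clean */

        nr_clean = cache_size - nr_dirty;
        return (nr_clean + writebacks_queued) >=
               percent_to_target(cache_size, CLEAN_TARGET_PERCENT);
}

int main(void)
{
        /* 1000-block cache, 900 dirty, 50 writebacks in flight:
         * 100 + 50 < 250, so the clean target is not yet met. */
        printf("busy: %d\n", clean_target_met(1000, 900, 50, false));
        printf("idle, all clean: %d\n", clean_target_met(1000, 0, 0, true));
        return 0;
}
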
@@ -1214,7 +1216,11 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
                return;
 
        if (allocator_empty(&mq->cache_alloc)) {
-               if (!free_target_met(mq, false))
+               /*
+                * We always claim to be 'idle' to ensure some demotions happen
+                * with continuous loads.
+                */
+               if (!free_target_met(mq, true))
                        queue_demotion(mq);
                return;
        }
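
The queue_promotion() hunk is the behavioural fix: with the allocator exhausted, free_target_met() is now asked the 'idle' question, so demotions keep being queued even under continuous IO. The sketch below, with an invented 25% target and block counts, shows why the old idle=false call never triggered a demotion on a busy, fully allocated cache.

/* Self-contained sketch of why passing 'true' matters.  With the old call
 * (idle = false) free_target_met() always reported the target as met under
 * load, so a saturated cache never demoted anything and the working set
 * could not turn over.  Values are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define FREE_TARGET_PERCENT 25u   /* assumed value for illustration */

static bool free_target_met(unsigned cache_size, unsigned nr_allocated,
                            unsigned demotions_queued, bool idle)
{
        unsigned nr_free;

        if (!idle)
                return true;   /* under load, never ask for more free blocks */

        nr_free = cache_size - nr_allocated;
        return (nr_free + demotions_queued) >=
               cache_size * FREE_TARGET_PERCENT / 100u;
}

int main(void)
{
        /* Fully allocated 1000-block cache, nothing queued for demotion. */
        unsigned size = 1000, allocated = 1000, queued = 0;

        /* Old call: idle == false, target always "met", no demotions. */
        printf("old call, target met: %d\n",
               free_target_met(size, allocated, queued, false));

        /* New call: idle == true, target not met, so queue_promotion()
         * queues a demotion and room is eventually made for promotions. */
        printf("new call, target met: %d\n",
               free_target_met(size, allocated, queued, true));
        return 0;
}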