drivers/md/dm-cache-metadata.c
1 /*
2  * Copyright (C) 2012 Red Hat, Inc.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-cache-metadata.h"
8
9 #include "persistent-data/dm-array.h"
10 #include "persistent-data/dm-bitset.h"
11 #include "persistent-data/dm-space-map.h"
12 #include "persistent-data/dm-space-map-disk.h"
13 #include "persistent-data/dm-transaction-manager.h"
14
15 #include <linux/device-mapper.h>
16
17 /*----------------------------------------------------------------*/
18
19 #define DM_MSG_PREFIX   "cache metadata"
20
21 #define CACHE_SUPERBLOCK_MAGIC 06142003
22 #define CACHE_SUPERBLOCK_LOCATION 0
23
24 /*
25  * defines a range of metadata versions that this module can handle.
26  */
27 #define MIN_CACHE_VERSION 1
28 #define MAX_CACHE_VERSION 2
29
30 /*
31  *  3 for btree insert +
32  *  2 for btree lookup used within space map
33  */
34 #define CACHE_MAX_CONCURRENT_LOCKS 5
35 #define SPACE_MAP_ROOT_SIZE 128
36
37 enum superblock_flag_bits {
38         /* for spotting crashes that would invalidate the dirty bitset */
39         CLEAN_SHUTDOWN,
40         /* metadata must be checked using the tools */
41         NEEDS_CHECK,
42 };
43
44 /*
45  * Each mapping from cache block -> origin block carries a set of flags.
46  */
47 enum mapping_bits {
48         /*
49          * A valid mapping.  Because we're using an array we clear this
50          * flag for a non-existent mapping.
51          */
52         M_VALID = 1,
53
54         /*
55          * The data on the cache is different from that on the origin.
56          * This flag is only used by metadata format 1.
57          */
58         M_DIRTY = 2
59 };
60
61 struct cache_disk_superblock {
62         __le32 csum;
63         __le32 flags;
64         __le64 blocknr;
65
66         __u8 uuid[16];
67         __le64 magic;
68         __le32 version;
69
70         __u8 policy_name[CACHE_POLICY_NAME_SIZE];
71         __le32 policy_hint_size;
72
73         __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
74         __le64 mapping_root;
75         __le64 hint_root;
76
77         __le64 discard_root;
78         __le64 discard_block_size;
79         __le64 discard_nr_blocks;
80
81         __le32 data_block_size;
82         __le32 metadata_block_size;
83         __le32 cache_blocks;
84
85         __le32 compat_flags;
86         __le32 compat_ro_flags;
87         __le32 incompat_flags;
88
89         __le32 read_hits;
90         __le32 read_misses;
91         __le32 write_hits;
92         __le32 write_misses;
93
94         __le32 policy_version[CACHE_POLICY_VERSION_SIZE];
95
96         /*
97          * Metadata format 2 fields.
98          */
99         __le64 dirty_root;
100 } __packed;
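/*
 * Note: __commit_transaction() below contains a BUILD_BUG_ON() ensuring this
 * structure never grows beyond a 512-byte sector, so new fields must be
 * added with care.
 */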
101
102 struct dm_cache_metadata {
103         atomic_t ref_count;
104         struct list_head list;
105
106         unsigned version;
107         struct block_device *bdev;
108         struct dm_block_manager *bm;
109         struct dm_space_map *metadata_sm;
110         struct dm_transaction_manager *tm;
111
112         struct dm_array_info info;
113         struct dm_array_info hint_info;
114         struct dm_disk_bitset discard_info;
115
116         struct rw_semaphore root_lock;
117         unsigned long flags;
118         dm_block_t root;
119         dm_block_t hint_root;
120         dm_block_t discard_root;
121
122         sector_t discard_block_size;
123         dm_dblock_t discard_nr_blocks;
124
125         sector_t data_block_size;
126         dm_cblock_t cache_blocks;
127         bool changed:1;
128         bool clean_when_opened:1;
129
130         char policy_name[CACHE_POLICY_NAME_SIZE];
131         unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
132         size_t policy_hint_size;
133         struct dm_cache_statistics stats;
134
135         /*
136          * Reading the space map root can fail, so we read it into this
137          * buffer before the superblock is locked and updated.
138          */
139         __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
140
141         /*
142          * Set if a transaction has to be aborted but the attempt to roll
143          * back to the previous (good) transaction failed.  The only
144          * metadata operation permissible in this state is the closing of
145          * the device.
146          */
147         bool fail_io:1;
148
149         /*
150          * Metadata format 2 fields.
151          */
152         dm_block_t dirty_root;
153         struct dm_disk_bitset dirty_info;
154
155         /*
156          * These structures are used when loading metadata.  They're too
157          * big to put on the stack.
158          */
159         struct dm_array_cursor mapping_cursor;
160         struct dm_array_cursor hint_cursor;
161         struct dm_bitset_cursor dirty_cursor;
162 };
163
164 /*-------------------------------------------------------------------
165  * superblock validator
166  *-----------------------------------------------------------------*/
167
168 #define SUPERBLOCK_CSUM_XOR 9031977
169
170 static void sb_prepare_for_write(struct dm_block_validator *v,
171                                  struct dm_block *b,
172                                  size_t sb_block_size)
173 {
174         struct cache_disk_superblock *disk_super = dm_block_data(b);
175
176         disk_super->blocknr = cpu_to_le64(dm_block_location(b));
177         disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
178                                                       sb_block_size - sizeof(__le32),
179                                                       SUPERBLOCK_CSUM_XOR));
180 }
181
182 static int check_metadata_version(struct cache_disk_superblock *disk_super)
183 {
184         uint32_t metadata_version = le32_to_cpu(disk_super->version);
185
186         if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
187                 DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
188                       metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
189                 return -EINVAL;
190         }
191
192         return 0;
193 }
194
195 static int sb_check(struct dm_block_validator *v,
196                     struct dm_block *b,
197                     size_t sb_block_size)
198 {
199         struct cache_disk_superblock *disk_super = dm_block_data(b);
200         __le32 csum_le;
201
202         if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
203                 DMERR("sb_check failed: blocknr %llu: wanted %llu",
204                       le64_to_cpu(disk_super->blocknr),
205                       (unsigned long long)dm_block_location(b));
206                 return -ENOTBLK;
207         }
208
209         if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
210                 DMERR("sb_check failed: magic %llu: wanted %llu",
211                       le64_to_cpu(disk_super->magic),
212                       (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
213                 return -EILSEQ;
214         }
215
216         csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
217                                              sb_block_size - sizeof(__le32),
218                                              SUPERBLOCK_CSUM_XOR));
219         if (csum_le != disk_super->csum) {
220                 DMERR("sb_check failed: csum %u: wanted %u",
221                       le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
222                 return -EILSEQ;
223         }
224
225         return check_metadata_version(disk_super);
226 }
227
228 static struct dm_block_validator sb_validator = {
229         .name = "superblock",
230         .prepare_for_write = sb_prepare_for_write,
231         .check = sb_check
232 };
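/*
 * The block manager invokes sb_prepare_for_write() just before the
 * superblock is written out and sb_check() when it is read back in, so the
 * location, magic and checksum (which covers the block from the flags field
 * onwards, i.e. everything except csum itself) are verified whenever the
 * superblock moves between memory and disk.
 */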
233
234 /*----------------------------------------------------------------*/
235
236 static int superblock_read_lock(struct dm_cache_metadata *cmd,
237                                 struct dm_block **sblock)
238 {
239         return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
240                                &sb_validator, sblock);
241 }
242
243 static int superblock_lock_zero(struct dm_cache_metadata *cmd,
244                                 struct dm_block **sblock)
245 {
246         return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
247                                      &sb_validator, sblock);
248 }
249
250 static int superblock_lock(struct dm_cache_metadata *cmd,
251                            struct dm_block **sblock)
252 {
253         return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
254                                 &sb_validator, sblock);
255 }
256
257 /*----------------------------------------------------------------*/
258
259 static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
260 {
261         int r;
262         unsigned i;
263         struct dm_block *b;
264         __le64 *data_le, zero = cpu_to_le64(0);
265         unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
266
267         /*
268          * We can't use a validator here - it may be all zeroes.
269          */
270         r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
271         if (r)
272                 return r;
273
274         data_le = dm_block_data(b);
275         *result = true;
276         for (i = 0; i < sb_block_size; i++) {
277                 if (data_le[i] != zero) {
278                         *result = false;
279                         break;
280                 }
281         }
282
283         dm_bm_unlock(b);
284
285         return 0;
286 }
287
288 static void __setup_mapping_info(struct dm_cache_metadata *cmd)
289 {
290         struct dm_btree_value_type vt;
291
292         vt.context = NULL;
293         vt.size = sizeof(__le64);
294         vt.inc = NULL;
295         vt.dec = NULL;
296         vt.equal = NULL;
297         dm_array_info_init(&cmd->info, cmd->tm, &vt);
298
299         if (cmd->policy_hint_size) {
300                 vt.size = sizeof(__le32);
301                 dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
302         }
303 }
304
305 static int __save_sm_root(struct dm_cache_metadata *cmd)
306 {
307         int r;
308         size_t metadata_len;
309
310         r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
311         if (r < 0)
312                 return r;
313
314         return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
315                                metadata_len);
316 }
317
318 static void __copy_sm_root(struct dm_cache_metadata *cmd,
319                            struct cache_disk_superblock *disk_super)
320 {
321         memcpy(&disk_super->metadata_space_map_root,
322                &cmd->metadata_space_map_root,
323                sizeof(cmd->metadata_space_map_root));
324 }
325
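/*
 * Metadata format 1 records the dirty state in the per-mapping M_DIRTY
 * flag; format 2 keeps it in a separate on-disk bitset rooted at
 * dirty_root instead.
 */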
326 static bool separate_dirty_bits(struct dm_cache_metadata *cmd)
327 {
328         return cmd->version >= 2;
329 }
330
331 static int __write_initial_superblock(struct dm_cache_metadata *cmd)
332 {
333         int r;
334         struct dm_block *sblock;
335         struct cache_disk_superblock *disk_super;
336         sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
337
338         /* FIXME: see if we can lose the max sectors limit */
339         if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
340                 bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
341
342         r = dm_tm_pre_commit(cmd->tm);
343         if (r < 0)
344                 return r;
345
346         /*
347          * dm_sm_copy_root() can fail.  So we need to do it before we start
348          * updating the superblock.
349          */
350         r = __save_sm_root(cmd);
351         if (r)
352                 return r;
353
354         r = superblock_lock_zero(cmd, &sblock);
355         if (r)
356                 return r;
357
358         disk_super = dm_block_data(sblock);
359         disk_super->flags = 0;
360         memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
361         disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
362         disk_super->version = cpu_to_le32(cmd->version);
363         memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
364         memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
365         disk_super->policy_hint_size = 0;
366
367         __copy_sm_root(cmd, disk_super);
368
369         disk_super->mapping_root = cpu_to_le64(cmd->root);
370         disk_super->hint_root = cpu_to_le64(cmd->hint_root);
371         disk_super->discard_root = cpu_to_le64(cmd->discard_root);
372         disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
373         disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
374         disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
375         disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
376         disk_super->cache_blocks = cpu_to_le32(0);
377
378         disk_super->read_hits = cpu_to_le32(0);
379         disk_super->read_misses = cpu_to_le32(0);
380         disk_super->write_hits = cpu_to_le32(0);
381         disk_super->write_misses = cpu_to_le32(0);
382
383         if (separate_dirty_bits(cmd))
384                 disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
385
386         return dm_tm_commit(cmd->tm, sblock);
387 }
388
389 static int __format_metadata(struct dm_cache_metadata *cmd)
390 {
391         int r;
392
393         r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
394                                  &cmd->tm, &cmd->metadata_sm);
395         if (r < 0) {
396                 DMERR("tm_create_with_sm failed");
397                 return r;
398         }
399
400         __setup_mapping_info(cmd);
401
402         r = dm_array_empty(&cmd->info, &cmd->root);
403         if (r < 0)
404                 goto bad;
405
406         if (separate_dirty_bits(cmd)) {
407                 dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
408                 r = dm_bitset_empty(&cmd->dirty_info, &cmd->dirty_root);
409                 if (r < 0)
410                         goto bad;
411         }
412
413         dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
414         r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
415         if (r < 0)
416                 goto bad;
417
418         cmd->discard_block_size = 0;
419         cmd->discard_nr_blocks = 0;
420
421         r = __write_initial_superblock(cmd);
422         if (r)
423                 goto bad;
424
425         cmd->clean_when_opened = true;
426         return 0;
427
428 bad:
429         dm_tm_destroy(cmd->tm);
430         dm_sm_destroy(cmd->metadata_sm);
431
432         return r;
433 }
434
435 static int __check_incompat_features(struct cache_disk_superblock *disk_super,
436                                      struct dm_cache_metadata *cmd)
437 {
438         uint32_t incompat_flags, features;
439
440         incompat_flags = le32_to_cpu(disk_super->incompat_flags);
441         features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
442         if (features) {
443                 DMERR("could not access metadata due to unsupported optional features (%lx).",
444                       (unsigned long)features);
445                 return -EINVAL;
446         }
447
448         /*
449          * Check for read-only metadata to skip the following RDWR checks.
450          */
451         if (get_disk_ro(cmd->bdev->bd_disk))
452                 return 0;
453
454         features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
455         if (features) {
456                 DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
457                       (unsigned long)features);
458                 return -EINVAL;
459         }
460
461         return 0;
462 }
463
464 static int __open_metadata(struct dm_cache_metadata *cmd)
465 {
466         int r;
467         struct dm_block *sblock;
468         struct cache_disk_superblock *disk_super;
469         unsigned long sb_flags;
470
471         r = superblock_read_lock(cmd, &sblock);
472         if (r < 0) {
473                 DMERR("couldn't read lock superblock");
474                 return r;
475         }
476
477         disk_super = dm_block_data(sblock);
478
479         /* Verify the data block size hasn't changed */
480         if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
481                 DMERR("changing the data block size (from %u to %llu) is not supported",
482                       le32_to_cpu(disk_super->data_block_size),
483                       (unsigned long long)cmd->data_block_size);
484                 r = -EINVAL;
485                 goto bad;
486         }
487
488         r = __check_incompat_features(disk_super, cmd);
489         if (r < 0)
490                 goto bad;
491
492         r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
493                                disk_super->metadata_space_map_root,
494                                sizeof(disk_super->metadata_space_map_root),
495                                &cmd->tm, &cmd->metadata_sm);
496         if (r < 0) {
497                 DMERR("tm_open_with_sm failed");
498                 goto bad;
499         }
500
501         __setup_mapping_info(cmd);
502         dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
503         dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
504         sb_flags = le32_to_cpu(disk_super->flags);
505         cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
506         dm_bm_unlock(sblock);
507
508         return 0;
509
510 bad:
511         dm_bm_unlock(sblock);
512         return r;
513 }
514
515 static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
516                                      bool format_device)
517 {
518         int r;
519         bool unformatted = false;
520
521         r = __superblock_all_zeroes(cmd->bm, &unformatted);
522         if (r)
523                 return r;
524
525         if (unformatted)
526                 return format_device ? __format_metadata(cmd) : -EPERM;
527
528         return __open_metadata(cmd);
529 }
530
531 static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
532                                             bool may_format_device)
533 {
534         int r;
535         cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
536                                           CACHE_MAX_CONCURRENT_LOCKS);
537         if (IS_ERR(cmd->bm)) {
538                 DMERR("could not create block manager");
539                 return PTR_ERR(cmd->bm);
540         }
541
542         r = __open_or_format_metadata(cmd, may_format_device);
543         if (r)
544                 dm_block_manager_destroy(cmd->bm);
545
546         return r;
547 }
548
549 static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
550 {
551         dm_sm_destroy(cmd->metadata_sm);
552         dm_tm_destroy(cmd->tm);
553         dm_block_manager_destroy(cmd->bm);
554 }
555
556 typedef unsigned long (*flags_mutator)(unsigned long);
557
558 static void update_flags(struct cache_disk_superblock *disk_super,
559                          flags_mutator mutator)
560 {
561         uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
562         disk_super->flags = cpu_to_le32(sb_flags);
563 }
564
565 static unsigned long set_clean_shutdown(unsigned long flags)
566 {
567         set_bit(CLEAN_SHUTDOWN, &flags);
568         return flags;
569 }
570
571 static unsigned long clear_clean_shutdown(unsigned long flags)
572 {
573         clear_bit(CLEAN_SHUTDOWN, &flags);
574         return flags;
575 }
576
577 static void read_superblock_fields(struct dm_cache_metadata *cmd,
578                                    struct cache_disk_superblock *disk_super)
579 {
580         cmd->version = le32_to_cpu(disk_super->version);
581         cmd->flags = le32_to_cpu(disk_super->flags);
582         cmd->root = le64_to_cpu(disk_super->mapping_root);
583         cmd->hint_root = le64_to_cpu(disk_super->hint_root);
584         cmd->discard_root = le64_to_cpu(disk_super->discard_root);
585         cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
586         cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
587         cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
588         cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
589         strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
590         cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
591         cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
592         cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
593         cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
594
595         cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
596         cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
597         cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
598         cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
599
600         if (separate_dirty_bits(cmd))
601                 cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);
602
603         cmd->changed = false;
604 }
605
606 /*
607  * The mutator updates the superblock flags.
608  */
609 static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
610                                      flags_mutator mutator)
611 {
612         int r;
613         struct cache_disk_superblock *disk_super;
614         struct dm_block *sblock;
615
616         r = superblock_lock(cmd, &sblock);
617         if (r)
618                 return r;
619
620         disk_super = dm_block_data(sblock);
621         update_flags(disk_super, mutator);
622         read_superblock_fields(cmd, disk_super);
623         dm_bm_unlock(sblock);
624
625         return dm_bm_flush(cmd->bm);
626 }
627
628 static int __begin_transaction(struct dm_cache_metadata *cmd)
629 {
630         int r;
631         struct cache_disk_superblock *disk_super;
632         struct dm_block *sblock;
633
634         /*
635          * We re-read the superblock every time.  Shouldn't need to do this
636          * really.
637          */
638         r = superblock_read_lock(cmd, &sblock);
639         if (r)
640                 return r;
641
642         disk_super = dm_block_data(sblock);
643         read_superblock_fields(cmd, disk_super);
644         dm_bm_unlock(sblock);
645
646         return 0;
647 }
648
649 static int __commit_transaction(struct dm_cache_metadata *cmd,
650                                 flags_mutator mutator)
651 {
652         int r;
653         struct cache_disk_superblock *disk_super;
654         struct dm_block *sblock;
655
656         /*
657          * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
658          */
659         BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
660
661         if (separate_dirty_bits(cmd)) {
662                 r = dm_bitset_flush(&cmd->dirty_info, cmd->dirty_root,
663                                     &cmd->dirty_root);
664                 if (r)
665                         return r;
666         }
667
668         r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
669                             &cmd->discard_root);
670         if (r)
671                 return r;
672
673         r = dm_tm_pre_commit(cmd->tm);
674         if (r < 0)
675                 return r;
676
677         r = __save_sm_root(cmd);
678         if (r)
679                 return r;
680
681         r = superblock_lock(cmd, &sblock);
682         if (r)
683                 return r;
684
685         disk_super = dm_block_data(sblock);
686
687         disk_super->flags = cpu_to_le32(cmd->flags);
688         if (mutator)
689                 update_flags(disk_super, mutator);
690
691         disk_super->mapping_root = cpu_to_le64(cmd->root);
692         if (separate_dirty_bits(cmd))
693                 disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
694         disk_super->hint_root = cpu_to_le64(cmd->hint_root);
695         disk_super->discard_root = cpu_to_le64(cmd->discard_root);
696         disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
697         disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
698         disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
699         strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
700         disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
701         disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
702         disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
703
704         disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
705         disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
706         disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
707         disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
708         __copy_sm_root(cmd, disk_super);
709
710         return dm_tm_commit(cmd->tm, sblock);
711 }
712
713 /*----------------------------------------------------------------*/
714
715 /*
716  * The mappings are held in a dm-array that has 64-bit values stored in
717  * little-endian format.  The index is the cblock, the high 48 bits of the
718  * value are the oblock and the low 16 bits are the flags.
719  */
720 #define FLAGS_MASK ((1 << 16) - 1)
721
722 static __le64 pack_value(dm_oblock_t block, unsigned flags)
723 {
724         uint64_t value = from_oblock(block);
725         value <<= 16;
726         value = value | (flags & FLAGS_MASK);
727         return cpu_to_le64(value);
728 }
729
730 static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
731 {
732         uint64_t value = le64_to_cpu(value_le);
733         uint64_t b = value >> 16;
734         *block = to_oblock(b);
735         *flags = value & FLAGS_MASK;
736 }
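/*
 * Worked example (illustrative only): packing oblock 3 with flags
 * M_VALID | M_DIRTY stores (3 << 16) | 3 = 0x30003 (little-endian on disk);
 * unpack_value() on that value recovers oblock 3 and the same flags.
 */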
737
738 /*----------------------------------------------------------------*/
739
740 static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
741                                                sector_t data_block_size,
742                                                bool may_format_device,
743                                                size_t policy_hint_size,
744                                                unsigned metadata_version)
745 {
746         int r;
747         struct dm_cache_metadata *cmd;
748
749         cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
750         if (!cmd) {
751                 DMERR("could not allocate metadata struct");
752                 return ERR_PTR(-ENOMEM);
753         }
754
755         cmd->version = metadata_version;
756         atomic_set(&cmd->ref_count, 1);
757         init_rwsem(&cmd->root_lock);
758         cmd->bdev = bdev;
759         cmd->data_block_size = data_block_size;
760         cmd->cache_blocks = 0;
761         cmd->policy_hint_size = policy_hint_size;
762         cmd->changed = true;
763         cmd->fail_io = false;
764
765         r = __create_persistent_data_objects(cmd, may_format_device);
766         if (r) {
767                 kfree(cmd);
768                 return ERR_PTR(r);
769         }
770
771         r = __begin_transaction_flags(cmd, clear_clean_shutdown);
772         if (r < 0) {
773                 dm_cache_metadata_close(cmd);
774                 return ERR_PTR(r);
775         }
776
777         return cmd;
778 }
779
780 /*
781  * We keep a little list of ref counted metadata objects to prevent two
782  * different target instances creating separate bufio instances.  This is
783  * an issue if a table is reloaded before the suspend.
784  */
785 static DEFINE_MUTEX(table_lock);
786 static LIST_HEAD(table);
787
788 static struct dm_cache_metadata *lookup(struct block_device *bdev)
789 {
790         struct dm_cache_metadata *cmd;
791
792         list_for_each_entry(cmd, &table, list)
793                 if (cmd->bdev == bdev) {
794                         atomic_inc(&cmd->ref_count);
795                         return cmd;
796                 }
797
798         return NULL;
799 }
800
801 static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
802                                                 sector_t data_block_size,
803                                                 bool may_format_device,
804                                                 size_t policy_hint_size,
805                                                 unsigned metadata_version)
806 {
807         struct dm_cache_metadata *cmd, *cmd2;
808
809         mutex_lock(&table_lock);
810         cmd = lookup(bdev);
811         mutex_unlock(&table_lock);
812
813         if (cmd)
814                 return cmd;
815
816         cmd = metadata_open(bdev, data_block_size, may_format_device,
817                             policy_hint_size, metadata_version);
818         if (!IS_ERR(cmd)) {
819                 mutex_lock(&table_lock);
820                 cmd2 = lookup(bdev);
821                 if (cmd2) {
822                         mutex_unlock(&table_lock);
823                         __destroy_persistent_data_objects(cmd);
824                         kfree(cmd);
825                         return cmd2;
826                 }
827                 list_add(&cmd->list, &table);
828                 mutex_unlock(&table_lock);
829         }
830
831         return cmd;
832 }
833
834 static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
835 {
836         if (cmd->data_block_size != data_block_size) {
837                 DMERR("data_block_size (%llu) different from that in metadata (%llu)",
838                       (unsigned long long) data_block_size,
839                       (unsigned long long) cmd->data_block_size);
840                 return false;
841         }
842
843         return true;
844 }
845
846 struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
847                                                  sector_t data_block_size,
848                                                  bool may_format_device,
849                                                  size_t policy_hint_size,
850                                                  unsigned metadata_version)
851 {
852         struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
853                                                        policy_hint_size, metadata_version);
854
855         if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
856                 dm_cache_metadata_close(cmd);
857                 return ERR_PTR(-EINVAL);
858         }
859
860         return cmd;
861 }
862
863 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
864 {
865         if (atomic_dec_and_test(&cmd->ref_count)) {
866                 mutex_lock(&table_lock);
867                 list_del(&cmd->list);
868                 mutex_unlock(&table_lock);
869
870                 if (!cmd->fail_io)
871                         __destroy_persistent_data_objects(cmd);
872                 kfree(cmd);
873         }
874 }
875
876 /*
877  * Checks that the given cache block is either unmapped or clean.
878  */
879 static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b,
880                                       bool *result)
881 {
882         int r;
883         __le64 value;
884         dm_oblock_t ob;
885         unsigned flags;
886
887         r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
888         if (r)
889                 return r;
890
891         unpack_value(value, &ob, &flags);
892         *result = !((flags & M_VALID) && (flags & M_DIRTY));
893
894         return 0;
895 }
896
897 static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd,
898                                            dm_cblock_t begin, dm_cblock_t end,
899                                            bool *result)
900 {
901         int r;
902         *result = true;
903
904         while (begin != end) {
905                 r = block_clean_combined_dirty(cmd, begin, result);
906                 if (r) {
907                         DMERR("block_clean_combined_dirty failed");
908                         return r;
909                 }
910
911                 if (!*result) {
912                         DMERR("cache block %llu is dirty",
913                               (unsigned long long) from_cblock(begin));
914                         return 0;
915                 }
916
917                 begin = to_cblock(from_cblock(begin) + 1);
918         }
919
920         return 0;
921 }
922
923 static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
924                                            dm_cblock_t begin, dm_cblock_t end,
925                                            bool *result)
926 {
927         int r;
928         bool dirty_flag;
929         *result = true;
930
931         r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
932                                    from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
933         if (r) {
934                 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
935                 return r;
936         }
937
938         r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));
939         if (r) {
940                 DMERR("%s: dm_bitset_cursor_skip for dirty failed", __func__);
941                 dm_bitset_cursor_end(&cmd->dirty_cursor);
942                 return r;
943         }
944
945         while (begin != end) {
946                 /*
947                  * We assume that unmapped blocks have their dirty bit
948                  * cleared.
949                  */
950                 dirty_flag = dm_bitset_cursor_get_value(&cmd->dirty_cursor);
951                 if (dirty_flag) {
952                         DMERR("%s: cache block %llu is dirty", __func__,
953                               (unsigned long long) from_cblock(begin));
954                         dm_bitset_cursor_end(&cmd->dirty_cursor);
955                         *result = false;
956                         return 0;
957                 }
958
959                 r = dm_bitset_cursor_next(&cmd->dirty_cursor);
960                 if (r) {
961                         DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
962                         dm_bitset_cursor_end(&cmd->dirty_cursor);
963                         return r;
964                 }
965
966                 begin = to_cblock(from_cblock(begin) + 1);
967         }
968
969         dm_bitset_cursor_end(&cmd->dirty_cursor);
970
971         return 0;
972 }
973
974 static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
975                                         dm_cblock_t begin, dm_cblock_t end,
976                                         bool *result)
977 {
978         if (separate_dirty_bits(cmd))
979                 return blocks_are_clean_separate_dirty(cmd, begin, end, result);
980         else
981                 return blocks_are_clean_combined_dirty(cmd, begin, end, result);
982 }
983
984 static bool cmd_write_lock(struct dm_cache_metadata *cmd)
985 {
986         down_write(&cmd->root_lock);
987         if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
988                 up_write(&cmd->root_lock);
989                 return false;
990         }
991         return true;
992 }
993
994 #define WRITE_LOCK(cmd)                         \
995         do {                                    \
996                 if (!cmd_write_lock((cmd)))     \
997                         return -EINVAL;         \
998         } while(0)
999
1000 #define WRITE_LOCK_VOID(cmd)                    \
1001         do {                                    \
1002                 if (!cmd_write_lock((cmd)))     \
1003                         return;                 \
1004         } while(0)
1005
1006 #define WRITE_UNLOCK(cmd) \
1007         up_write(&(cmd)->root_lock)
1008
1009 static bool cmd_read_lock(struct dm_cache_metadata *cmd)
1010 {
1011         down_read(&cmd->root_lock);
1012         if (cmd->fail_io) {
1013                 up_read(&cmd->root_lock);
1014                 return false;
1015         }
1016         return true;
1017 }
1018
1019 #define READ_LOCK(cmd)                          \
1020         do {                                    \
1021                 if (!cmd_read_lock((cmd)))      \
1022                         return -EINVAL;         \
1023         } while(0)
1024
1025 #define READ_LOCK_VOID(cmd)                     \
1026         do {                                    \
1027                 if (!cmd_read_lock((cmd)))      \
1028                         return;                 \
1029         } while(0)
1030
1031 #define READ_UNLOCK(cmd) \
1032         up_read(&(cmd)->root_lock)
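/*
 * The exported entry points below follow a common pattern, sketched here
 * with illustrative names only (dm_cache_foo / __foo are not real functions):
 *
 *	int dm_cache_foo(struct dm_cache_metadata *cmd)
 *	{
 *		int r;
 *
 *		WRITE_LOCK(cmd);	(bails out with -EINVAL if fail_io is
 *					 set or the block manager is read-only)
 *		r = __foo(cmd);		(double-underscore helpers expect
 *					 root_lock to be held)
 *		WRITE_UNLOCK(cmd);
 *
 *		return r;
 *	}
 */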
1033
1034 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
1035 {
1036         int r;
1037         bool clean;
1038         __le64 null_mapping = pack_value(0, 0);
1039
1040         WRITE_LOCK(cmd);
1041         __dm_bless_for_disk(&null_mapping);
1042
1043         if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
1044                 r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
1045                 if (r) {
1046                         __dm_unbless_for_disk(&null_mapping);
1047                         goto out;
1048                 }
1049
1050                 if (!clean) {
1051                         DMERR("unable to shrink cache due to dirty blocks");
1052                         r = -EINVAL;
1053                         __dm_unbless_for_disk(&null_mapping);
1054                         goto out;
1055                 }
1056         }
1057
1058         r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
1059                             from_cblock(new_cache_size),
1060                             &null_mapping, &cmd->root);
1061         if (r)
1062                 goto out;
1063
1064         if (separate_dirty_bits(cmd)) {
1065                 r = dm_bitset_resize(&cmd->dirty_info, cmd->dirty_root,
1066                                      from_cblock(cmd->cache_blocks), from_cblock(new_cache_size),
1067                                      false, &cmd->dirty_root);
1068                 if (r)
1069                         goto out;
1070         }
1071
1072         cmd->cache_blocks = new_cache_size;
1073         cmd->changed = true;
1074
1075 out:
1076         WRITE_UNLOCK(cmd);
1077
1078         return r;
1079 }
1080
1081 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
1082                                    sector_t discard_block_size,
1083                                    dm_dblock_t new_nr_entries)
1084 {
1085         int r;
1086
1087         WRITE_LOCK(cmd);
1088         r = dm_bitset_resize(&cmd->discard_info,
1089                              cmd->discard_root,
1090                              from_dblock(cmd->discard_nr_blocks),
1091                              from_dblock(new_nr_entries),
1092                              false, &cmd->discard_root);
1093         if (!r) {
1094                 cmd->discard_block_size = discard_block_size;
1095                 cmd->discard_nr_blocks = new_nr_entries;
1096         }
1097
1098         cmd->changed = true;
1099         WRITE_UNLOCK(cmd);
1100
1101         return r;
1102 }
1103
1104 static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1105 {
1106         return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
1107                                  from_dblock(b), &cmd->discard_root);
1108 }
1109
1110 static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1111 {
1112         return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
1113                                    from_dblock(b), &cmd->discard_root);
1114 }
1115
1116 static int __discard(struct dm_cache_metadata *cmd,
1117                      dm_dblock_t dblock, bool discard)
1118 {
1119         int r;
1120
1121         r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
1122         if (r)
1123                 return r;
1124
1125         cmd->changed = true;
1126         return 0;
1127 }
1128
1129 int dm_cache_set_discard(struct dm_cache_metadata *cmd,
1130                          dm_dblock_t dblock, bool discard)
1131 {
1132         int r;
1133
1134         WRITE_LOCK(cmd);
1135         r = __discard(cmd, dblock, discard);
1136         WRITE_UNLOCK(cmd);
1137
1138         return r;
1139 }
1140
1141 static int __load_discards(struct dm_cache_metadata *cmd,
1142                            load_discard_fn fn, void *context)
1143 {
1144         int r = 0;
1145         uint32_t b;
1146         struct dm_bitset_cursor c;
1147
1148         if (from_dblock(cmd->discard_nr_blocks) == 0)
1149                 /* nothing to do */
1150                 return 0;
1151
1152         if (cmd->clean_when_opened) {
1153                 r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root, &cmd->discard_root);
1154                 if (r)
1155                         return r;
1156
1157                 r = dm_bitset_cursor_begin(&cmd->discard_info, cmd->discard_root,
1158                                            from_dblock(cmd->discard_nr_blocks), &c);
1159                 if (r)
1160                         return r;
1161
1162                 for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1163                         r = fn(context, cmd->discard_block_size, to_dblock(b),
1164                                dm_bitset_cursor_get_value(&c));
1165                         if (r)
1166                                 break;
1167                 }
1168
1169                 dm_bitset_cursor_end(&c);
1170
1171         } else {
1172                 for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1173                         r = fn(context, cmd->discard_block_size, to_dblock(b), false);
1174                         if (r)
1175                                 return r;
1176                 }
1177         }
1178
1179         return r;
1180 }
1181
1182 int dm_cache_load_discards(struct dm_cache_metadata *cmd,
1183                            load_discard_fn fn, void *context)
1184 {
1185         int r;
1186
1187         READ_LOCK(cmd);
1188         r = __load_discards(cmd, fn, context);
1189         READ_UNLOCK(cmd);
1190
1191         return r;
1192 }
1193
1194 int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
1195 {
1196         READ_LOCK(cmd);
1197         *result = cmd->cache_blocks;
1198         READ_UNLOCK(cmd);
1199
1200         return 0;
1201 }
1202
1203 static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1204 {
1205         int r;
1206         __le64 value = pack_value(0, 0);
1207
1208         __dm_bless_for_disk(&value);
1209         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1210                                &value, &cmd->root);
1211         if (r)
1212                 return r;
1213
1214         cmd->changed = true;
1215         return 0;
1216 }
1217
1218 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1219 {
1220         int r;
1221
1222         WRITE_LOCK(cmd);
1223         r = __remove(cmd, cblock);
1224         WRITE_UNLOCK(cmd);
1225
1226         return r;
1227 }
1228
1229 static int __insert(struct dm_cache_metadata *cmd,
1230                     dm_cblock_t cblock, dm_oblock_t oblock)
1231 {
1232         int r;
1233         __le64 value = pack_value(oblock, M_VALID);
1234         __dm_bless_for_disk(&value);
1235
1236         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1237                                &value, &cmd->root);
1238         if (r)
1239                 return r;
1240
1241         cmd->changed = true;
1242         return 0;
1243 }
1244
1245 int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
1246                             dm_cblock_t cblock, dm_oblock_t oblock)
1247 {
1248         int r;
1249
1250         WRITE_LOCK(cmd);
1251         r = __insert(cmd, cblock, oblock);
1252         WRITE_UNLOCK(cmd);
1253
1254         return r;
1255 }
1256
1257 struct thunk {
1258         load_mapping_fn fn;
1259         void *context;
1260
1261         struct dm_cache_metadata *cmd;
1262         bool respect_dirty_flags;
1263         bool hints_valid;
1264 };
1265
1266 static bool policy_unchanged(struct dm_cache_metadata *cmd,
1267                              struct dm_cache_policy *policy)
1268 {
1269         const char *policy_name = dm_cache_policy_get_name(policy);
1270         const unsigned *policy_version = dm_cache_policy_get_version(policy);
1271         size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
1272
1273         /*
1274          * Ensure policy names match.
1275          */
1276         if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
1277                 return false;
1278
1279         /*
1280          * Ensure policy major versions match.
1281          */
1282         if (cmd->policy_version[0] != policy_version[0])
1283                 return false;
1284
1285         /*
1286          * Ensure policy hint sizes match.
1287          */
1288         if (cmd->policy_hint_size != policy_hint_size)
1289                 return false;
1290
1291         return true;
1292 }
1293
1294 static bool hints_array_initialized(struct dm_cache_metadata *cmd)
1295 {
1296         return cmd->hint_root && cmd->policy_hint_size;
1297 }
1298
1299 static bool hints_array_available(struct dm_cache_metadata *cmd,
1300                                   struct dm_cache_policy *policy)
1301 {
1302         return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
1303                 hints_array_initialized(cmd);
1304 }
1305
1306 static int __load_mapping_v1(struct dm_cache_metadata *cmd,
1307                              uint64_t cb, bool hints_valid,
1308                              struct dm_array_cursor *mapping_cursor,
1309                              struct dm_array_cursor *hint_cursor,
1310                              load_mapping_fn fn, void *context)
1311 {
1312         int r = 0;
1313
1314         __le64 mapping;
1315         __le32 hint = 0;
1316
1317         __le64 *mapping_value_le;
1318         __le32 *hint_value_le;
1319
1320         dm_oblock_t oblock;
1321         unsigned flags;
1322
1323         dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1324         memcpy(&mapping, mapping_value_le, sizeof(mapping));
1325         unpack_value(mapping, &oblock, &flags);
1326
1327         if (flags & M_VALID) {
1328                 if (hints_valid) {
1329                         dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1330                         memcpy(&hint, hint_value_le, sizeof(hint));
1331                 }
1332
1333                 r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
1334                        le32_to_cpu(hint), hints_valid);
1335                 if (r) {
1336                         DMERR("policy couldn't load cache block %llu",
1337                               (unsigned long long) from_cblock(to_cblock(cb)));
1338                 }
1339         }
1340
1341         return r;
1342 }
1343
1344 static int __load_mapping_v2(struct dm_cache_metadata *cmd,
1345                              uint64_t cb, bool hints_valid,
1346                              struct dm_array_cursor *mapping_cursor,
1347                              struct dm_array_cursor *hint_cursor,
1348                              struct dm_bitset_cursor *dirty_cursor,
1349                              load_mapping_fn fn, void *context)
1350 {
1351         int r = 0;
1352
1353         __le64 mapping;
1354         __le32 hint = 0;
1355
1356         __le64 *mapping_value_le;
1357         __le32 *hint_value_le;
1358
1359         dm_oblock_t oblock;
1360         unsigned flags;
1361         bool dirty;
1362
1363         dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1364         memcpy(&mapping, mapping_value_le, sizeof(mapping));
1365         unpack_value(mapping, &oblock, &flags);
1366
1367         if (flags & M_VALID) {
1368                 if (hints_valid) {
1369                         dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1370                         memcpy(&hint, hint_value_le, sizeof(hint));
1371                 }
1372
1373                 dirty = dm_bitset_cursor_get_value(dirty_cursor);
1374                 r = fn(context, oblock, to_cblock(cb), dirty,
1375                        le32_to_cpu(hint), hints_valid);
1376                 if (r) {
1377                         DMERR("policy couldn't load cache block %llu",
1378                               (unsigned long long) from_cblock(to_cblock(cb)));
1379                 }
1380         }
1381
1382         return r;
1383 }
1384
1385 static int __load_mappings(struct dm_cache_metadata *cmd,
1386                            struct dm_cache_policy *policy,
1387                            load_mapping_fn fn, void *context)
1388 {
1389         int r;
1390         uint64_t cb;
1391
1392         bool hints_valid = hints_array_available(cmd, policy);
1393
1394         if (from_cblock(cmd->cache_blocks) == 0)
1395                 /* Nothing to do */
1396                 return 0;
1397
1398         r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
1399         if (r)
1400                 return r;
1401
1402         if (hints_valid) {
1403                 r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
1404                 if (r) {
1405                         dm_array_cursor_end(&cmd->mapping_cursor);
1406                         return r;
1407                 }
1408         }
1409
1410         if (separate_dirty_bits(cmd)) {
1411                 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
1412                                            from_cblock(cmd->cache_blocks),
1413                                            &cmd->dirty_cursor);
1414                 if (r) {
1415                         dm_array_cursor_end(&cmd->hint_cursor);
1416                         dm_array_cursor_end(&cmd->mapping_cursor);
1417                         return r;
1418                 }
1419         }
1420
1421         for (cb = 0; ; cb++) {
1422                 if (separate_dirty_bits(cmd))
1423                         r = __load_mapping_v2(cmd, cb, hints_valid,
1424                                               &cmd->mapping_cursor,
1425                                               &cmd->hint_cursor,
1426                                               &cmd->dirty_cursor,
1427                                               fn, context);
1428                 else
1429                         r = __load_mapping_v1(cmd, cb, hints_valid,
1430                                               &cmd->mapping_cursor, &cmd->hint_cursor,
1431                                               fn, context);
1432                 if (r)
1433                         goto out;
1434
1435                 /*
1436                  * We need to break out before we move the cursors.
1437                  */
1438                 if (cb >= (from_cblock(cmd->cache_blocks) - 1))
1439                         break;
1440
1441                 r = dm_array_cursor_next(&cmd->mapping_cursor);
1442                 if (r) {
1443                         DMERR("dm_array_cursor_next for mapping failed");
1444                         goto out;
1445                 }
1446
1447                 if (hints_valid) {
1448                         r = dm_array_cursor_next(&cmd->hint_cursor);
1449                         if (r) {
1450                                 DMERR("dm_array_cursor_next for hint failed");
1451                                 goto out;
1452                         }
1453                 }
1454
1455                 if (separate_dirty_bits(cmd)) {
1456                         r = dm_bitset_cursor_next(&cmd->dirty_cursor);
1457                         if (r) {
1458                                 DMERR("dm_bitset_cursor_next for dirty failed");
1459                                 goto out;
1460                         }
1461                 }
1462         }
1463 out:
1464         dm_array_cursor_end(&cmd->mapping_cursor);
1465         if (hints_valid)
1466                 dm_array_cursor_end(&cmd->hint_cursor);
1467
1468         if (separate_dirty_bits(cmd))
1469                 dm_bitset_cursor_end(&cmd->dirty_cursor);
1470
1471         return r;
1472 }
1473
1474 int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
1475                            struct dm_cache_policy *policy,
1476                            load_mapping_fn fn, void *context)
1477 {
1478         int r;
1479
1480         READ_LOCK(cmd);
1481         r = __load_mappings(cmd, policy, fn, context);
1482         READ_UNLOCK(cmd);
1483
1484         return r;
1485 }
1486
1487 static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
1488 {
1489         int r = 0;
1490         __le64 value;
1491         dm_oblock_t oblock;
1492         unsigned flags;
1493
1494         memcpy(&value, leaf, sizeof(value));
1495         unpack_value(value, &oblock, &flags);
1496
1497         return r;
1498 }
1499
1500 static int __dump_mappings(struct dm_cache_metadata *cmd)
1501 {
1502         return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
1503 }
1504
1505 void dm_cache_dump(struct dm_cache_metadata *cmd)
1506 {
1507         READ_LOCK_VOID(cmd);
1508         __dump_mappings(cmd);
1509         READ_UNLOCK(cmd);
1510 }
1511
1512 int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
1513 {
1514         int r;
1515
1516         READ_LOCK(cmd);
1517         r = cmd->changed;
1518         READ_UNLOCK(cmd);
1519
1520         return r;
1521 }
1522
1523 static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
1524 {
1525         int r;
1526         unsigned flags;
1527         dm_oblock_t oblock;
1528         __le64 value;
1529
1530         r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
1531         if (r)
1532                 return r;
1533
1534         unpack_value(value, &oblock, &flags);
1535
1536         if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
1537                 /* nothing to be done */
1538                 return 0;
1539
1540         value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
1541         __dm_bless_for_disk(&value);
1542
1543         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1544                                &value, &cmd->root);
1545         if (r)
1546                 return r;
1547
1548         cmd->changed = true;
1549         return 0;
1550
1551 }
1552
1553 static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
1554 {
1555         int r;
1556         unsigned i;
1557         for (i = 0; i < nr_bits; i++) {
1558                 r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
1559                 if (r)
1560                         return r;
1561         }
1562
1563         return 0;
1564 }
1565
1566 static int is_dirty_callback(uint32_t index, bool *value, void *context)
1567 {
1568         unsigned long *bits = context;
1569         *value = test_bit(index, bits);
1570         return 0;
1571 }
1572
1573 static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
1574 {
1575         int r = 0;
1576
1577         /* nr_bits is really just a sanity check */
1578         if (nr_bits != from_cblock(cmd->cache_blocks)) {
1579                 DMERR("dirty bitset is wrong size");
1580                 return -EINVAL;
1581         }
1582
1583         r = dm_bitset_del(&cmd->dirty_info, cmd->dirty_root);
1584         if (r)
1585                 return r;
1586
1587         cmd->changed = true;
1588         return dm_bitset_new(&cmd->dirty_info, &cmd->dirty_root, nr_bits, is_dirty_callback, bits);
1589 }
1590
1591 int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
1592                             unsigned nr_bits,
1593                             unsigned long *bits)
1594 {
1595         int r;
1596
1597         WRITE_LOCK(cmd);
1598         if (separate_dirty_bits(cmd))
1599                 r = __set_dirty_bits_v2(cmd, nr_bits, bits);
1600         else
1601                 r = __set_dirty_bits_v1(cmd, nr_bits, bits);
1602         WRITE_UNLOCK(cmd);
1603
1604         return r;
1605 }
1606
1607 void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
1608                                  struct dm_cache_statistics *stats)
1609 {
1610         READ_LOCK_VOID(cmd);
1611         *stats = cmd->stats;
1612         READ_UNLOCK(cmd);
1613 }
1614
1615 void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
1616                                  struct dm_cache_statistics *stats)
1617 {
1618         WRITE_LOCK_VOID(cmd);
1619         cmd->stats = *stats;
1620         WRITE_UNLOCK(cmd);
1621 }
1622
1623 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
1624 {
1625         int r;
1626         flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
1627                                  clear_clean_shutdown);
1628
1629         WRITE_LOCK(cmd);
1630         r = __commit_transaction(cmd, mutator);
1631         if (r)
1632                 goto out;
1633
1634         r = __begin_transaction(cmd);
1635
1636 out:
1637         WRITE_UNLOCK(cmd);
1638         return r;
1639 }
1640
1641 int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
1642                                            dm_block_t *result)
1643 {
1644         int r = -EINVAL;
1645
1646         READ_LOCK(cmd);
1647         r = dm_sm_get_nr_free(cmd->metadata_sm, result);
1648         READ_UNLOCK(cmd);
1649
1650         return r;
1651 }
1652
1653 int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
1654                                    dm_block_t *result)
1655 {
1656         int r = -EINVAL;
1657
1658         READ_LOCK(cmd);
1659         r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
1660         READ_UNLOCK(cmd);
1661
1662         return r;
1663 }
1664
1665 /*----------------------------------------------------------------*/
1666
1667 static int get_hint(uint32_t index, void *value_le, void *context)
1668 {
1669         uint32_t value;
1670         struct dm_cache_policy *policy = context;
1671
1672         value = policy_get_hint(policy, to_cblock(index));
1673         *((__le32 *) value_le) = cpu_to_le32(value);
1674
1675         return 0;
1676 }
1677
1678 /*
1679  * It's quicker to always delete the hint array, and recreate with
1680  * dm_array_new().
1681  */
1682 static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1683 {
1684         int r;
1685         size_t hint_size;
1686         const char *policy_name = dm_cache_policy_get_name(policy);
1687         const unsigned *policy_version = dm_cache_policy_get_version(policy);
1688
1689         if (!policy_name[0] ||
1690             (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
1691                 return -EINVAL;
1692
1693         strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
1694         memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
1695
1696         hint_size = dm_cache_policy_get_hint_size(policy);
1697         if (!hint_size)
1698                 return 0; /* short-circuit hints initialization */
1699         cmd->policy_hint_size = hint_size;
1700
1701         if (cmd->hint_root) {
1702                 r = dm_array_del(&cmd->hint_info, cmd->hint_root);
1703                 if (r)
1704                         return r;
1705         }
1706
1707         return dm_array_new(&cmd->hint_info, &cmd->hint_root,
1708                             from_cblock(cmd->cache_blocks),
1709                             get_hint, policy);
1710 }
1711
1712 int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1713 {
1714         int r;
1715
1716         WRITE_LOCK(cmd);
1717         r = write_hints(cmd, policy);
1718         WRITE_UNLOCK(cmd);
1719
1720         return r;
1721 }
1722
1723 int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
1724 {
1725         int r;
1726
1727         READ_LOCK(cmd);
1728         r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
1729         READ_UNLOCK(cmd);
1730
1731         return r;
1732 }
1733
1734 void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
1735 {
1736         WRITE_LOCK_VOID(cmd);
1737         dm_bm_set_read_only(cmd->bm);
1738         WRITE_UNLOCK(cmd);
1739 }
1740
1741 void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
1742 {
1743         WRITE_LOCK_VOID(cmd);
1744         dm_bm_set_read_write(cmd->bm);
1745         WRITE_UNLOCK(cmd);
1746 }
1747
1748 int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
1749 {
1750         int r;
1751         struct dm_block *sblock;
1752         struct cache_disk_superblock *disk_super;
1753
1754         WRITE_LOCK(cmd);
1755         set_bit(NEEDS_CHECK, &cmd->flags);
1756
1757         r = superblock_lock(cmd, &sblock);
1758         if (r) {
1759                 DMERR("couldn't read superblock");
1760                 goto out;
1761         }
1762
1763         disk_super = dm_block_data(sblock);
1764         disk_super->flags = cpu_to_le32(cmd->flags);
1765
1766         dm_bm_unlock(sblock);
1767
1768 out:
1769         WRITE_UNLOCK(cmd);
1770         return r;
1771 }
1772
1773 int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
1774 {
1775         READ_LOCK(cmd);
1776         *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
1777         READ_UNLOCK(cmd);
1778
1779         return 0;
1780 }
1781
1782 int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
1783 {
1784         int r;
1785
1786         WRITE_LOCK(cmd);
1787         __destroy_persistent_data_objects(cmd);
1788         r = __create_persistent_data_objects(cmd, false);
1789         if (r)
1790                 cmd->fail_io = true;
1791         WRITE_UNLOCK(cmd);
1792
1793         return r;
1794 }