drivers/md/dm-cache-target.c
1 /*
2  * Copyright (C) 2012 Red Hat. All rights reserved.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm.h"
8 #include "dm-bio-prison.h"
9 #include "dm-bio-record.h"
10 #include "dm-cache-metadata.h"
11
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/init.h>
15 #include <linux/mempool.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19
20 #define DM_MSG_PREFIX "cache"
21
22 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
23         "A percentage of time allocated for copying to and/or from cache");
24
25 /*----------------------------------------------------------------*/
26
27 /*
28  * Glossary:
29  *
30  * oblock: index of an origin block
31  * cblock: index of a cache block
32  * promotion: movement of a block from origin to cache
33  * demotion: movement of a block from cache to origin
34  * migration: movement of a block between the origin and cache device,
35  *            either direction
36  */
37
38 /*----------------------------------------------------------------*/
39
40 static size_t bitset_size_in_bytes(unsigned nr_entries)
41 {
42         return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
43 }
44
45 static unsigned long *alloc_bitset(unsigned nr_entries)
46 {
47         size_t s = bitset_size_in_bytes(nr_entries);
48         return vzalloc(s);
49 }
50
51 static void clear_bitset(void *bitset, unsigned nr_entries)
52 {
53         size_t s = bitset_size_in_bytes(nr_entries);
54         memset(bitset, 0, s);
55 }
56
57 static void free_bitset(unsigned long *bits)
58 {
59         vfree(bits);
60 }
61
62 /*----------------------------------------------------------------*/
63
64 /*
65  * There are a couple of places where we let a bio run, but want to do some
66  * work before calling its endio function.  We do this by temporarily
67  * changing the endio fn.
68  */
69 struct dm_hook_info {
70         bio_end_io_t *bi_end_io;
71         void *bi_private;
72 };
73
74 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
75                         bio_end_io_t *bi_end_io, void *bi_private)
76 {
77         h->bi_end_io = bio->bi_end_io;
78         h->bi_private = bio->bi_private;
79
80         bio->bi_end_io = bi_end_io;
81         bio->bi_private = bi_private;
82 }
83
84 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
85 {
86         bio->bi_end_io = h->bi_end_io;
87         bio->bi_private = h->bi_private;
88
89         /*
90          * Must bump bi_remaining to allow bio to complete with
91          * restored bi_end_io.
92          */
93         atomic_inc(&bio->bi_remaining);
94 }
95
96 /*----------------------------------------------------------------*/
97
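/*
 * MIGRATION_POOL_SIZE sizes the mempool of struct dm_cache_migration used
 * by the worker, and COMMIT_PERIOD (in jiffies, so roughly one second)
 * bounds how long metadata changes may sit uncommitted (see
 * need_commit_due_to_time()).
 */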
98 #define PRISON_CELLS 1024
99 #define MIGRATION_POOL_SIZE 128
100 #define COMMIT_PERIOD HZ
101 #define MIGRATION_COUNT_WINDOW 10
102
103 /*
104  * The block size of the device holding cache data must be
105  * between 32KB and 1GB.
106  */
107 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
108 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
109
110 /*
111  * FIXME: the cache is read/write for the time being.
112  */
113 enum cache_metadata_mode {
114         CM_WRITE,               /* metadata may be changed */
115         CM_READ_ONLY,           /* metadata may not be changed */
116 };
117
118 enum cache_io_mode {
119         /*
120          * Data is written to cached blocks only.  These blocks are marked
121          * dirty.  If you lose the cache device you will lose data.
122          * Potential performance increase for both reads and writes.
123          */
124         CM_IO_WRITEBACK,
125
126         /*
127          * Data is written to both cache and origin.  Blocks are never
128  * dirty.  Potential performance benefit for reads only.
129          */
130         CM_IO_WRITETHROUGH,
131
132         /*
133          * A degraded mode useful for various cache coherency situations
134          * (eg, rolling back snapshots).  Reads and writes always go to the
135          * origin.  If a write goes to a cached oblock, then the cache
136          * block is invalidated.
137          */
138         CM_IO_PASSTHROUGH
139 };
140
141 struct cache_features {
142         enum cache_metadata_mode mode;
143         enum cache_io_mode io_mode;
144 };
145
146 struct cache_stats {
147         atomic_t read_hit;
148         atomic_t read_miss;
149         atomic_t write_hit;
150         atomic_t write_miss;
151         atomic_t demotion;
152         atomic_t promotion;
153         atomic_t copies_avoided;
154         atomic_t cache_cell_clash;
155         atomic_t commit_count;
156         atomic_t discard_count;
157 };
158
159 /*
160  * Defines a range of cblocks: begin to (end - 1) are in the range; end is
161  * the one-past-the-end value.
162  */
163 struct cblock_range {
164         dm_cblock_t begin;
165         dm_cblock_t end;
166 };
167
168 struct invalidation_request {
169         struct list_head list;
170         struct cblock_range *cblocks;
171
172         atomic_t complete;
173         int err;
174
175         wait_queue_head_t result_wait;
176 };
177
178 struct cache {
179         struct dm_target *ti;
180         struct dm_target_callbacks callbacks;
181
182         struct dm_cache_metadata *cmd;
183
184         /*
185          * Metadata is written to this device.
186          */
187         struct dm_dev *metadata_dev;
188
189         /*
190          * The slower of the two data devices.  Typically a spindle.
191          */
192         struct dm_dev *origin_dev;
193
194         /*
195          * The faster of the two data devices.  Typically an SSD.
196          */
197         struct dm_dev *cache_dev;
198
199         /*
200          * Size of the origin device in _complete_ blocks and native sectors.
201          */
202         dm_oblock_t origin_blocks;
203         sector_t origin_sectors;
204
205         /*
206          * Size of the cache device in blocks.
207          */
208         dm_cblock_t cache_size;
209
210         /*
211          * Fields for converting from sectors to blocks.
212          */
213         uint32_t sectors_per_block;
214         int sectors_per_block_shift;
215
216         spinlock_t lock;
217         struct bio_list deferred_bios;
218         struct bio_list deferred_flush_bios;
219         struct bio_list deferred_writethrough_bios;
220         struct list_head quiesced_migrations;
221         struct list_head completed_migrations;
222         struct list_head need_commit_migrations;
223         sector_t migration_threshold;
224         wait_queue_head_t migration_wait;
225         atomic_t nr_migrations;
226
227         wait_queue_head_t quiescing_wait;
228         atomic_t quiescing;
229         atomic_t quiescing_ack;
230
231         /*
232          * cache_size entries, dirty if set
233          */
234         atomic_t nr_dirty;
235         unsigned long *dirty_bitset;
236
237         /*
238          * origin_blocks entries, discarded if set.
239          */
240         dm_oblock_t discard_nr_blocks;
241         unsigned long *discard_bitset;
242
243         /*
244          * Rather than reconstructing the table line for the status we just
245          * save it and regurgitate.
246          */
247         unsigned nr_ctr_args;
248         const char **ctr_args;
249
250         struct dm_kcopyd_client *copier;
251         struct workqueue_struct *wq;
252         struct work_struct worker;
253
254         struct delayed_work waker;
255         unsigned long last_commit_jiffies;
256
257         struct dm_bio_prison *prison;
258         struct dm_deferred_set *all_io_ds;
259
260         mempool_t *migration_pool;
261         struct dm_cache_migration *next_migration;
262
263         struct dm_cache_policy *policy;
264         unsigned policy_nr_args;
265
266         bool need_tick_bio:1;
267         bool sized:1;
268         bool invalidate:1;
269         bool commit_requested:1;
270         bool loaded_mappings:1;
271         bool loaded_discards:1;
272
273         /*
274          * Cache features such as write-through.
275          */
276         struct cache_features features;
277
278         struct cache_stats stats;
279
280         /*
281          * Invalidation fields.
282          */
283         spinlock_t invalidation_lock;
284         struct list_head invalidation_requests;
285 };
286
287 struct per_bio_data {
288         bool tick:1;
289         unsigned req_nr:2;
290         struct dm_deferred_entry *all_io_entry;
291         struct dm_hook_info hook_info;
292
293         /*
294          * writethrough fields.  These MUST remain at the end of this
295          * structure and the 'cache' member must be the first as it
296          * is used to determine the offset of the writethrough fields.
297          */
298         struct cache *cache;
299         dm_cblock_t cblock;
300         struct dm_bio_details bio_details;
301 };
302
303 struct dm_cache_migration {
304         struct list_head list;
305         struct cache *cache;
306
307         unsigned long start_jiffies;
308         dm_oblock_t old_oblock;
309         dm_oblock_t new_oblock;
310         dm_cblock_t cblock;
311
312         bool err:1;
313         bool writeback:1;
314         bool demote:1;
315         bool promote:1;
316         bool requeue_holder:1;
317         bool invalidate:1;
318
319         struct dm_bio_prison_cell *old_ocell;
320         struct dm_bio_prison_cell *new_ocell;
321 };
322
323 /*
324  * Processing a bio in the worker thread may require these memory
325  * allocations.  We prealloc to avoid deadlocks (the same worker thread
326  * frees them back to the mempool).
327  */
328 struct prealloc {
329         struct dm_cache_migration *mg;
330         struct dm_bio_prison_cell *cell1;
331         struct dm_bio_prison_cell *cell2;
332 };
333
334 static void wake_worker(struct cache *cache)
335 {
336         queue_work(cache->wq, &cache->worker);
337 }
338
339 /*----------------------------------------------------------------*/
340
341 static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
342 {
343         /* FIXME: change to use a local slab. */
344         return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
345 }
346
347 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
348 {
349         dm_bio_prison_free_cell(cache->prison, cell);
350 }
351
352 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
353 {
354         if (!p->mg) {
355                 p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
356                 if (!p->mg)
357                         return -ENOMEM;
358         }
359
360         if (!p->cell1) {
361                 p->cell1 = alloc_prison_cell(cache);
362                 if (!p->cell1)
363                         return -ENOMEM;
364         }
365
366         if (!p->cell2) {
367                 p->cell2 = alloc_prison_cell(cache);
368                 if (!p->cell2)
369                         return -ENOMEM;
370         }
371
372         return 0;
373 }
374
375 static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
376 {
377         if (p->cell2)
378                 free_prison_cell(cache, p->cell2);
379
380         if (p->cell1)
381                 free_prison_cell(cache, p->cell1);
382
383         if (p->mg)
384                 mempool_free(p->mg, cache->migration_pool);
385 }
386
387 static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
388 {
389         struct dm_cache_migration *mg = p->mg;
390
391         BUG_ON(!mg);
392         p->mg = NULL;
393
394         return mg;
395 }
396
397 /*
398  * You must have a cell within the prealloc struct to return.  If not, this
399  * function will BUG() rather than returning NULL.
400  */
401 static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
402 {
403         struct dm_bio_prison_cell *r = NULL;
404
405         if (p->cell1) {
406                 r = p->cell1;
407                 p->cell1 = NULL;
408
409         } else if (p->cell2) {
410                 r = p->cell2;
411                 p->cell2 = NULL;
412         } else
413                 BUG();
414
415         return r;
416 }
417
418 /*
419  * You can't have more than two cells in a prealloc struct.  BUG() will be
420  * called if you try to overfill.
421  */
422 static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
423 {
424         if (!p->cell2)
425                 p->cell2 = cell;
426
427         else if (!p->cell1)
428                 p->cell1 = cell;
429
430         else
431                 BUG();
432 }
433
434 /*----------------------------------------------------------------*/
435
436 static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
437 {
438         key->virtual = 0;
439         key->dev = 0;
440         key->block = from_oblock(oblock);
441 }
442
443 /*
444  * The caller hands in a preallocated cell, and a free function for it.
445  * The cell will be freed if there's an error, or if it wasn't used because
446  * a cell with that key already exists.
447  */
448 typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
449
450 static int bio_detain(struct cache *cache, dm_oblock_t oblock,
451                       struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
452                       cell_free_fn free_fn, void *free_context,
453                       struct dm_bio_prison_cell **cell_result)
454 {
455         int r;
456         struct dm_cell_key key;
457
458         build_key(oblock, &key);
459         r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
460         if (r)
461                 free_fn(free_context, cell_prealloc);
462
463         return r;
464 }
465
466 static int get_cell(struct cache *cache,
467                     dm_oblock_t oblock,
468                     struct prealloc *structs,
469                     struct dm_bio_prison_cell **cell_result)
470 {
471         int r;
472         struct dm_cell_key key;
473         struct dm_bio_prison_cell *cell_prealloc;
474
475         cell_prealloc = prealloc_get_cell(structs);
476
477         build_key(oblock, &key);
478         r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
479         if (r)
480                 prealloc_put_cell(structs, cell_prealloc);
481
482         return r;
483 }
484
485 /*----------------------------------------------------------------*/
486
487 static bool is_dirty(struct cache *cache, dm_cblock_t b)
488 {
489         return test_bit(from_cblock(b), cache->dirty_bitset);
490 }
491
492 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
493 {
494         if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
495                 atomic_inc(&cache->nr_dirty);
496                 policy_set_dirty(cache->policy, oblock);
497         }
498 }
499
500 static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
501 {
502         if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
503                 policy_clear_dirty(cache->policy, oblock);
504                 if (atomic_dec_return(&cache->nr_dirty) == 0)
505                         dm_table_event(cache->ti->table);
506         }
507 }
508
509 /*----------------------------------------------------------------*/
510
511 static bool block_size_is_power_of_two(struct cache *cache)
512 {
513         return cache->sectors_per_block_shift >= 0;
514 }
515
516 /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
517 #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
518 __always_inline
519 #endif
520 static dm_block_t block_div(dm_block_t b, uint32_t n)
521 {
522         do_div(b, n);
523
524         return b;
525 }
526
527 static void set_discard(struct cache *cache, dm_oblock_t b)
528 {
529         unsigned long flags;
530
531         atomic_inc(&cache->stats.discard_count);
532
533         spin_lock_irqsave(&cache->lock, flags);
534         set_bit(from_oblock(b), cache->discard_bitset);
535         spin_unlock_irqrestore(&cache->lock, flags);
536 }
537
538 static void clear_discard(struct cache *cache, dm_oblock_t b)
539 {
540         unsigned long flags;
541
542         spin_lock_irqsave(&cache->lock, flags);
543         clear_bit(from_oblock(b), cache->discard_bitset);
544         spin_unlock_irqrestore(&cache->lock, flags);
545 }
546
547 static bool is_discarded(struct cache *cache, dm_oblock_t b)
548 {
549         int r;
550         unsigned long flags;
551
552         spin_lock_irqsave(&cache->lock, flags);
553         r = test_bit(from_oblock(b), cache->discard_bitset);
554         spin_unlock_irqrestore(&cache->lock, flags);
555
556         return r;
557 }
558
559 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
560 {
561         int r;
562         unsigned long flags;
563
564         spin_lock_irqsave(&cache->lock, flags);
565         r = test_bit(from_oblock(b), cache->discard_bitset);
566         spin_unlock_irqrestore(&cache->lock, flags);
567
568         return r;
569 }
570
571 /*----------------------------------------------------------------*/
572
573 static void load_stats(struct cache *cache)
574 {
575         struct dm_cache_statistics stats;
576
577         dm_cache_metadata_get_stats(cache->cmd, &stats);
578         atomic_set(&cache->stats.read_hit, stats.read_hits);
579         atomic_set(&cache->stats.read_miss, stats.read_misses);
580         atomic_set(&cache->stats.write_hit, stats.write_hits);
581         atomic_set(&cache->stats.write_miss, stats.write_misses);
582 }
583
584 static void save_stats(struct cache *cache)
585 {
586         struct dm_cache_statistics stats;
587
588         stats.read_hits = atomic_read(&cache->stats.read_hit);
589         stats.read_misses = atomic_read(&cache->stats.read_miss);
590         stats.write_hits = atomic_read(&cache->stats.write_hit);
591         stats.write_misses = atomic_read(&cache->stats.write_miss);
592
593         dm_cache_metadata_set_stats(cache->cmd, &stats);
594 }
595
596 /*----------------------------------------------------------------
597  * Per bio data
598  *--------------------------------------------------------------*/
599
600 /*
601  * If using writeback, leave out struct per_bio_data's writethrough fields.
602  */
603 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
604 #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
605
606 static bool writethrough_mode(struct cache_features *f)
607 {
608         return f->io_mode == CM_IO_WRITETHROUGH;
609 }
610
611 static bool writeback_mode(struct cache_features *f)
612 {
613         return f->io_mode == CM_IO_WRITEBACK;
614 }
615
616 static bool passthrough_mode(struct cache_features *f)
617 {
618         return f->io_mode == CM_IO_PASSTHROUGH;
619 }
620
621 static size_t get_per_bio_data_size(struct cache *cache)
622 {
623         return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
624 }
625
626 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
627 {
628         struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
629         BUG_ON(!pb);
630         return pb;
631 }
632
633 static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
634 {
635         struct per_bio_data *pb = get_per_bio_data(bio, data_size);
636
637         pb->tick = false;
638         pb->req_nr = dm_bio_get_target_bio_nr(bio);
639         pb->all_io_entry = NULL;
640
641         return pb;
642 }
643
644 /*----------------------------------------------------------------
645  * Remapping
646  *--------------------------------------------------------------*/
647 static void remap_to_origin(struct cache *cache, struct bio *bio)
648 {
649         bio->bi_bdev = cache->origin_dev->bdev;
650 }
651
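/*
 * Remap a bio to the cache device.  The target sector is the start of the
 * cache block (cblock * sectors_per_block) plus the bio's offset within
 * that block; when the block size is a power of two this is computed with
 * a shift and a mask rather than a division.
 */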
652 static void remap_to_cache(struct cache *cache, struct bio *bio,
653                            dm_cblock_t cblock)
654 {
655         sector_t bi_sector = bio->bi_iter.bi_sector;
656         sector_t block = from_cblock(cblock);
657
658         bio->bi_bdev = cache->cache_dev->bdev;
659         if (!block_size_is_power_of_two(cache))
660                 bio->bi_iter.bi_sector =
661                         (block * cache->sectors_per_block) +
662                         sector_div(bi_sector, cache->sectors_per_block);
663         else
664                 bio->bi_iter.bi_sector =
665                         (block << cache->sectors_per_block_shift) |
666                         (bi_sector & (cache->sectors_per_block - 1));
667 }
668
669 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
670 {
671         unsigned long flags;
672         size_t pb_data_size = get_per_bio_data_size(cache);
673         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
674
675         spin_lock_irqsave(&cache->lock, flags);
676         if (cache->need_tick_bio &&
677             !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
678                 pb->tick = true;
679                 cache->need_tick_bio = false;
680         }
681         spin_unlock_irqrestore(&cache->lock, flags);
682 }
683
684 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
685                                   dm_oblock_t oblock)
686 {
687         check_if_tick_bio_needed(cache, bio);
688         remap_to_origin(cache, bio);
689         if (bio_data_dir(bio) == WRITE)
690                 clear_discard(cache, oblock);
691 }
692
693 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
694                                  dm_oblock_t oblock, dm_cblock_t cblock)
695 {
696         check_if_tick_bio_needed(cache, bio);
697         remap_to_cache(cache, bio, cblock);
698         if (bio_data_dir(bio) == WRITE) {
699                 set_dirty(cache, oblock, cblock);
700                 clear_discard(cache, oblock);
701         }
702 }
703
704 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
705 {
706         sector_t block_nr = bio->bi_iter.bi_sector;
707
708         if (!block_size_is_power_of_two(cache))
709                 (void) sector_div(block_nr, cache->sectors_per_block);
710         else
711                 block_nr >>= cache->sectors_per_block_shift;
712
713         return to_oblock(block_nr);
714 }
715
716 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
717 {
718         return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
719 }
720
721 static void issue(struct cache *cache, struct bio *bio)
722 {
723         unsigned long flags;
724
725         if (!bio_triggers_commit(cache, bio)) {
726                 generic_make_request(bio);
727                 return;
728         }
729
730         /*
731          * Batch together any bios that trigger commits and then issue a
732          * single commit for them in do_worker().
733          */
734         spin_lock_irqsave(&cache->lock, flags);
735         cache->commit_requested = true;
736         bio_list_add(&cache->deferred_flush_bios, bio);
737         spin_unlock_irqrestore(&cache->lock, flags);
738 }
739
740 static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
741 {
742         unsigned long flags;
743
744         spin_lock_irqsave(&cache->lock, flags);
745         bio_list_add(&cache->deferred_writethrough_bios, bio);
746         spin_unlock_irqrestore(&cache->lock, flags);
747
748         wake_worker(cache);
749 }
750
751 static void writethrough_endio(struct bio *bio, int err)
752 {
753         struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
754
755         dm_unhook_bio(&pb->hook_info, bio);
756
757         if (err) {
758                 bio_endio(bio, err);
759                 return;
760         }
761
762         dm_bio_restore(&pb->bio_details, bio);
763         remap_to_cache(pb->cache, bio, pb->cblock);
764
765         /*
766          * We can't issue this bio directly, since we're in interrupt
767          * context.  So it gets put on a bio list for processing by the
768          * worker thread.
769          */
770         defer_writethrough_bio(pb->cache, bio);
771 }
772
773 /*
774  * When running in writethrough mode we need to send writes to clean blocks
775  * to both the cache and origin devices.  In future we'd like to clone the
776  * bio and submit the copies in parallel, but for now we do them in
777  * series as this is easier.
778  */
779 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
780                                        dm_oblock_t oblock, dm_cblock_t cblock)
781 {
782         struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
783
784         pb->cache = cache;
785         pb->cblock = cblock;
786         dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
787         dm_bio_record(&pb->bio_details, bio);
788
789         remap_to_origin_clear_discard(pb->cache, bio, oblock);
790 }
791
792 /*----------------------------------------------------------------
793  * Migration processing
794  *
795  * Migration covers moving data from the origin device to the cache, or
796  * vice versa.
797  *--------------------------------------------------------------*/
798 static void free_migration(struct dm_cache_migration *mg)
799 {
800         mempool_free(mg, mg->cache->migration_pool);
801 }
802
803 static void inc_nr_migrations(struct cache *cache)
804 {
805         atomic_inc(&cache->nr_migrations);
806 }
807
808 static void dec_nr_migrations(struct cache *cache)
809 {
810         atomic_dec(&cache->nr_migrations);
811
812         /*
813          * Wake the worker in case we're suspending the target.
814          */
815         wake_up(&cache->migration_wait);
816 }
817
818 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
819                          bool holder)
820 {
821         (holder ? dm_cell_release : dm_cell_release_no_holder)
822                 (cache->prison, cell, &cache->deferred_bios);
823         free_prison_cell(cache, cell);
824 }
825
826 static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
827                        bool holder)
828 {
829         unsigned long flags;
830
831         spin_lock_irqsave(&cache->lock, flags);
832         __cell_defer(cache, cell, holder);
833         spin_unlock_irqrestore(&cache->lock, flags);
834
835         wake_worker(cache);
836 }
837
838 static void cleanup_migration(struct dm_cache_migration *mg)
839 {
840         struct cache *cache = mg->cache;
841         free_migration(mg);
842         dec_nr_migrations(cache);
843 }
844
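/*
 * The copy for this migration failed.  Put the policy and dirty state back
 * the way they were so the block can be retried later, and release any
 * cells that were being held.
 */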
845 static void migration_failure(struct dm_cache_migration *mg)
846 {
847         struct cache *cache = mg->cache;
848
849         if (mg->writeback) {
850                 DMWARN_LIMIT("writeback failed; couldn't copy block");
851                 set_dirty(cache, mg->old_oblock, mg->cblock);
852                 cell_defer(cache, mg->old_ocell, false);
853
854         } else if (mg->demote) {
855                 DMWARN_LIMIT("demotion failed; couldn't copy block");
856                 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
857
858                 cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
859                 if (mg->promote)
860                         cell_defer(cache, mg->new_ocell, true);
861         } else {
862                 DMWARN_LIMIT("promotion failed; couldn't copy block");
863                 policy_remove_mapping(cache->policy, mg->new_oblock);
864                 cell_defer(cache, mg->new_ocell, true);
865         }
866
867         cleanup_migration(mg);
868 }
869
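/*
 * The copy succeeded.  Writebacks just clear the dirty bit and finish here.
 * Demotions and promotions must update the on-disk metadata (remove or
 * insert the mapping) and are then parked on need_commit_migrations; they
 * only complete once that metadata has been committed.
 */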
870 static void migration_success_pre_commit(struct dm_cache_migration *mg)
871 {
872         unsigned long flags;
873         struct cache *cache = mg->cache;
874
875         if (mg->writeback) {
876                 cell_defer(cache, mg->old_ocell, false);
877                 clear_dirty(cache, mg->old_oblock, mg->cblock);
878                 cleanup_migration(mg);
879                 return;
880
881         } else if (mg->demote) {
882                 if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
883                         DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
884                         policy_force_mapping(cache->policy, mg->new_oblock,
885                                              mg->old_oblock);
886                         if (mg->promote)
887                                 cell_defer(cache, mg->new_ocell, true);
888                         cleanup_migration(mg);
889                         return;
890                 }
891         } else {
892                 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
893                         DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
894                         policy_remove_mapping(cache->policy, mg->new_oblock);
895                         cleanup_migration(mg);
896                         return;
897                 }
898         }
899
900         spin_lock_irqsave(&cache->lock, flags);
901         list_add_tail(&mg->list, &cache->need_commit_migrations);
902         cache->commit_requested = true;
903         spin_unlock_irqrestore(&cache->lock, flags);
904 }
905
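/*
 * Called for migrations on need_commit_migrations after the metadata
 * commit has succeeded.  The demote half of a demote+promote is requeued
 * so the promote can now run; a completed promotion releases (or ends) the
 * bio holding the cell and clears the dirty bit for the new mapping.
 */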
906 static void migration_success_post_commit(struct dm_cache_migration *mg)
907 {
908         unsigned long flags;
909         struct cache *cache = mg->cache;
910
911         if (mg->writeback) {
912                 DMWARN("writeback unexpectedly triggered commit");
913                 return;
914
915         } else if (mg->demote) {
916                 cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
917
918                 if (mg->promote) {
919                         mg->demote = false;
920
921                         spin_lock_irqsave(&cache->lock, flags);
922                         list_add_tail(&mg->list, &cache->quiesced_migrations);
923                         spin_unlock_irqrestore(&cache->lock, flags);
924
925                 } else {
926                         if (mg->invalidate)
927                                 policy_remove_mapping(cache->policy, mg->old_oblock);
928                         cleanup_migration(mg);
929                 }
930
931         } else {
932                 if (mg->requeue_holder)
933                         cell_defer(cache, mg->new_ocell, true);
934                 else {
935                         bio_endio(mg->new_ocell->holder, 0);
936                         cell_defer(cache, mg->new_ocell, false);
937                 }
938                 clear_dirty(cache, mg->new_oblock, mg->cblock);
939                 cleanup_migration(mg);
940         }
941 }
942
943 static void copy_complete(int read_err, unsigned long write_err, void *context)
944 {
945         unsigned long flags;
946         struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
947         struct cache *cache = mg->cache;
948
949         if (read_err || write_err)
950                 mg->err = true;
951
952         spin_lock_irqsave(&cache->lock, flags);
953         list_add_tail(&mg->list, &cache->completed_migrations);
954         spin_unlock_irqrestore(&cache->lock, flags);
955
956         wake_worker(cache);
957 }
958
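/*
 * Kick off the actual data copy with kcopyd.  A whole cache block is
 * copied: cache -> origin for writeback/demotion, origin -> cache for
 * promotion.  copy_complete() queues the migration on completed_migrations
 * once the copy has finished.
 */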
959 static void issue_copy_real(struct dm_cache_migration *mg)
960 {
961         int r;
962         struct dm_io_region o_region, c_region;
963         struct cache *cache = mg->cache;
964         sector_t cblock = from_cblock(mg->cblock);
965
966         o_region.bdev = cache->origin_dev->bdev;
967         o_region.count = cache->sectors_per_block;
968
969         c_region.bdev = cache->cache_dev->bdev;
970         c_region.sector = cblock * cache->sectors_per_block;
971         c_region.count = cache->sectors_per_block;
972
973         if (mg->writeback || mg->demote) {
974                 /* demote */
975                 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
976                 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
977         } else {
978                 /* promote */
979                 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
980                 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
981         }
982
983         if (r < 0) {
984                 DMERR_LIMIT("issuing migration failed");
985                 migration_failure(mg);
986         }
987 }
988
989 static void overwrite_endio(struct bio *bio, int err)
990 {
991         struct dm_cache_migration *mg = bio->bi_private;
992         struct cache *cache = mg->cache;
993         size_t pb_data_size = get_per_bio_data_size(cache);
994         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
995         unsigned long flags;
996
997         dm_unhook_bio(&pb->hook_info, bio);
998
999         if (err)
1000                 mg->err = true;
1001
1002         mg->requeue_holder = false;
1003
1004         spin_lock_irqsave(&cache->lock, flags);
1005         list_add_tail(&mg->list, &cache->completed_migrations);
1006         spin_unlock_irqrestore(&cache->lock, flags);
1007
1008         wake_worker(cache);
1009 }
1010
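/*
 * If the bio that triggered a promotion writes the whole cache block we
 * can skip the copy and write the new data straight to the cache block
 * instead.  The bio's endio is hooked so overwrite_endio() completes the
 * migration, and requeue_holder is cleared because the holder's io has
 * already been serviced.
 */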
1011 static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
1012 {
1013         size_t pb_data_size = get_per_bio_data_size(mg->cache);
1014         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1015
1016         dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1017         remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
1018         generic_make_request(bio);
1019 }
1020
1021 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1022 {
1023         return (bio_data_dir(bio) == WRITE) &&
1024                 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1025 }
1026
1027 static void avoid_copy(struct dm_cache_migration *mg)
1028 {
1029         atomic_inc(&mg->cache->stats.copies_avoided);
1030         migration_success_pre_commit(mg);
1031 }
1032
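/*
 * Decide whether the copy can be skipped: a writeback/demotion of a clean
 * or discarded block has nothing worth preserving, and a promotion driven
 * by a full-block write can be turned into an overwrite.
 */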
1033 static void issue_copy(struct dm_cache_migration *mg)
1034 {
1035         bool avoid;
1036         struct cache *cache = mg->cache;
1037
1038         if (mg->writeback || mg->demote)
1039                 avoid = !is_dirty(cache, mg->cblock) ||
1040                         is_discarded_oblock(cache, mg->old_oblock);
1041         else {
1042                 struct bio *bio = mg->new_ocell->holder;
1043
1044                 avoid = is_discarded_oblock(cache, mg->new_oblock);
1045
1046                 if (!avoid && bio_writes_complete_block(cache, bio)) {
1047                         issue_overwrite(mg, bio);
1048                         return;
1049                 }
1050         }
1051
1052         avoid ? avoid_copy(mg) : issue_copy_real(mg);
1053 }
1054
1055 static void complete_migration(struct dm_cache_migration *mg)
1056 {
1057         if (mg->err)
1058                 migration_failure(mg);
1059         else
1060                 migration_success_pre_commit(mg);
1061 }
1062
1063 static void process_migrations(struct cache *cache, struct list_head *head,
1064                                void (*fn)(struct dm_cache_migration *))
1065 {
1066         unsigned long flags;
1067         struct list_head list;
1068         struct dm_cache_migration *mg, *tmp;
1069
1070         INIT_LIST_HEAD(&list);
1071         spin_lock_irqsave(&cache->lock, flags);
1072         list_splice_init(head, &list);
1073         spin_unlock_irqrestore(&cache->lock, flags);
1074
1075         list_for_each_entry_safe(mg, tmp, &list, list)
1076                 fn(mg);
1077 }
1078
1079 static void __queue_quiesced_migration(struct dm_cache_migration *mg)
1080 {
1081         list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
1082 }
1083
1084 static void queue_quiesced_migration(struct dm_cache_migration *mg)
1085 {
1086         unsigned long flags;
1087         struct cache *cache = mg->cache;
1088
1089         spin_lock_irqsave(&cache->lock, flags);
1090         __queue_quiesced_migration(mg);
1091         spin_unlock_irqrestore(&cache->lock, flags);
1092
1093         wake_worker(cache);
1094 }
1095
1096 static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
1097 {
1098         unsigned long flags;
1099         struct dm_cache_migration *mg, *tmp;
1100
1101         spin_lock_irqsave(&cache->lock, flags);
1102         list_for_each_entry_safe(mg, tmp, work, list)
1103                 __queue_quiesced_migration(mg);
1104         spin_unlock_irqrestore(&cache->lock, flags);
1105
1106         wake_worker(cache);
1107 }
1108
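/*
 * Migrations must not start copying while bios that were already in flight
 * are still outstanding.  Each mapped bio takes an entry in the all_io
 * deferred set (dm_deferred_entry_inc()); quiesce_migration() adds the
 * migration to that set as work, and once the earlier entries are dropped
 * check_for_quiesced_migrations() moves it onto the quiesced list for the
 * worker to process.
 */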
1109 static void check_for_quiesced_migrations(struct cache *cache,
1110                                           struct per_bio_data *pb)
1111 {
1112         struct list_head work;
1113
1114         if (!pb->all_io_entry)
1115                 return;
1116
1117         INIT_LIST_HEAD(&work);
1118         if (pb->all_io_entry)
1119                 dm_deferred_entry_dec(pb->all_io_entry, &work);
1120
1121         if (!list_empty(&work))
1122                 queue_quiesced_migrations(cache, &work);
1123 }
1124
1125 static void quiesce_migration(struct dm_cache_migration *mg)
1126 {
1127         if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
1128                 queue_quiesced_migration(mg);
1129 }
1130
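/*
 * The helpers below (promote, writeback, demote_then_promote, invalidate)
 * all fill in a preallocated dm_cache_migration, bump the migration count
 * and quiesce the migration against in-flight io; they differ only in
 * which of the writeback/demote/promote/invalidate flags are set and
 * which cells they hold.
 */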
1131 static void promote(struct cache *cache, struct prealloc *structs,
1132                     dm_oblock_t oblock, dm_cblock_t cblock,
1133                     struct dm_bio_prison_cell *cell)
1134 {
1135         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1136
1137         mg->err = false;
1138         mg->writeback = false;
1139         mg->demote = false;
1140         mg->promote = true;
1141         mg->requeue_holder = true;
1142         mg->invalidate = false;
1143         mg->cache = cache;
1144         mg->new_oblock = oblock;
1145         mg->cblock = cblock;
1146         mg->old_ocell = NULL;
1147         mg->new_ocell = cell;
1148         mg->start_jiffies = jiffies;
1149
1150         inc_nr_migrations(cache);
1151         quiesce_migration(mg);
1152 }
1153
1154 static void writeback(struct cache *cache, struct prealloc *structs,
1155                       dm_oblock_t oblock, dm_cblock_t cblock,
1156                       struct dm_bio_prison_cell *cell)
1157 {
1158         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1159
1160         mg->err = false;
1161         mg->writeback = true;
1162         mg->demote = false;
1163         mg->promote = false;
1164         mg->requeue_holder = true;
1165         mg->invalidate = false;
1166         mg->cache = cache;
1167         mg->old_oblock = oblock;
1168         mg->cblock = cblock;
1169         mg->old_ocell = cell;
1170         mg->new_ocell = NULL;
1171         mg->start_jiffies = jiffies;
1172
1173         inc_nr_migrations(cache);
1174         quiesce_migration(mg);
1175 }
1176
1177 static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1178                                 dm_oblock_t old_oblock, dm_oblock_t new_oblock,
1179                                 dm_cblock_t cblock,
1180                                 struct dm_bio_prison_cell *old_ocell,
1181                                 struct dm_bio_prison_cell *new_ocell)
1182 {
1183         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1184
1185         mg->err = false;
1186         mg->writeback = false;
1187         mg->demote = true;
1188         mg->promote = true;
1189         mg->requeue_holder = true;
1190         mg->invalidate = false;
1191         mg->cache = cache;
1192         mg->old_oblock = old_oblock;
1193         mg->new_oblock = new_oblock;
1194         mg->cblock = cblock;
1195         mg->old_ocell = old_ocell;
1196         mg->new_ocell = new_ocell;
1197         mg->start_jiffies = jiffies;
1198
1199         inc_nr_migrations(cache);
1200         quiesce_migration(mg);
1201 }
1202
1203 /*
1204  * Invalidate a cache entry.  No writeback occurs; any changes in the cache
1205  * block are thrown away.
1206  */
1207 static void invalidate(struct cache *cache, struct prealloc *structs,
1208                        dm_oblock_t oblock, dm_cblock_t cblock,
1209                        struct dm_bio_prison_cell *cell)
1210 {
1211         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1212
1213         mg->err = false;
1214         mg->writeback = false;
1215         mg->demote = true;
1216         mg->promote = false;
1217         mg->requeue_holder = true;
1218         mg->invalidate = true;
1219         mg->cache = cache;
1220         mg->old_oblock = oblock;
1221         mg->cblock = cblock;
1222         mg->old_ocell = cell;
1223         mg->new_ocell = NULL;
1224         mg->start_jiffies = jiffies;
1225
1226         inc_nr_migrations(cache);
1227         quiesce_migration(mg);
1228 }
1229
1230 /*----------------------------------------------------------------
1231  * bio processing
1232  *--------------------------------------------------------------*/
1233 static void defer_bio(struct cache *cache, struct bio *bio)
1234 {
1235         unsigned long flags;
1236
1237         spin_lock_irqsave(&cache->lock, flags);
1238         bio_list_add(&cache->deferred_bios, bio);
1239         spin_unlock_irqrestore(&cache->lock, flags);
1240
1241         wake_worker(cache);
1242 }
1243
1244 static void process_flush_bio(struct cache *cache, struct bio *bio)
1245 {
1246         size_t pb_data_size = get_per_bio_data_size(cache);
1247         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1248
1249         BUG_ON(bio->bi_iter.bi_size);
1250         if (!pb->req_nr)
1251                 remap_to_origin(cache, bio);
1252         else
1253                 remap_to_cache(cache, bio, 0);
1254
1255         issue(cache, bio);
1256 }
1257
1258 /*
1259  * People generally discard large parts of a device, eg, the whole device
1260  * when formatting.  Splitting these large discards up into cache block
1261  * sized ios and then quiescing (always necessary for discard) takes too
1262  * long.
1263  *
1264  * We keep it simple, and allow any size of discard to come in, and just
1265  * mark off blocks on the discard bitset.  No passdown occurs!
1266  *
1267  * To implement passdown we need to change the bio_prison such that a cell
1268  * can have a key that spans many blocks.
1269  */
1270 static void process_discard_bio(struct cache *cache, struct bio *bio)
1271 {
1272         dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
1273                                                   cache->sectors_per_block);
1274         dm_block_t end_block = bio_end_sector(bio);
1275         dm_block_t b;
1276
1277         end_block = block_div(end_block, cache->sectors_per_block);
1278
1279         for (b = start_block; b < end_block; b++)
1280                 set_discard(cache, to_oblock(b));
1281
1282         bio_endio(bio, 0);
1283 }
1284
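/*
 * Returns true if starting another migration would keep the volume of data
 * currently being copied (in sectors) below the user-settable
 * migration_threshold.
 */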
1285 static bool spare_migration_bandwidth(struct cache *cache)
1286 {
1287         sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
1288                 cache->sectors_per_block;
1289         return current_volume < cache->migration_threshold;
1290 }
1291
1292 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1293 {
1294         atomic_inc(bio_data_dir(bio) == READ ?
1295                    &cache->stats.read_hit : &cache->stats.write_hit);
1296 }
1297
1298 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1299 {
1300         atomic_inc(bio_data_dir(bio) == READ ?
1301                    &cache->stats.read_miss : &cache->stats.write_miss);
1302 }
1303
1304 static void issue_cache_bio(struct cache *cache, struct bio *bio,
1305                             struct per_bio_data *pb,
1306                             dm_oblock_t oblock, dm_cblock_t cblock)
1307 {
1308         pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1309         remap_to_cache_dirty(cache, bio, oblock, cblock);
1310         issue(cache, bio);
1311 }
1312
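/*
 * Map a deferred bio: detain its origin block in the bio prison (so we
 * don't race with a migration of the same block), ask the policy for a
 * mapping, then act on the result:
 *
 *   POLICY_HIT      remap to the cache (or to the origin in passthrough
 *                   mode, invalidating the cache block on writes)
 *   POLICY_MISS     remap to the origin
 *   POLICY_NEW      promote the block into the cache
 *   POLICY_REPLACE  demote the old block, then promote the new one
 */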
1313 static void process_bio(struct cache *cache, struct prealloc *structs,
1314                         struct bio *bio)
1315 {
1316         int r;
1317         bool release_cell = true;
1318         dm_oblock_t block = get_bio_block(cache, bio);
1319         struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
1320         struct policy_result lookup_result;
1321         size_t pb_data_size = get_per_bio_data_size(cache);
1322         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1323         bool discarded_block = is_discarded_oblock(cache, block);
1324         bool passthrough = passthrough_mode(&cache->features);
1325         bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
1326
1327         /*
1328          * Check to see if that block is currently migrating.
1329          */
1330         cell_prealloc = prealloc_get_cell(structs);
1331         r = bio_detain(cache, block, bio, cell_prealloc,
1332                        (cell_free_fn) prealloc_put_cell,
1333                        structs, &new_ocell);
1334         if (r > 0)
1335                 return;
1336
1337         r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
1338                        bio, &lookup_result);
1339
1340         if (r == -EWOULDBLOCK)
1341                 /* migration has been denied */
1342                 lookup_result.op = POLICY_MISS;
1343
1344         switch (lookup_result.op) {
1345         case POLICY_HIT:
1346                 if (passthrough) {
1347                         inc_miss_counter(cache, bio);
1348
1349                         /*
1350                          * Passthrough always maps to the origin,
1351                          * invalidating any cache blocks that are written
1352                          * to.
1353                          */
1354
1355                         if (bio_data_dir(bio) == WRITE) {
1356                                 atomic_inc(&cache->stats.demotion);
1357                                 invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
1358                                 release_cell = false;
1359
1360                         } else {
1361                                 /* FIXME: factor out issue_origin() */
1362                                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1363                                 remap_to_origin_clear_discard(cache, bio, block);
1364                                 issue(cache, bio);
1365                         }
1366                 } else {
1367                         inc_hit_counter(cache, bio);
1368
1369                         if (bio_data_dir(bio) == WRITE &&
1370                             writethrough_mode(&cache->features) &&
1371                             !is_dirty(cache, lookup_result.cblock)) {
1372                                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1373                                 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
1374                                 issue(cache, bio);
1375                         } else
1376                                 issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
1377                 }
1378
1379                 break;
1380
1381         case POLICY_MISS:
1382                 inc_miss_counter(cache, bio);
1383                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
1384                 remap_to_origin_clear_discard(cache, bio, block);
1385                 issue(cache, bio);
1386                 break;
1387
1388         case POLICY_NEW:
1389                 atomic_inc(&cache->stats.promotion);
1390                 promote(cache, structs, block, lookup_result.cblock, new_ocell);
1391                 release_cell = false;
1392                 break;
1393
1394         case POLICY_REPLACE:
1395                 cell_prealloc = prealloc_get_cell(structs);
1396                 r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
1397                                (cell_free_fn) prealloc_put_cell,
1398                                structs, &old_ocell);
1399                 if (r > 0) {
1400                         /*
1401                          * We have to be careful to avoid lock inversion of
1402                          * the cells.  So we back off, and wait for the
1403                          * old_ocell to become free.
1404                          */
1405                         policy_force_mapping(cache->policy, block,
1406                                              lookup_result.old_oblock);
1407                         atomic_inc(&cache->stats.cache_cell_clash);
1408                         break;
1409                 }
1410                 atomic_inc(&cache->stats.demotion);
1411                 atomic_inc(&cache->stats.promotion);
1412
1413                 demote_then_promote(cache, structs, lookup_result.old_oblock,
1414                                     block, lookup_result.cblock,
1415                                     old_ocell, new_ocell);
1416                 release_cell = false;
1417                 break;
1418
1419         default:
1420                 DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
1421                             (unsigned) lookup_result.op);
1422                 bio_io_error(bio);
1423         }
1424
1425         if (release_cell)
1426                 cell_defer(cache, new_ocell, false);
1427 }
1428
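/*
 * Commit at least once per COMMIT_PERIOD.  The first test copes with
 * jiffies wrapping around.
 */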
1429 static int need_commit_due_to_time(struct cache *cache)
1430 {
1431         return jiffies < cache->last_commit_jiffies ||
1432                jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
1433 }
1434
1435 static int commit_if_needed(struct cache *cache)
1436 {
1437         int r = 0;
1438
1439         if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
1440             dm_cache_changed_this_transaction(cache->cmd)) {
1441                 atomic_inc(&cache->stats.commit_count);
1442                 cache->commit_requested = false;
1443                 r = dm_cache_commit(cache->cmd, false);
1444                 cache->last_commit_jiffies = jiffies;
1445         }
1446
1447         return r;
1448 }
1449
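/*
 * Take everything off the deferred list and process it: flushes and
 * discards have their own handlers, everything else goes through
 * process_bio() and may need the preallocated migration/cell structs.
 */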
1450 static void process_deferred_bios(struct cache *cache)
1451 {
1452         unsigned long flags;
1453         struct bio_list bios;
1454         struct bio *bio;
1455         struct prealloc structs;
1456
1457         memset(&structs, 0, sizeof(structs));
1458         bio_list_init(&bios);
1459
1460         spin_lock_irqsave(&cache->lock, flags);
1461         bio_list_merge(&bios, &cache->deferred_bios);
1462         bio_list_init(&cache->deferred_bios);
1463         spin_unlock_irqrestore(&cache->lock, flags);
1464
1465         while (!bio_list_empty(&bios)) {
1466                 /*
1467                  * If we've got no free migration structs, and processing
1468                  * this bio might require one, we pause until there are some
1469                  * prepared mappings to process.
1470                  */
1471                 if (prealloc_data_structs(cache, &structs)) {
1472                         spin_lock_irqsave(&cache->lock, flags);
1473                         bio_list_merge(&cache->deferred_bios, &bios);
1474                         spin_unlock_irqrestore(&cache->lock, flags);
1475                         break;
1476                 }
1477
1478                 bio = bio_list_pop(&bios);
1479
1480                 if (bio->bi_rw & REQ_FLUSH)
1481                         process_flush_bio(cache, bio);
1482                 else if (bio->bi_rw & REQ_DISCARD)
1483                         process_discard_bio(cache, bio);
1484                 else
1485                         process_bio(cache, &structs, bio);
1486         }
1487
1488         prealloc_free_structs(cache, &structs);
1489 }
1490
1491 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
1492 {
1493         unsigned long flags;
1494         struct bio_list bios;
1495         struct bio *bio;
1496
1497         bio_list_init(&bios);
1498
1499         spin_lock_irqsave(&cache->lock, flags);
1500         bio_list_merge(&bios, &cache->deferred_flush_bios);
1501         bio_list_init(&cache->deferred_flush_bios);
1502         spin_unlock_irqrestore(&cache->lock, flags);
1503
1504         while ((bio = bio_list_pop(&bios)))
1505                 submit_bios ? generic_make_request(bio) : bio_io_error(bio);
1506 }
1507
1508 static void process_deferred_writethrough_bios(struct cache *cache)
1509 {
1510         unsigned long flags;
1511         struct bio_list bios;
1512         struct bio *bio;
1513
1514         bio_list_init(&bios);
1515
1516         spin_lock_irqsave(&cache->lock, flags);
1517         bio_list_merge(&bios, &cache->deferred_writethrough_bios);
1518         bio_list_init(&cache->deferred_writethrough_bios);
1519         spin_unlock_irqrestore(&cache->lock, flags);
1520
1521         while ((bio = bio_list_pop(&bios)))
1522                 generic_make_request(bio);
1523 }
1524
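/*
 * Use spare migration bandwidth to clean dirty blocks: ask the policy for
 * a block to write back, lock it in the bio prison and launch a writeback
 * migration.  If the block can't be locked the policy is told it is still
 * dirty so it will be offered again later.
 */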
1525 static void writeback_some_dirty_blocks(struct cache *cache)
1526 {
1527         int r = 0;
1528         dm_oblock_t oblock;
1529         dm_cblock_t cblock;
1530         struct prealloc structs;
1531         struct dm_bio_prison_cell *old_ocell;
1532
1533         memset(&structs, 0, sizeof(structs));
1534
1535         while (spare_migration_bandwidth(cache)) {
1536                 if (prealloc_data_structs(cache, &structs))
1537                         break;
1538
1539                 r = policy_writeback_work(cache->policy, &oblock, &cblock);
1540                 if (r)
1541                         break;
1542
1543                 r = get_cell(cache, oblock, &structs, &old_ocell);
1544                 if (r) {
1545                         policy_set_dirty(cache->policy, oblock);
1546                         break;
1547                 }
1548
1549                 writeback(cache, &structs, oblock, cblock, old_ocell);
1550         }
1551
1552         prealloc_free_structs(cache, &structs);
1553 }
1554
1555 /*----------------------------------------------------------------
1556  * Invalidations.
1557  * Dropping something from the cache *without* writing back.
1558  *--------------------------------------------------------------*/
1559
1560 static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
1561 {
1562         int r = 0;
1563         uint64_t begin = from_cblock(req->cblocks->begin);
1564         uint64_t end = from_cblock(req->cblocks->end);
1565
1566         while (begin != end) {
1567                 r = policy_remove_cblock(cache->policy, to_cblock(begin));
1568                 if (!r) {
1569                         r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
1570                         if (r)
1571                                 break;
1572
1573                 } else if (r == -ENODATA) {
1574                         /* harmless, already unmapped */
1575                         r = 0;
1576
1577                 } else {
1578                         DMERR("policy_remove_cblock failed");
1579                         break;
1580                 }
1581
1582                 begin++;
1583         }
1584
1585         cache->commit_requested = true;
1586
1587         req->err = r;
1588         atomic_set(&req->complete, 1);
1589
1590         wake_up(&req->result_wait);
1591 }
1592
1593 static void process_invalidation_requests(struct cache *cache)
1594 {
1595         struct list_head list;
1596         struct invalidation_request *req, *tmp;
1597
1598         INIT_LIST_HEAD(&list);
1599         spin_lock(&cache->invalidation_lock);
1600         list_splice_init(&cache->invalidation_requests, &list);
1601         spin_unlock(&cache->invalidation_lock);
1602
1603         list_for_each_entry_safe (req, tmp, &list, list)
1604                 process_invalidation_request(cache, req);
1605 }
1606
1607 /*----------------------------------------------------------------
1608  * Main worker loop
1609  *--------------------------------------------------------------*/
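/*
 * Quiescing handshake: start_quiescing() raises the quiescing flag and
 * then blocks until ack_quiescing() has bumped quiescing_ack.  While
 * quiescing, the worker stops taking on new bios and invalidation
 * requests and only drains outstanding migrations (see more_work() and
 * do_worker()).
 */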
1610 static bool is_quiescing(struct cache *cache)
1611 {
1612         return atomic_read(&cache->quiescing);
1613 }
1614
1615 static void ack_quiescing(struct cache *cache)
1616 {
1617         if (is_quiescing(cache)) {
1618                 atomic_inc(&cache->quiescing_ack);
1619                 wake_up(&cache->quiescing_wait);
1620         }
1621 }
1622
1623 static void wait_for_quiescing_ack(struct cache *cache)
1624 {
1625         wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
1626 }
1627
1628 static void start_quiescing(struct cache *cache)
1629 {
1630         atomic_inc(&cache->quiescing);
1631         wait_for_quiescing_ack(cache);
1632 }
1633
1634 static void stop_quiescing(struct cache *cache)
1635 {
1636         atomic_set(&cache->quiescing, 0);
1637         atomic_set(&cache->quiescing_ack, 0);
1638 }
1639
1640 static void wait_for_migrations(struct cache *cache)
1641 {
1642         wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
1643 }
1644
1645 static void stop_worker(struct cache *cache)
1646 {
1647         cancel_delayed_work(&cache->waker);
1648         flush_workqueue(cache->wq);
1649 }
1650
1651 static void requeue_deferred_io(struct cache *cache)
1652 {
1653         struct bio *bio;
1654         struct bio_list bios;
1655
1656         bio_list_init(&bios);
1657         bio_list_merge(&bios, &cache->deferred_bios);
1658         bio_list_init(&cache->deferred_bios);
1659
1660         while ((bio = bio_list_pop(&bios)))
1661                 bio_endio(bio, DM_ENDIO_REQUEUE);
1662 }
1663
1664 static int more_work(struct cache *cache)
1665 {
1666         if (is_quiescing(cache))
1667                 return !list_empty(&cache->quiesced_migrations) ||
1668                         !list_empty(&cache->completed_migrations) ||
1669                         !list_empty(&cache->need_commit_migrations);
1670         else
1671                 return !bio_list_empty(&cache->deferred_bios) ||
1672                         !bio_list_empty(&cache->deferred_flush_bios) ||
1673                         !bio_list_empty(&cache->deferred_writethrough_bios) ||
1674                         !list_empty(&cache->quiesced_migrations) ||
1675                         !list_empty(&cache->completed_migrations) ||
1676                         !list_empty(&cache->need_commit_migrations) ||
1677                         cache->invalidate;
1678 }
1679
1680 static void do_worker(struct work_struct *ws)
1681 {
1682         struct cache *cache = container_of(ws, struct cache, worker);
1683
1684         do {
1685                 if (!is_quiescing(cache)) {
1686                         writeback_some_dirty_blocks(cache);
1687                         process_deferred_writethrough_bios(cache);
1688                         process_deferred_bios(cache);
1689                         process_invalidation_requests(cache);
1690                 }
1691
1692                 process_migrations(cache, &cache->quiesced_migrations, issue_copy);
1693                 process_migrations(cache, &cache->completed_migrations, complete_migration);
1694
1695                 if (commit_if_needed(cache)) {
1696                         process_deferred_flush_bios(cache, false);
1697
1698                         /*
1699                          * FIXME: rollback metadata or just go into a
1700                          * failure mode and error everything
1701                          */
1702                 } else {
1703                         process_deferred_flush_bios(cache, true);
1704                         process_migrations(cache, &cache->need_commit_migrations,
1705                                            migration_success_post_commit);
1706                 }
1707
1708                 ack_quiescing(cache);
1709
1710         } while (more_work(cache));
1711 }
1712
1713 /*
1714  * We want to commit periodically so that not too much
1715  * unwritten metadata builds up.
1716  */
1717 static void do_waker(struct work_struct *ws)
1718 {
1719         struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1720         policy_tick(cache->policy);
1721         wake_worker(cache);
1722         queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1723 }
1724
1725 /*----------------------------------------------------------------*/
1726
1727 static int is_congested(struct dm_dev *dev, int bdi_bits)
1728 {
1729         struct request_queue *q = bdev_get_queue(dev->bdev);
1730         return bdi_congested(&q->backing_dev_info, bdi_bits);
1731 }
1732
1733 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1734 {
1735         struct cache *cache = container_of(cb, struct cache, callbacks);
1736
1737         return is_congested(cache->origin_dev, bdi_bits) ||
1738                 is_congested(cache->cache_dev, bdi_bits);
1739 }
1740
1741 /*----------------------------------------------------------------
1742  * Target methods
1743  *--------------------------------------------------------------*/
1744
1745 /*
1746  * This function gets called on the error paths of the constructor, so we
1747  * have to cope with a partially initialised struct.
1748  */
1749 static void destroy(struct cache *cache)
1750 {
1751         unsigned i;
1752
1753         if (cache->next_migration)
1754                 mempool_free(cache->next_migration, cache->migration_pool);
1755
1756         if (cache->migration_pool)
1757                 mempool_destroy(cache->migration_pool);
1758
1759         if (cache->all_io_ds)
1760                 dm_deferred_set_destroy(cache->all_io_ds);
1761
1762         if (cache->prison)
1763                 dm_bio_prison_destroy(cache->prison);
1764
1765         if (cache->wq)
1766                 destroy_workqueue(cache->wq);
1767
1768         if (cache->dirty_bitset)
1769                 free_bitset(cache->dirty_bitset);
1770
1771         if (cache->discard_bitset)
1772                 free_bitset(cache->discard_bitset);
1773
1774         if (cache->copier)
1775                 dm_kcopyd_client_destroy(cache->copier);
1776
1777         if (cache->cmd)
1778                 dm_cache_metadata_close(cache->cmd);
1779
1780         if (cache->metadata_dev)
1781                 dm_put_device(cache->ti, cache->metadata_dev);
1782
1783         if (cache->origin_dev)
1784                 dm_put_device(cache->ti, cache->origin_dev);
1785
1786         if (cache->cache_dev)
1787                 dm_put_device(cache->ti, cache->cache_dev);
1788
1789         if (cache->policy)
1790                 dm_cache_policy_destroy(cache->policy);
1791
1792         for (i = 0; i < cache->nr_ctr_args; i++)
1793                 kfree(cache->ctr_args[i]);
1794         kfree(cache->ctr_args);
1795
1796         kfree(cache);
1797 }
1798
1799 static void cache_dtr(struct dm_target *ti)
1800 {
1801         struct cache *cache = ti->private;
1802
1803         destroy(cache);
1804 }
1805
1806 static sector_t get_dev_size(struct dm_dev *dev)
1807 {
1808         return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
1809 }
1810
1811 /*----------------------------------------------------------------*/
1812
1813 /*
1814  * Construct a cache device mapping.
1815  *
1816  * cache <metadata dev> <cache dev> <origin dev> <block size>
1817  *       <#feature args> [<feature arg>]*
1818  *       <policy> <#policy args> [<policy arg>]*
1819  *
1820  * metadata dev    : fast device holding the persistent metadata
1821  * cache dev       : fast device holding cached data blocks
1822  * origin dev      : slow device holding original data blocks
1823  * block size      : cache unit size in sectors
1824  *
1825  * #feature args   : number of feature arguments passed
1826  * feature args    : writethrough or passthrough.  (The default is writeback.)
1827  *
1828  * policy          : the replacement policy to use
1829  * #policy args    : an even number of policy arguments corresponding
1830  *                   to key/value pairs passed to the policy
1831  * policy args     : key/value pairs passed to the policy
1832  *                   E.g. 'sequential_threshold 1024'
1833  *                   See cache-policies.txt for details.
1834  *
1835  * Optional feature arguments are:
1836  *   writethrough  : write through caching that prohibits cache block
1837  *                   content from being different from origin block content.
1838  *                   Without this argument, the default behaviour is to write
1839  *                   back cache block contents later for performance reasons,
1840  *                   so they may differ from the corresponding origin blocks.
1841  */
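/*
 * A third feature argument, 'passthrough', is also accepted (see
 * parse_features() below).  In passthrough mode the cache contents are
 * treated as stale: reads are served from the origin and writes invalidate
 * the corresponding cache block.  It can only be enabled while the metadata
 * reports every cache block clean.
 *
 * Purely illustrative example (device names and sizes are hypothetical):
 * a writethrough cache with 256KB blocks and the default policy could be
 * created with a table line such as
 *
 *   0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-blocks \
 *       /dev/mapper/slow 512 1 writethrough default 0
 */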
1842 struct cache_args {
1843         struct dm_target *ti;
1844
1845         struct dm_dev *metadata_dev;
1846
1847         struct dm_dev *cache_dev;
1848         sector_t cache_sectors;
1849
1850         struct dm_dev *origin_dev;
1851         sector_t origin_sectors;
1852
1853         uint32_t block_size;
1854
1855         const char *policy_name;
1856         int policy_argc;
1857         const char **policy_argv;
1858
1859         struct cache_features features;
1860 };
1861
1862 static void destroy_cache_args(struct cache_args *ca)
1863 {
1864         if (ca->metadata_dev)
1865                 dm_put_device(ca->ti, ca->metadata_dev);
1866
1867         if (ca->cache_dev)
1868                 dm_put_device(ca->ti, ca->cache_dev);
1869
1870         if (ca->origin_dev)
1871                 dm_put_device(ca->ti, ca->origin_dev);
1872
1873         kfree(ca);
1874 }
1875
1876 static bool at_least_one_arg(struct dm_arg_set *as, char **error)
1877 {
1878         if (!as->argc) {
1879                 *error = "Insufficient args";
1880                 return false;
1881         }
1882
1883         return true;
1884 }
1885
1886 static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
1887                               char **error)
1888 {
1889         int r;
1890         sector_t metadata_dev_size;
1891         char b[BDEVNAME_SIZE];
1892
1893         if (!at_least_one_arg(as, error))
1894                 return -EINVAL;
1895
1896         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1897                           &ca->metadata_dev);
1898         if (r) {
1899                 *error = "Error opening metadata device";
1900                 return r;
1901         }
1902
1903         metadata_dev_size = get_dev_size(ca->metadata_dev);
1904         if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
1905                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1906                        bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
1907
1908         return 0;
1909 }
1910
1911 static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
1912                            char **error)
1913 {
1914         int r;
1915
1916         if (!at_least_one_arg(as, error))
1917                 return -EINVAL;
1918
1919         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1920                           &ca->cache_dev);
1921         if (r) {
1922                 *error = "Error opening cache device";
1923                 return r;
1924         }
1925         ca->cache_sectors = get_dev_size(ca->cache_dev);
1926
1927         return 0;
1928 }
1929
1930 static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
1931                             char **error)
1932 {
1933         int r;
1934
1935         if (!at_least_one_arg(as, error))
1936                 return -EINVAL;
1937
1938         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1939                           &ca->origin_dev);
1940         if (r) {
1941                 *error = "Error opening origin device";
1942                 return r;
1943         }
1944
1945         ca->origin_sectors = get_dev_size(ca->origin_dev);
1946         if (ca->ti->len > ca->origin_sectors) {
1947                 *error = "Device size larger than cached device";
1948                 return -EINVAL;
1949         }
1950
1951         return 0;
1952 }
1953
1954 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
1955                             char **error)
1956 {
1957         unsigned long block_size;
1958
1959         if (!at_least_one_arg(as, error))
1960                 return -EINVAL;
1961
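        /*
         * The block size is given in 512 byte sectors.  It must lie between
         * 32KB and 1GB, must be a multiple of 32KB, and need not be a power
         * of two.
         */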
1962         if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
1963             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1964             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1965             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1966                 *error = "Invalid data block size";
1967                 return -EINVAL;
1968         }
1969
1970         if (block_size > ca->cache_sectors) {
1971                 *error = "Data block size is larger than the cache device";
1972                 return -EINVAL;
1973         }
1974
1975         ca->block_size = block_size;
1976
1977         return 0;
1978 }
1979
1980 static void init_features(struct cache_features *cf)
1981 {
1982         cf->mode = CM_WRITE;
1983         cf->io_mode = CM_IO_WRITEBACK;
1984 }
1985
1986 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1987                           char **error)
1988 {
1989         static struct dm_arg _args[] = {
1990                 {0, 1, "Invalid number of cache feature arguments"},
1991         };
1992
1993         int r;
1994         unsigned argc;
1995         const char *arg;
1996         struct cache_features *cf = &ca->features;
1997
1998         init_features(cf);
1999
2000         r = dm_read_arg_group(_args, as, &argc, error);
2001         if (r)
2002                 return -EINVAL;
2003
2004         while (argc--) {
2005                 arg = dm_shift_arg(as);
2006
2007                 if (!strcasecmp(arg, "writeback"))
2008                         cf->io_mode = CM_IO_WRITEBACK;
2009
2010                 else if (!strcasecmp(arg, "writethrough"))
2011                         cf->io_mode = CM_IO_WRITETHROUGH;
2012
2013                 else if (!strcasecmp(arg, "passthrough"))
2014                         cf->io_mode = CM_IO_PASSTHROUGH;
2015
2016                 else {
2017                         *error = "Unrecognised cache feature requested";
2018                         return -EINVAL;
2019                 }
2020         }
2021
2022         return 0;
2023 }
2024
2025 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2026                         char **error)
2027 {
2028         static struct dm_arg _args[] = {
2029                 {0, 1024, "Invalid number of policy arguments"},
2030         };
2031
2032         int r;
2033
2034         if (!at_least_one_arg(as, error))
2035                 return -EINVAL;
2036
2037         ca->policy_name = dm_shift_arg(as);
2038
2039         r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2040         if (r)
2041                 return -EINVAL;
2042
2043         ca->policy_argv = (const char **)as->argv;
2044         dm_consume_args(as, ca->policy_argc);
2045
2046         return 0;
2047 }
2048
2049 static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2050                             char **error)
2051 {
2052         int r;
2053         struct dm_arg_set as;
2054
2055         as.argc = argc;
2056         as.argv = argv;
2057
2058         r = parse_metadata_dev(ca, &as, error);
2059         if (r)
2060                 return r;
2061
2062         r = parse_cache_dev(ca, &as, error);
2063         if (r)
2064                 return r;
2065
2066         r = parse_origin_dev(ca, &as, error);
2067         if (r)
2068                 return r;
2069
2070         r = parse_block_size(ca, &as, error);
2071         if (r)
2072                 return r;
2073
2074         r = parse_features(ca, &as, error);
2075         if (r)
2076                 return r;
2077
2078         r = parse_policy(ca, &as, error);
2079         if (r)
2080                 return r;
2081
2082         return 0;
2083 }
2084
2085 /*----------------------------------------------------------------*/
2086
2087 static struct kmem_cache *migration_cache;
2088
2089 #define NOT_CORE_OPTION 1
2090
2091 static int process_config_option(struct cache *cache, const char *key, const char *value)
2092 {
2093         unsigned long tmp;
2094
2095         if (!strcasecmp(key, "migration_threshold")) {
2096                 if (kstrtoul(value, 10, &tmp))
2097                         return -EINVAL;
2098
2099                 cache->migration_threshold = tmp;
2100                 return 0;
2101         }
2102
2103         return NOT_CORE_OPTION;
2104 }
2105
2106 static int set_config_value(struct cache *cache, const char *key, const char *value)
2107 {
2108         int r = process_config_option(cache, key, value);
2109
2110         if (r == NOT_CORE_OPTION)
2111                 r = policy_set_config_value(cache->policy, key, value);
2112
2113         if (r)
2114                 DMWARN("bad config value for %s: %s", key, value);
2115
2116         return r;
2117 }
2118
2119 static int set_config_values(struct cache *cache, int argc, const char **argv)
2120 {
2121         int r = 0;
2122
2123         if (argc & 1) {
2124                 DMWARN("Odd number of policy arguments given; they should be <key> <value> pairs.");
2125                 return -EINVAL;
2126         }
2127
2128         while (argc) {
2129                 r = set_config_value(cache, argv[0], argv[1]);
2130                 if (r)
2131                         break;
2132
2133                 argc -= 2;
2134                 argv += 2;
2135         }
2136
2137         return r;
2138 }
2139
2140 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2141                                char **error)
2142 {
2143         struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2144                                                            cache->cache_size,
2145                                                            cache->origin_sectors,
2146                                                            cache->sectors_per_block);
2147         if (IS_ERR(p)) {
2148                 *error = "Error creating cache's policy";
2149                 return PTR_ERR(p);
2150         }
2151         cache->policy = p;
2152
2153         return 0;
2154 }
2155
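/*
 * Maximum amount of migration I/O (promotions, demotions and writebacks)
 * allowed in flight at once, expressed in sectors (2048 sectors = 1MB).
 * Tunable via the 'migration_threshold' key, either as a ctr argument or a
 * target message.
 */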
2156 #define DEFAULT_MIGRATION_THRESHOLD 2048
2157
2158 static int cache_create(struct cache_args *ca, struct cache **result)
2159 {
2160         int r = 0;
2161         char **error = &ca->ti->error;
2162         struct cache *cache;
2163         struct dm_target *ti = ca->ti;
2164         dm_block_t origin_blocks;
2165         struct dm_cache_metadata *cmd;
2166         bool may_format = ca->features.mode == CM_WRITE;
2167
2168         cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2169         if (!cache)
2170                 return -ENOMEM;
2171
2172         cache->ti = ca->ti;
2173         ti->private = cache;
2174         ti->num_flush_bios = 2;
2175         ti->flush_supported = true;
2176
2177         ti->num_discard_bios = 1;
2178         ti->discards_supported = true;
2179         ti->discard_zeroes_data_unsupported = true;
2180         /* Discard bios must be split on a block boundary */
2181         ti->split_discard_bios = true;
2182
2183         cache->features = ca->features;
2184         ti->per_bio_data_size = get_per_bio_data_size(cache);
2185
2186         cache->callbacks.congested_fn = cache_is_congested;
2187         dm_table_add_target_callbacks(ti->table, &cache->callbacks);
2188
2189         cache->metadata_dev = ca->metadata_dev;
2190         cache->origin_dev = ca->origin_dev;
2191         cache->cache_dev = ca->cache_dev;
2192
2193         ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2194
2195         /* FIXME: factor out this whole section */
2196         origin_blocks = cache->origin_sectors = ca->origin_sectors;
2197         origin_blocks = block_div(origin_blocks, ca->block_size);
2198         cache->origin_blocks = to_oblock(origin_blocks);
2199
2200         cache->sectors_per_block = ca->block_size;
2201         if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2202                 r = -EINVAL;
2203                 goto bad;
2204         }
2205
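        /*
         * (x & (x - 1)) is zero only for powers of two.  A power-of-two
         * block size lets sectors be converted to blocks with a shift;
         * otherwise sectors_per_block_shift is set to -1 and 64-bit division
         * is used instead.
         */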
2206         if (ca->block_size & (ca->block_size - 1)) {
2207                 dm_block_t cache_size = ca->cache_sectors;
2208
2209                 cache->sectors_per_block_shift = -1;
2210                 cache_size = block_div(cache_size, ca->block_size);
2211                 cache->cache_size = to_cblock(cache_size);
2212         } else {
2213                 cache->sectors_per_block_shift = __ffs(ca->block_size);
2214                 cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
2215         }
2216
2217         r = create_cache_policy(cache, ca, error);
2218         if (r)
2219                 goto bad;
2220
2221         cache->policy_nr_args = ca->policy_argc;
2222         cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2223
2224         r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2225         if (r) {
2226                 *error = "Error setting cache policy's config values";
2227                 goto bad;
2228         }
2229
2230         cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2231                                      ca->block_size, may_format,
2232                                      dm_cache_policy_get_hint_size(cache->policy));
2233         if (IS_ERR(cmd)) {
2234                 *error = "Error creating metadata object";
2235                 r = PTR_ERR(cmd);
2236                 goto bad;
2237         }
2238         cache->cmd = cmd;
2239
2240         if (passthrough_mode(&cache->features)) {
2241                 bool all_clean;
2242
2243                 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2244                 if (r) {
2245                         *error = "dm_cache_metadata_all_clean() failed";
2246                         goto bad;
2247                 }
2248
2249                 if (!all_clean) {
2250                         *error = "Cannot enter passthrough mode unless all blocks are clean";
2251                         r = -EINVAL;
2252                         goto bad;
2253                 }
2254         }
2255
2256         spin_lock_init(&cache->lock);
2257         bio_list_init(&cache->deferred_bios);
2258         bio_list_init(&cache->deferred_flush_bios);
2259         bio_list_init(&cache->deferred_writethrough_bios);
2260         INIT_LIST_HEAD(&cache->quiesced_migrations);
2261         INIT_LIST_HEAD(&cache->completed_migrations);
2262         INIT_LIST_HEAD(&cache->need_commit_migrations);
2263         atomic_set(&cache->nr_migrations, 0);
2264         init_waitqueue_head(&cache->migration_wait);
2265
2266         init_waitqueue_head(&cache->quiescing_wait);
2267         atomic_set(&cache->quiescing, 0);
2268         atomic_set(&cache->quiescing_ack, 0);
2269
2270         r = -ENOMEM;
2271         atomic_set(&cache->nr_dirty, 0);
2272         cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2273         if (!cache->dirty_bitset) {
2274                 *error = "could not allocate dirty bitset";
2275                 goto bad;
2276         }
2277         clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2278
2279         cache->discard_nr_blocks = cache->origin_blocks;
2280         cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks));
2281         if (!cache->discard_bitset) {
2282                 *error = "could not allocate discard bitset";
2283                 goto bad;
2284         }
2285         clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks));
2286
2287         cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2288         if (IS_ERR(cache->copier)) {
2289                 *error = "could not create kcopyd client";
2290                 r = PTR_ERR(cache->copier);
2291                 goto bad;
2292         }
2293
2294         cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2295         if (!cache->wq) {
2296                 *error = "could not create workqueue";
2297                 goto bad;
2298         }
2299         INIT_WORK(&cache->worker, do_worker);
2300         INIT_DELAYED_WORK(&cache->waker, do_waker);
2301         cache->last_commit_jiffies = jiffies;
2302
2303         cache->prison = dm_bio_prison_create(PRISON_CELLS);
2304         if (!cache->prison) {
2305                 *error = "could not create bio prison";
2306                 goto bad;
2307         }
2308
2309         cache->all_io_ds = dm_deferred_set_create();
2310         if (!cache->all_io_ds) {
2311                 *error = "could not create all_io deferred set";
2312                 goto bad;
2313         }
2314
2315         cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
2316                                                          migration_cache);
2317         if (!cache->migration_pool) {
2318                 *error = "Error creating cache's migration mempool";
2319                 goto bad;
2320         }
2321
2322         cache->next_migration = NULL;
2323
2324         cache->need_tick_bio = true;
2325         cache->sized = false;
2326         cache->invalidate = false;
2327         cache->commit_requested = false;
2328         cache->loaded_mappings = false;
2329         cache->loaded_discards = false;
2330
2331         load_stats(cache);
2332
2333         atomic_set(&cache->stats.demotion, 0);
2334         atomic_set(&cache->stats.promotion, 0);
2335         atomic_set(&cache->stats.copies_avoided, 0);
2336         atomic_set(&cache->stats.cache_cell_clash, 0);
2337         atomic_set(&cache->stats.commit_count, 0);
2338         atomic_set(&cache->stats.discard_count, 0);
2339
2340         spin_lock_init(&cache->invalidation_lock);
2341         INIT_LIST_HEAD(&cache->invalidation_requests);
2342
2343         *result = cache;
2344         return 0;
2345
2346 bad:
2347         destroy(cache);
2348         return r;
2349 }
2350
2351 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2352 {
2353         unsigned i;
2354         const char **copy;
2355
2356         copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2357         if (!copy)
2358                 return -ENOMEM;
2359         for (i = 0; i < argc; i++) {
2360                 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2361                 if (!copy[i]) {
2362                         while (i--)
2363                                 kfree(copy[i]);
2364                         kfree(copy);
2365                         return -ENOMEM;
2366                 }
2367         }
2368
2369         cache->nr_ctr_args = argc;
2370         cache->ctr_args = copy;
2371
2372         return 0;
2373 }
2374
2375 static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2376 {
2377         int r = -EINVAL;
2378         struct cache_args *ca;
2379         struct cache *cache = NULL;
2380
2381         ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2382         if (!ca) {
2383                 ti->error = "Error allocating memory for cache";
2384                 return -ENOMEM;
2385         }
2386         ca->ti = ti;
2387
2388         r = parse_cache_args(ca, argc, argv, &ti->error);
2389         if (r)
2390                 goto out;
2391
2392         r = cache_create(ca, &cache);
2393         if (r)
2394                 goto out;
2395
2396         r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2397         if (r) {
2398                 destroy(cache);
2399                 goto out;
2400         }
2401
2402         ti->private = cache;
2403
2404 out:
2405         destroy_cache_args(ca);
2406         return r;
2407 }
2408
2409 static int cache_map(struct dm_target *ti, struct bio *bio)
2410 {
2411         struct cache *cache = ti->private;
2412
2413         int r;
2414         dm_oblock_t block = get_bio_block(cache, bio);
2415         size_t pb_data_size = get_per_bio_data_size(cache);
2416         bool can_migrate = false;
2417         bool discarded_block;
2418         struct dm_bio_prison_cell *cell;
2419         struct policy_result lookup_result;
2420         struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
2421
2422         if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2423                 /*
2424                  * This can only occur if the io goes to a partial block at
2425                  * the end of the origin device.  We don't cache these.
2426                  * Just remap to the origin and carry on.
2427                  */
2428                 remap_to_origin(cache, bio);
2429                 return DM_MAPIO_REMAPPED;
2430         }
2431
2432         if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2433                 defer_bio(cache, bio);
2434                 return DM_MAPIO_SUBMITTED;
2435         }
2436
2437         /*
2438          * Check to see if that block is currently migrating.
2439          */
2440         cell = alloc_prison_cell(cache);
2441         if (!cell) {
2442                 defer_bio(cache, bio);
2443                 return DM_MAPIO_SUBMITTED;
2444         }
2445
2446         r = bio_detain(cache, block, bio, cell,
2447                        (cell_free_fn) free_prison_cell,
2448                        cache, &cell);
2449         if (r) {
2450                 if (r < 0)
2451                         defer_bio(cache, bio);
2452
2453                 return DM_MAPIO_SUBMITTED;
2454         }
2455
2456         discarded_block = is_discarded_oblock(cache, block);
2457
2458         r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
2459                        bio, &lookup_result);
2460         if (r == -EWOULDBLOCK) {
2461                 cell_defer(cache, cell, true);
2462                 return DM_MAPIO_SUBMITTED;
2463
2464         } else if (r) {
2465                 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
2466                 bio_io_error(bio);
2467                 return DM_MAPIO_SUBMITTED;
2468         }
2469
2470         r = DM_MAPIO_REMAPPED;
2471         switch (lookup_result.op) {
2472         case POLICY_HIT:
2473                 if (passthrough_mode(&cache->features)) {
2474                         if (bio_data_dir(bio) == WRITE) {
2475                                 /*
2476                                  * We need to invalidate this block, so
2477                                  * defer for the worker thread.
2478                                  */
2479                                 cell_defer(cache, cell, true);
2480                                 r = DM_MAPIO_SUBMITTED;
2481
2482                         } else {
2483                                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2484                                 inc_miss_counter(cache, bio);
2485                                 remap_to_origin_clear_discard(cache, bio, block);
2486
2487                                 cell_defer(cache, cell, false);
2488                         }
2489
2490                 } else {
2491                         inc_hit_counter(cache, bio);
2492                         pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2493
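                        /*
                         * Writethrough writes to a clean block are sent to
                         * the origin first; the hooked endio then queues the
                         * bio on deferred_writethrough_bios so the worker
                         * reissues it to the cache, keeping both copies in
                         * sync.
                         */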
2494                         if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
2495                             !is_dirty(cache, lookup_result.cblock))
2496                                 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
2497                         else
2498                                 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
2499
2500                         cell_defer(cache, cell, false);
2501                 }
2502                 break;
2503
2504         case POLICY_MISS:
2505                 inc_miss_counter(cache, bio);
2506                 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2507
2508                 if (pb->req_nr != 0) {
2509                         /*
2510                          * This is a duplicate writethrough io that is no
2511                          * longer needed because the block has been demoted.
2512                          */
2513                         bio_endio(bio, 0);
2514                         cell_defer(cache, cell, false);
2515                         return DM_MAPIO_SUBMITTED;
2516                 } else {
2517                         remap_to_origin_clear_discard(cache, bio, block);
2518                         cell_defer(cache, cell, false);
2519                 }
2520                 break;
2521
2522         default:
2523                 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
2524                             (unsigned) lookup_result.op);
2525                 bio_io_error(bio);
2526                 r = DM_MAPIO_SUBMITTED;
2527         }
2528
2529         return r;
2530 }
2531
2532 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2533 {
2534         struct cache *cache = ti->private;
2535         unsigned long flags;
2536         size_t pb_data_size = get_per_bio_data_size(cache);
2537         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
2538
2539         if (pb->tick) {
2540                 policy_tick(cache->policy);
2541
2542                 spin_lock_irqsave(&cache->lock, flags);
2543                 cache->need_tick_bio = true;
2544                 spin_unlock_irqrestore(&cache->lock, flags);
2545         }
2546
2547         check_for_quiesced_migrations(cache, pb);
2548
2549         return 0;
2550 }
2551
2552 static int write_dirty_bitset(struct cache *cache)
2553 {
2554         unsigned i, r;
2555
2556         for (i = 0; i < from_cblock(cache->cache_size); i++) {
2557                 r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2558                                        is_dirty(cache, to_cblock(i)));
2559                 if (r)
2560                         return r;
2561         }
2562
2563         return 0;
2564 }
2565
2566 static int write_discard_bitset(struct cache *cache)
2567 {
2568         unsigned i, r;
2569
2570         r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block,
2571                                            cache->origin_blocks);
2572         if (r) {
2573                 DMERR("could not resize on-disk discard bitset");
2574                 return r;
2575         }
2576
2577         for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) {
2578                 r = dm_cache_set_discard(cache->cmd, to_oblock(i),
2579                                          is_discarded(cache, to_oblock(i)));
2580                 if (r)
2581                         return r;
2582         }
2583
2584         return 0;
2585 }
2586
2587 /*
2588  * returns true on success
2589  */
2590 static bool sync_metadata(struct cache *cache)
2591 {
2592         int r1, r2, r3, r4;
2593
2594         r1 = write_dirty_bitset(cache);
2595         if (r1)
2596                 DMERR("could not write dirty bitset");
2597
2598         r2 = write_discard_bitset(cache);
2599         if (r2)
2600                 DMERR("could not write discard bitset");
2601
2602         save_stats(cache);
2603
2604         r3 = dm_cache_write_hints(cache->cmd, cache->policy);
2605         if (r3)
2606                 DMERR("could not write hints");
2607
2608         /*
2609          * If writing the above metadata failed, we still commit, but don't
2610          * set the clean shutdown flag.  This will effectively force every
2611          * dirty bit to be set on reload.
2612          */
2613         r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
2614         if (r4)
2615                 DMERR("could not write cache metadata.  Data loss may occur.");
2616
2617         return !r1 && !r2 && !r3 && !r4;
2618 }
2619
2620 static void cache_postsuspend(struct dm_target *ti)
2621 {
2622         struct cache *cache = ti->private;
2623
2624         start_quiescing(cache);
2625         wait_for_migrations(cache);
2626         stop_worker(cache);
2627         requeue_deferred_io(cache);
2628         stop_quiescing(cache);
2629
2630         (void) sync_metadata(cache);
2631 }
2632
2633 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2634                         bool dirty, uint32_t hint, bool hint_valid)
2635 {
2636         int r;
2637         struct cache *cache = context;
2638
2639         r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
2640         if (r)
2641                 return r;
2642
2643         if (dirty)
2644                 set_dirty(cache, oblock, cblock);
2645         else
2646                 clear_dirty(cache, oblock, cblock);
2647
2648         return 0;
2649 }
2650
2651 static int load_discard(void *context, sector_t discard_block_size,
2652                         dm_oblock_t oblock, bool discard)
2653 {
2654         struct cache *cache = context;
2655
2656         if (discard)
2657                 set_discard(cache, oblock);
2658         else
2659                 clear_discard(cache, oblock);
2660
2661         return 0;
2662 }
2663
2664 static dm_cblock_t get_cache_dev_size(struct cache *cache)
2665 {
2666         sector_t size = get_dev_size(cache->cache_dev);
2667         (void) sector_div(size, cache->sectors_per_block);
2668         return to_cblock(size);
2669 }
2670
2671 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2672 {
2673         if (from_cblock(new_size) > from_cblock(cache->cache_size))
2674                 return true;
2675
2676         /*
2677          * We can't drop a dirty block when shrinking the cache.
2678          */
2679         while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2680                 if (is_dirty(cache, new_size)) {
2681                         DMERR("unable to shrink cache; cache block %llu is dirty",
2682                               (unsigned long long) from_cblock(new_size));
2683                         return false;
2684                 }
2685                 new_size = to_cblock(from_cblock(new_size) + 1);
2686         }
2687
2688         return true;
2689 }
2690
2691 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2692 {
2693         int r;
2694
2695         r = dm_cache_resize(cache->cmd, new_size);
2696         if (r) {
2697                 DMERR("could not resize cache metadata");
2698                 return r;
2699         }
2700
2701         cache->cache_size = new_size;
2702
2703         return 0;
2704 }
2705
2706 static int cache_preresume(struct dm_target *ti)
2707 {
2708         int r = 0;
2709         struct cache *cache = ti->private;
2710         dm_cblock_t csize = get_cache_dev_size(cache);
2711
2712         /*
2713          * Check to see if the cache device has been resized.
2714          */
2715         if (!cache->sized) {
2716                 r = resize_cache_dev(cache, csize);
2717                 if (r)
2718                         return r;
2719
2720                 cache->sized = true;
2721
2722         } else if (csize != cache->cache_size) {
2723                 if (!can_resize(cache, csize))
2724                         return -EINVAL;
2725
2726                 r = resize_cache_dev(cache, csize);
2727                 if (r)
2728                         return r;
2729         }
2730
2731         if (!cache->loaded_mappings) {
2732                 r = dm_cache_load_mappings(cache->cmd, cache->policy,
2733                                            load_mapping, cache);
2734                 if (r) {
2735                         DMERR("could not load cache mappings");
2736                         return r;
2737                 }
2738
2739                 cache->loaded_mappings = true;
2740         }
2741
2742         if (!cache->loaded_discards) {
2743                 r = dm_cache_load_discards(cache->cmd, load_discard, cache);
2744                 if (r) {
2745                         DMERR("could not load origin discards");
2746                         return r;
2747                 }
2748
2749                 cache->loaded_discards = true;
2750         }
2751
2752         return r;
2753 }
2754
2755 static void cache_resume(struct dm_target *ti)
2756 {
2757         struct cache *cache = ti->private;
2758
2759         cache->need_tick_bio = true;
2760         do_waker(&cache->waker.work);
2761 }
2762
2763 /*
2764  * Status format:
2765  *
2766  * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
2767  * <cache block size> <#used cache blocks>/<#total cache blocks>
2768  * <#read hits> <#read misses> <#write hits> <#write misses>
2769  * <#demotions> <#promotions> <#dirty>
2770  * <#features> <features>*
2771  * <#core args> <core args>
2772  * <policy name> <#policy args> <policy args>*
2773  */
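/*
 * Purely illustrative example (every figure is invented); the policy name
 * and its key/value pairs at the end depend on the policy in use:
 *
 *   8 72/16384 512 156/262144 3123 4422 2875 1204 86 1530 12 \
 *       1 writeback 2 migration_threshold 2048 <policy name> <#policy args> ...
 */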
2774 static void cache_status(struct dm_target *ti, status_type_t type,
2775                          unsigned status_flags, char *result, unsigned maxlen)
2776 {
2777         int r = 0;
2778         unsigned i;
2779         ssize_t sz = 0;
2780         dm_block_t nr_free_blocks_metadata = 0;
2781         dm_block_t nr_blocks_metadata = 0;
2782         char buf[BDEVNAME_SIZE];
2783         struct cache *cache = ti->private;
2784         dm_cblock_t residency;
2785
2786         switch (type) {
2787         case STATUSTYPE_INFO:
2788                 /* Commit to ensure statistics aren't out-of-date */
2789                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
2790                         r = dm_cache_commit(cache->cmd, false);
2791                         if (r)
2792                                 DMERR("could not commit metadata for accurate status");
2793                 }
2794
2795                 r = dm_cache_get_free_metadata_block_count(cache->cmd,
2796                                                            &nr_free_blocks_metadata);
2797                 if (r) {
2798                         DMERR("could not get metadata free block count");
2799                         goto err;
2800                 }
2801
2802                 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
2803                 if (r) {
2804                         DMERR("could not get metadata device size");
2805                         goto err;
2806                 }
2807
2808                 residency = policy_residency(cache->policy);
2809
2810                 DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
2811                        (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
2812                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2813                        (unsigned long long)nr_blocks_metadata,
2814                        cache->sectors_per_block,
2815                        (unsigned long long) from_cblock(residency),
2816                        (unsigned long long) from_cblock(cache->cache_size),
2817                        (unsigned) atomic_read(&cache->stats.read_hit),
2818                        (unsigned) atomic_read(&cache->stats.read_miss),
2819                        (unsigned) atomic_read(&cache->stats.write_hit),
2820                        (unsigned) atomic_read(&cache->stats.write_miss),
2821                        (unsigned) atomic_read(&cache->stats.demotion),
2822                        (unsigned) atomic_read(&cache->stats.promotion),
2823                        (unsigned long) atomic_read(&cache->nr_dirty));
2824
2825                 if (writethrough_mode(&cache->features))
2826                         DMEMIT("1 writethrough ");
2827
2828                 else if (passthrough_mode(&cache->features))
2829                         DMEMIT("1 passthrough ");
2830
2831                 else if (writeback_mode(&cache->features))
2832                         DMEMIT("1 writeback ");
2833
2834                 else {
2835                         DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
2836                         goto err;
2837                 }
2838
2839                 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
2840
2841                 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
2842                 if (sz < maxlen) {
2843                         r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
2844                         if (r)
2845                                 DMERR("policy_emit_config_values returned %d", r);
2846                 }
2847
2848                 break;
2849
2850         case STATUSTYPE_TABLE:
2851                 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
2852                 DMEMIT("%s ", buf);
2853                 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
2854                 DMEMIT("%s ", buf);
2855                 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
2856                 DMEMIT("%s", buf);
2857
2858                 for (i = 0; i < cache->nr_ctr_args - 1; i++)
2859                         DMEMIT(" %s", cache->ctr_args[i]);
2860                 if (cache->nr_ctr_args)
2861                         DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
2862         }
2863
2864         return;
2865
2866 err:
2867         DMEMIT("Error");
2868 }
2869
2870 /*
2871  * A cache block range can take two forms:
2872  *
2873  * i) A single cblock, e.g. '3456'
2874  * ii) A begin and end cblock separated by a hyphen, e.g. '123-234'
2875  */
2876 static int parse_cblock_range(struct cache *cache, const char *str,
2877                               struct cblock_range *result)
2878 {
2879         char dummy;
2880         uint64_t b, e;
2881         int r;
2882
2883         /*
2884          * Try and parse form (ii) first.
2885          */
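        /*
         * The trailing %c is only assigned when there is junk after the
         * range, so exactly two conversions (r == 2) indicate a clean
         * '<begin>-<end>' string.
         */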
2886         r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
2887         if (r < 0)
2888                 return r;
2889
2890         if (r == 2) {
2891                 result->begin = to_cblock(b);
2892                 result->end = to_cblock(e);
2893                 return 0;
2894         }
2895
2896         /*
2897          * That didn't work, try form (i).
2898          */
2899         r = sscanf(str, "%llu%c", &b, &dummy);
2900         if (r < 0)
2901                 return r;
2902
2903         if (r == 1) {
2904                 result->begin = to_cblock(b);
2905                 result->end = to_cblock(from_cblock(result->begin) + 1u);
2906                 return 0;
2907         }
2908
2909         DMERR("invalid cblock range '%s'", str);
2910         return -EINVAL;
2911 }
2912
2913 static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
2914 {
2915         uint64_t b = from_cblock(range->begin);
2916         uint64_t e = from_cblock(range->end);
2917         uint64_t n = from_cblock(cache->cache_size);
2918
2919         if (b >= n) {
2920                 DMERR("begin cblock out of range: %llu >= %llu", b, n);
2921                 return -EINVAL;
2922         }
2923
2924         if (e > n) {
2925                 DMERR("end cblock out of range: %llu > %llu", e, n);
2926                 return -EINVAL;
2927         }
2928
2929         if (b >= e) {
2930                 DMERR("invalid cblock range: %llu >= %llu", b, e);
2931                 return -EINVAL;
2932         }
2933
2934         return 0;
2935 }
2936
2937 static int request_invalidation(struct cache *cache, struct cblock_range *range)
2938 {
2939         struct invalidation_request req;
2940
2941         INIT_LIST_HEAD(&req.list);
2942         req.cblocks = range;
2943         atomic_set(&req.complete, 0);
2944         req.err = 0;
2945         init_waitqueue_head(&req.result_wait);
2946
2947         spin_lock(&cache->invalidation_lock);
2948         list_add(&req.list, &cache->invalidation_requests);
2949         spin_unlock(&cache->invalidation_lock);
2950         wake_worker(cache);
2951
2952         wait_event(req.result_wait, atomic_read(&req.complete));
2953         return req.err;
2954 }
2955
2956 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
2957                                               const char **cblock_ranges)
2958 {
2959         int r = 0;
2960         unsigned i;
2961         struct cblock_range range;
2962
2963         if (!passthrough_mode(&cache->features)) {
2964                 DMERR("cache has to be in passthrough mode for invalidation");
2965                 return -EPERM;
2966         }
2967
2968         for (i = 0; i < count; i++) {
2969                 r = parse_cblock_range(cache, cblock_ranges[i], &range);
2970                 if (r)
2971                         break;
2972
2973                 r = validate_cblock_range(cache, &range);
2974                 if (r)
2975                         break;
2976
2977                 /*
2978                  * Pass the begin and end cache blocks to the worker and wake it.
2979                  */
2980                 r = request_invalidation(cache, &range);
2981                 if (r)
2982                         break;
2983         }
2984
2985         return r;
2986 }
2987
2988 /*
2989  * Supports
2990  *      "<key> <value>"
2991  * and
2992  *      "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
2993  *
2994  * The key migration_threshold is supported by the cache target core.
2995  */
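/*
 * For example (hypothetical device name 'cached'):
 *
 *   dmsetup message cached 0 migration_threshold 4096
 *   dmsetup message cached 0 invalidate_cblocks 2345 3000-3100
 *
 * The invalidate_cblocks message is only accepted while the cache is in
 * passthrough mode.
 */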
2996 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
2997 {
2998         struct cache *cache = ti->private;
2999
3000         if (!argc)
3001                 return -EINVAL;
3002
3003         if (!strcasecmp(argv[0], "invalidate_cblocks"))
3004                 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3005
3006         if (argc != 2)
3007                 return -EINVAL;
3008
3009         return set_config_value(cache, argv[0], argv[1]);
3010 }
3011
3012 static int cache_iterate_devices(struct dm_target *ti,
3013                                  iterate_devices_callout_fn fn, void *data)
3014 {
3015         int r = 0;
3016         struct cache *cache = ti->private;
3017
3018         r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3019         if (!r)
3020                 r = fn(ti, cache->origin_dev, 0, ti->len, data);
3021
3022         return r;
3023 }
3024
3025 /*
3026  * We assume I/O is going to the origin (which is the volume
3027  * more likely to have restrictions e.g. by being striped).
3028  * (Looking up the exact location of the data would be expensive
3029  * and could always be out of date by the time the bio is submitted.)
3030  */
3031 static int cache_bvec_merge(struct dm_target *ti,
3032                             struct bvec_merge_data *bvm,
3033                             struct bio_vec *biovec, int max_size)
3034 {
3035         struct cache *cache = ti->private;
3036         struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
3037
3038         if (!q->merge_bvec_fn)
3039                 return max_size;
3040
3041         bvm->bi_bdev = cache->origin_dev->bdev;
3042         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3043 }
3044
3045 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3046 {
3047         /*
3048          * FIXME: these limits may be incompatible with the cache device
3049          */
3050         limits->max_discard_sectors = cache->sectors_per_block;
3051         limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT;
3052 }
3053
3054 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3055 {
3056         struct cache *cache = ti->private;
3057         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3058
3059         /*
3060          * If the system-determined stacked limits are compatible with the
3061          * cache's blocksize (io_opt is a factor) do not override them.
3062          */
3063         if (io_opt_sectors < cache->sectors_per_block ||
3064             do_div(io_opt_sectors, cache->sectors_per_block)) {
3065                 blk_limits_io_min(limits, 0);
3066                 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3067         }
3068         set_discard_limits(cache, limits);
3069 }
3070
3071 /*----------------------------------------------------------------*/
3072
3073 static struct target_type cache_target = {
3074         .name = "cache",
3075         .version = {1, 4, 0},
3076         .module = THIS_MODULE,
3077         .ctr = cache_ctr,
3078         .dtr = cache_dtr,
3079         .map = cache_map,
3080         .end_io = cache_end_io,
3081         .postsuspend = cache_postsuspend,
3082         .preresume = cache_preresume,
3083         .resume = cache_resume,
3084         .status = cache_status,
3085         .message = cache_message,
3086         .iterate_devices = cache_iterate_devices,
3087         .merge = cache_bvec_merge,
3088         .io_hints = cache_io_hints,
3089 };
3090
3091 static int __init dm_cache_init(void)
3092 {
3093         int r;
3094
3095         r = dm_register_target(&cache_target);
3096         if (r) {
3097                 DMERR("cache target registration failed: %d", r);
3098                 return r;
3099         }
3100
3101         migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3102         if (!migration_cache) {
3103                 dm_unregister_target(&cache_target);
3104                 return -ENOMEM;
3105         }
3106
3107         return 0;
3108 }
3109
3110 static void __exit dm_cache_exit(void)
3111 {
3112         dm_unregister_target(&cache_target);
3113         kmem_cache_destroy(migration_cache);
3114 }
3115
3116 module_init(dm_cache_init);
3117 module_exit(dm_cache_exit);
3118
3119 MODULE_DESCRIPTION(DM_NAME " cache target");
3120 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3121 MODULE_LICENSE("GPL");