/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
        0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
        0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
        0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
        0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
        "default",
        "writethrough",
        "writeback",
        "writearound",
        "none",
        NULL
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES         (256 * 1024 / PAGE_SIZE)

/* Superblock */

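/*
 * Read and validate a superblock. __bread() fetches the block containing it
 * (block index 1 at a block size of SB_SIZE, which lands on the SB_SECTOR
 * offset that sb->offset is checked against), the little-endian on-disk
 * fields are copied into the native-endian *sb, and validation then depends
 * on sb->version: backing devices (BDEV) only carry a data offset, cache
 * devices (CDEV) also carry bucket geometry and cache set membership. On
 * success this returns NULL and hands back a referenced page holding the
 * raw superblock in *res (later reused for superblock writes); on failure
 * it returns a human-readable error string.
 */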
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
                              struct page **res)
{
        const char *err;
        struct cache_sb *s;
        struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
        unsigned i;

        if (!bh)
                return "IO error";

        s = (struct cache_sb *) bh->b_data;

        sb->offset              = le64_to_cpu(s->offset);
        sb->version             = le64_to_cpu(s->version);

        memcpy(sb->magic,       s->magic, 16);
        memcpy(sb->uuid,        s->uuid, 16);
        memcpy(sb->set_uuid,    s->set_uuid, 16);
        memcpy(sb->label,       s->label, SB_LABEL_SIZE);

        sb->flags               = le64_to_cpu(s->flags);
        sb->seq                 = le64_to_cpu(s->seq);
        sb->last_mount          = le32_to_cpu(s->last_mount);
        sb->first_bucket        = le16_to_cpu(s->first_bucket);
        sb->keys                = le16_to_cpu(s->keys);

        for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
                sb->d[i] = le64_to_cpu(s->d[i]);

        pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
                 sb->version, sb->flags, sb->seq, sb->keys);

        err = "Not a bcache superblock";
        if (sb->offset != SB_SECTOR)
                goto err;

        if (memcmp(sb->magic, bcache_magic, 16))
                goto err;

        err = "Too many journal buckets";
        if (sb->keys > SB_JOURNAL_BUCKETS)
                goto err;

        err = "Bad checksum";
        if (s->csum != csum_set(s))
                goto err;

        err = "Bad UUID";
        if (bch_is_zero(sb->uuid, 16))
                goto err;

        sb->block_size  = le16_to_cpu(s->block_size);

        err = "Superblock block size smaller than device block size";
        if (sb->block_size << 9 < bdev_logical_block_size(bdev))
                goto err;

        switch (sb->version) {
        case BCACHE_SB_VERSION_BDEV:
                sb->data_offset = BDEV_DATA_START_DEFAULT;
                break;
        case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
                sb->data_offset = le64_to_cpu(s->data_offset);

                err = "Bad data offset";
                if (sb->data_offset < BDEV_DATA_START_DEFAULT)
                        goto err;

                break;
        case BCACHE_SB_VERSION_CDEV:
        case BCACHE_SB_VERSION_CDEV_WITH_UUID:
                sb->nbuckets    = le64_to_cpu(s->nbuckets);
                sb->block_size  = le16_to_cpu(s->block_size);
                sb->bucket_size = le16_to_cpu(s->bucket_size);

                sb->nr_in_set   = le16_to_cpu(s->nr_in_set);
                sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

                err = "Too many buckets";
                if (sb->nbuckets > LONG_MAX)
                        goto err;

                err = "Not enough buckets";
                if (sb->nbuckets < 1 << 7)
                        goto err;

                err = "Bad block/bucket size";
                if (!is_power_of_2(sb->block_size) ||
                    sb->block_size > PAGE_SECTORS ||
                    !is_power_of_2(sb->bucket_size) ||
                    sb->bucket_size < PAGE_SECTORS)
                        goto err;

                err = "Invalid superblock: device too small";
                if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
                        goto err;

                err = "Bad UUID";
                if (bch_is_zero(sb->set_uuid, 16))
                        goto err;

                err = "Bad cache device number in set";
                if (!sb->nr_in_set ||
                    sb->nr_in_set <= sb->nr_this_dev ||
                    sb->nr_in_set > MAX_CACHES_PER_SET)
                        goto err;

                err = "Journal buckets not sequential";
                for (i = 0; i < sb->keys; i++)
                        if (sb->d[i] != sb->first_bucket + i)
                                goto err;

                err = "Too many journal buckets";
                if (sb->first_bucket + sb->keys > sb->nbuckets)
                        goto err;

                err = "Invalid superblock: first bucket comes before end of super";
                if (sb->first_bucket * sb->bucket_size < 16)
                        goto err;

                break;
        default:
                err = "Unsupported superblock version";
                goto err;
        }

        sb->last_mount = get_seconds();
        err = NULL;

        get_page(bh->b_page);
        *res = bh->b_page;
err:
        put_bh(bh);
        return err;
}

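/*
 * The write side mirrors read_super(): __write_super() re-encodes the
 * native-endian struct cache_sb into its little-endian on-disk form in the
 * page already attached to the bio (the page originally returned by
 * read_super()), recomputes the checksum and submits a REQ_SYNC|REQ_META
 * write to SB_SECTOR. Only the fields bcache changes at runtime are
 * re-encoded; everything else in the page is still what was read from disk,
 * including the magic.
 */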
static void write_bdev_super_endio(struct bio *bio)
{
        struct cached_dev *dc = bio->bi_private;
        /* XXX: error checking */

        closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
        struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
        unsigned i;

        bio->bi_iter.bi_sector  = SB_SECTOR;
        bio->bi_iter.bi_size    = SB_SIZE;
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
        bch_bio_map(bio, NULL);

        out->offset             = cpu_to_le64(sb->offset);
        out->version            = cpu_to_le64(sb->version);

        memcpy(out->uuid,       sb->uuid, 16);
        memcpy(out->set_uuid,   sb->set_uuid, 16);
        memcpy(out->label,      sb->label, SB_LABEL_SIZE);

        out->flags              = cpu_to_le64(sb->flags);
        out->seq                = cpu_to_le64(sb->seq);

        out->last_mount         = cpu_to_le32(sb->last_mount);
        out->first_bucket       = cpu_to_le16(sb->first_bucket);
        out->keys               = cpu_to_le16(sb->keys);

        for (i = 0; i < sb->keys; i++)
                out->d[i] = cpu_to_le64(sb->d[i]);

        out->csum = csum_set(out);

        pr_debug("ver %llu, flags %llu, seq %llu",
                 sb->version, sb->flags, sb->seq);

        submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

        up(&dc->sb_write_mutex);
}

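/*
 * The sb_write closure pattern, used again below for cache_set superblocks
 * and uuids: a semaphore serializes writers, the closure counts in-flight
 * bios (closure_get() per submitted bio, closure_put() in the endio), and
 * the destructor passed to closure_return_with_destructor() releases the
 * semaphore once all IO has completed. Callers can pass a parent closure to
 * continue asynchronously, or wait with closure_sync().
 */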
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
        struct closure *cl = &dc->sb_write;
        struct bio *bio = &dc->sb_bio;

        down(&dc->sb_write_mutex);
        closure_init(cl, parent);

        bio_reset(bio);
        bio->bi_bdev    = dc->bdev;
        bio->bi_end_io  = write_bdev_super_endio;
        bio->bi_private = dc;

        closure_get(cl);
        __write_super(&dc->sb, bio);

        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
        struct cache *ca = bio->bi_private;

        bch_count_io_errors(ca, bio->bi_error, "writing superblock");
        closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, sb_write);

        up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
        struct closure *cl = &c->sb_write;
        struct cache *ca;
        unsigned i;

        down(&c->sb_write_mutex);
        closure_init(cl, &c->cl);

        c->sb.seq++;

        for_each_cache(ca, c, i) {
                struct bio *bio = &ca->sb_bio;

                ca->sb.version          = BCACHE_SB_VERSION_CDEV_WITH_UUID;
                ca->sb.seq              = c->sb.seq;
                ca->sb.last_mount       = c->sb.last_mount;

                SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

                bio_reset(bio);
                bio->bi_bdev    = ca->bdev;
                bio->bi_end_io  = write_super_endio;
                bio->bi_private = ca;

                closure_get(cl);
                __write_super(&ca->sb, bio);
        }

        closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

        cache_set_err_on(bio->bi_error, c, "accessing uuids");
        bch_bbio_free(bio, c);
        closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

        up(&c->uuid_write_mutex);
}

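/*
 * The uuid_entry array (c->uuids) maps bcache device ids to the UUIDs and
 * labels of the devices occupying those slots; it lives in a single bucket
 * pointed to by c->uuid_bucket. uuid_io() reads or writes the whole array
 * in one go: writes are submitted to every pointer in the key (one replica
 * per pointer), reads stop after the first. It serializes on
 * uuid_write_mutex and tracks bio completion with a closure, like the
 * superblock writers above.
 */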
static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
                    struct bkey *k, struct closure *parent)
{
        struct closure *cl = &c->uuid_write;
        struct uuid_entry *u;
        unsigned i;
        char buf[80];

        BUG_ON(!parent);
        down(&c->uuid_write_mutex);
        closure_init(cl, parent);

        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);

                bio->bi_rw      = REQ_SYNC|REQ_META|op_flags;
                bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

                bio->bi_end_io  = uuid_endio;
                bio->bi_private = cl;
                bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
                bch_bio_map(bio, c->uuids);

                bch_submit_bbio(bio, c, k, i);

                if (op != REQ_OP_WRITE)
                        break;
        }

        bch_extent_to_text(buf, sizeof(buf), k);
        pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
                if (!bch_is_zero(u->uuid, 16))
                        pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
                                 u - c->uuids, u->uuid, u->label,
                                 u->first_reg, u->last_reg, u->invalidated);

        closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
        struct bkey *k = &j->uuid_bucket;

        if (__bch_btree_ptr_invalid(c, k))
                return "bad uuid pointer";

        bkey_copy(&c->uuid_bucket, k);
        uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);

        if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
                struct uuid_entry_v0    *u0 = (void *) c->uuids;
                struct uuid_entry       *u1 = (void *) c->uuids;
                int i;

                closure_sync(cl);

                /*
                 * Since the new uuid entry is bigger than the old, we have to
                 * convert starting at the highest memory address and work down
                 * in order to do it in place
                 */

                for (i = c->nr_uuids - 1;
                     i >= 0;
                     --i) {
                        memcpy(u1[i].uuid,      u0[i].uuid, 16);
                        memcpy(u1[i].label,     u0[i].label, 32);

                        u1[i].first_reg         = u0[i].first_reg;
                        u1[i].last_reg          = u0[i].last_reg;
                        u1[i].invalidated       = u0[i].invalidated;

                        u1[i].flags     = 0;
                        u1[i].sectors   = 0;
                }
        }

        return NULL;
}

static int __uuid_write(struct cache_set *c)
{
        BKEY_PADDED(key) k;
        struct closure cl;
        closure_init_stack(&cl);

        lockdep_assert_held(&bch_register_lock);

        if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
                return 1;

        SET_KEY_SIZE(&k.key, c->sb.bucket_size);
        uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
        closure_sync(&cl);

        bkey_copy(&c->uuid_bucket, &k.key);
        bkey_put(c, &k.key);
        return 0;
}

int bch_uuid_write(struct cache_set *c)
{
        int ret = __uuid_write(c);

        if (!ret)
                bch_journal_meta(c, NULL);

        return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
        struct uuid_entry *u;

        for (u = c->uuids;
             u < c->uuids + c->nr_uuids; u++)
                if (!memcmp(u->uuid, uuid, 16))
                        return u;

        return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
        static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
        return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to
 * implement LRU (and, in the future, other) cache replacement policies; for
 * most purposes it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other,
 * and it's actually the gens that must be written out at specific times -
 * it's no big deal if the priorities don't get written; if we lose them we
 * just reuse buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list;
 * the journal header points to the first bucket, the first bucket points to
 * the second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs
 * out of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely
 * on disk.
 */
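
/*
 * A sketch of the on-disk layout implied by the code below (struct prio_set
 * and struct bucket_disk in bcache.h are the authoritative definitions):
 * each prio bucket starts with a 64 bit csum covering the rest of the
 * bucket, followed by (roughly in this order) a magic number, a sequence
 * number and the bucket number of the next prio bucket in the list, then a
 * packed array of { 16 bit prio, 8 bit gen } entries - one per data bucket,
 * prios_per_bucket(ca) of them per prio bucket.
 */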

static void prio_endio(struct bio *bio)
{
        struct cache *ca = bio->bi_private;

        cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
        bch_bbio_free(bio, ca->set);
        closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
                    unsigned long op_flags)
{
        struct closure *cl = &ca->prio;
        struct bio *bio = bch_bbio_alloc(ca->set);

        closure_init_stack(cl);

        bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
        bio->bi_bdev            = ca->bdev;
        bio->bi_iter.bi_size    = bucket_bytes(ca);

        bio->bi_end_io  = prio_endio;
        bio->bi_private = ca;
        bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
        bch_bio_map(bio, ca->disk_buckets);

        closure_bio_submit(bio, &ca->prio);
        closure_sync(cl);
}

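/*
 * Write out the in-memory prio/gen array. Note the locking dance: the
 * bucket_lock must be held to snapshot each batch of buckets into
 * disk_buckets, but is dropped around the synchronous prio_io() calls, and
 * the freshly written buckets only replace prio_last_buckets[] - making the
 * old ones freeable - after the journal entry referencing them is safely on
 * disk.
 */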
void bch_prio_write(struct cache *ca)
{
        int i;
        struct bucket *b;
        struct closure cl;

        closure_init_stack(&cl);

        lockdep_assert_held(&ca->set->bucket_lock);

        ca->disk_buckets->seq++;

        atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
                        &ca->meta_sectors_written);

        //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
        //       fifo_used(&ca->free_inc), fifo_used(&ca->unused));

        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
                struct prio_set *p = ca->disk_buckets;
                struct bucket_disk *d = p->data;
                struct bucket_disk *end = d + prios_per_bucket(ca);

                for (b = ca->buckets + i * prios_per_bucket(ca);
                     b < ca->buckets + ca->sb.nbuckets && d < end;
                     b++, d++) {
                        d->prio = cpu_to_le16(b->prio);
                        d->gen = b->gen;
                }

                p->next_bucket  = ca->prio_buckets[i + 1];
                p->magic        = pset_magic(&ca->sb);
                p->csum         = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

                bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
                BUG_ON(bucket == -1);

                mutex_unlock(&ca->set->bucket_lock);
                prio_io(ca, bucket, REQ_OP_WRITE, 0);
                mutex_lock(&ca->set->bucket_lock);

                ca->prio_buckets[i] = bucket;
                atomic_dec_bug(&ca->buckets[bucket].pin);
        }

        mutex_unlock(&ca->set->bucket_lock);

        bch_journal_meta(ca->set, &cl);
        closure_sync(&cl);

        mutex_lock(&ca->set->bucket_lock);

        /*
         * Don't want the old priorities to get garbage collected until after we
         * finish writing the new ones, and they're journalled
         */
        for (i = 0; i < prio_buckets(ca); i++) {
                if (ca->prio_last_buckets[i])
                        __bch_bucket_free(ca,
                                &ca->buckets[ca->prio_last_buckets[i]]);

                ca->prio_last_buckets[i] = ca->prio_buckets[i];
        }
}

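/*
 * The read side: walk the on-disk list of prio buckets, starting from the
 * bucket number recorded in the journal header, refilling b->prio and
 * b->gen for every bucket in order. The d == end test triggers the next
 * prio_io() exactly when the previous bucket's entries run out - including
 * on the very first iteration, since d is initialized to end.
 */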
static void prio_read(struct cache *ca, uint64_t bucket)
{
        struct prio_set *p = ca->disk_buckets;
        struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
        struct bucket *b;
        unsigned bucket_nr = 0;

        for (b = ca->buckets;
             b < ca->buckets + ca->sb.nbuckets;
             b++, d++) {
                if (d == end) {
                        ca->prio_buckets[bucket_nr] = bucket;
                        ca->prio_last_buckets[bucket_nr] = bucket;
                        bucket_nr++;

                        prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);

                        if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
                                pr_warn("bad csum reading priorities");

                        if (p->magic != pset_magic(&ca->sb))
                                pr_warn("bad magic reading priorities");

                        bucket = p->next_bucket;
                        d = p->data;
                }

                b->prio = le16_to_cpu(d->prio);
                b->gen = b->last_gc = d->gen;
        }
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
        struct bcache_device *d = b->bd_disk->private_data;
        if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
                return -ENXIO;

        closure_get(&d->cl);
        return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
        struct bcache_device *d = b->private_data;
        closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct bcache_device *d = b->bd_disk->private_data;
        return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
        .open           = open_dev,
        .release        = release_dev,
        .ioctl          = ioctl_dev,
        .owner          = THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
        if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
                closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
        lockdep_assert_held(&bch_register_lock);

        if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
                unsigned i;
                struct cache *ca;

                sysfs_remove_link(&d->c->kobj, d->name);
                sysfs_remove_link(&d->kobj, "cache");

                for_each_cache(ca, d->c, i)
                        bd_unlink_disk_holder(ca->bdev, d->disk);
        }
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
                               const char *name)
{
        unsigned i;
        struct cache *ca;

        for_each_cache(ca, d->c, i)
                bd_link_disk_holder(ca->bdev, d->disk);

        snprintf(d->name, BCACHEDEVNAME_SIZE,
                 "%s%u", name, d->id);

        WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
             sysfs_create_link(&c->kobj, &d->kobj, d->name),
             "Couldn't create device <-> cache set symlinks");

        clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
        lockdep_assert_held(&bch_register_lock);

        if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
                struct uuid_entry *u = d->c->uuids + d->id;

                SET_UUID_FLASH_ONLY(u, 0);
                memcpy(u->uuid, invalid_uuid, 16);
                u->invalidated = cpu_to_le32(get_seconds());
                bch_uuid_write(d->c);
        }

        bcache_device_unlink(d);

        d->c->devices[d->id] = NULL;
        closure_put(&d->c->caching);
        d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
                                 unsigned id)
{
        d->id = id;
        d->c = c;
        c->devices[id] = d;

        closure_get(&c->caching);
}

static void bcache_device_free(struct bcache_device *d)
{
        lockdep_assert_held(&bch_register_lock);

        pr_info("%s stopped", d->disk->disk_name);

        if (d->c)
                bcache_device_detach(d);
        if (d->disk && d->disk->flags & GENHD_FL_UP)
                del_gendisk(d->disk);
        if (d->disk && d->disk->queue)
                blk_cleanup_queue(d->disk->queue);
        if (d->disk) {
                ida_simple_remove(&bcache_minor, d->disk->first_minor);
                put_disk(d->disk);
        }

        if (d->bio_split)
                bioset_free(d->bio_split);
        kvfree(d->full_dirty_stripes);
        kvfree(d->stripe_sectors_dirty);

        closure_debug_destroy(&d->cl);
}

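/*
 * Common setup shared by cached devices and flash-only volumes: allocate
 * the per-stripe dirty counters and full-stripe bitmap (kzalloc while they
 * are small, vzalloc once they aren't), grab a minor, and create the
 * gendisk and request queue. The queue limits are deliberately wide open -
 * bcache splits bios itself (that's what the bio_split bioset is for), so
 * the limits mostly just need to not get in the way.
 */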
static int bcache_device_init(struct bcache_device *d, unsigned block_size,
                              sector_t sectors)
{
        struct request_queue *q;
        size_t n;
        int minor;

        if (!d->stripe_size)
                d->stripe_size = 1 << 31;

        d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

        if (!d->nr_stripes ||
            d->nr_stripes > INT_MAX ||
            d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
                pr_err("nr_stripes too large");
                return -ENOMEM;
        }

        n = d->nr_stripes * sizeof(atomic_t);
        d->stripe_sectors_dirty = n < PAGE_SIZE << 6
                ? kzalloc(n, GFP_KERNEL)
                : vzalloc(n);
        if (!d->stripe_sectors_dirty)
                return -ENOMEM;

        n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
        d->full_dirty_stripes = n < PAGE_SIZE << 6
                ? kzalloc(n, GFP_KERNEL)
                : vzalloc(n);
        if (!d->full_dirty_stripes)
                return -ENOMEM;

        minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
        if (minor < 0)
                return minor;

        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
            !(d->disk = alloc_disk(1))) {
                ida_simple_remove(&bcache_minor, minor);
                return -ENOMEM;
        }

        set_capacity(d->disk, sectors);
        snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);

        d->disk->major          = bcache_major;
        d->disk->first_minor    = minor;
        d->disk->fops           = &bcache_ops;
        d->disk->private_data   = d;

        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                return -ENOMEM;

        blk_queue_make_request(q, NULL);
        d->disk->queue                  = q;
        q->queuedata                    = d;
        q->backing_dev_info.congested_data = d;
        q->limits.max_hw_sectors        = UINT_MAX;
        q->limits.max_sectors           = UINT_MAX;
        q->limits.max_segment_size      = UINT_MAX;
        q->limits.max_segments          = BIO_MAX_PAGES;
        blk_queue_max_discard_sectors(q, UINT_MAX);
        q->limits.discard_granularity   = 512;
        q->limits.io_min                = block_size;
        q->limits.logical_block_size    = block_size;
        q->limits.physical_block_size   = block_size;
        set_bit(QUEUE_FLAG_NONROT,      &d->disk->queue->queue_flags);
        clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
        set_bit(QUEUE_FLAG_DISCARD,     &d->disk->queue->queue_flags);

        blk_queue_write_cache(q, true, true);

        return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
        uint64_t sectors = 0;
        struct cached_dev *dc;

        list_for_each_entry(dc, &c->cached_devs, list)
                sectors += bdev_sectors(dc->bdev);

        c->cached_dev_sectors = sectors;
}

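/*
 * Make a registered backing device visible as /dev/bcacheN: add the
 * gendisk, link it to the backing bdev as a holder, and emit a KOBJ_CHANGE
 * uevent carrying the device's UUID and label so udev rules can set up
 * stable symlinks. The atomic_xchg() on dc->running makes this a one-shot;
 * if the device isn't attached and its on-disk state isn't BDEV_STATE_NONE,
 * the superblock is first marked BDEV_STATE_STALE so stale cached data
 * won't be trusted by a later attach.
 */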
void bch_cached_dev_run(struct cached_dev *dc)
{
        struct bcache_device *d = &dc->disk;
        char buf[SB_LABEL_SIZE + 1];
        char *env[] = {
                "DRIVER=bcache",
                kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
                NULL,
                NULL,
        };

        memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
        buf[SB_LABEL_SIZE] = '\0';
        env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

        if (atomic_xchg(&dc->running, 1)) {
                kfree(env[1]);
                kfree(env[2]);
                return;
        }

        if (!d->c &&
            BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
                struct closure cl;
                closure_init_stack(&cl);

                SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
                bch_write_bdev_super(dc, &cl);
                closure_sync(&cl);
        }

        add_disk(d->disk);
        bd_link_disk_holder(dc->bdev, dc->disk.disk);
        /*
         * Won't show up in the uevent file - use udevadm monitor -e instead;
         * only class / kset properties are persistent.
         */
        kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
        kfree(env[1]);
        kfree(env[2]);

        if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
            sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
                pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
        struct cached_dev *dc = container_of(w, struct cached_dev, detach);
        char buf[BDEVNAME_SIZE];
        struct closure cl;
        closure_init_stack(&cl);

        BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
        BUG_ON(atomic_read(&dc->count));

        mutex_lock(&bch_register_lock);

        memset(&dc->sb.set_uuid, 0, 16);
        SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

        bch_write_bdev_super(dc, &cl);
        closure_sync(&cl);

        bcache_device_detach(&dc->disk);
        list_move(&dc->list, &uncached_devices);

        clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
        clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

        mutex_unlock(&bch_register_lock);

        pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

        /* Drop ref we took in cached_dev_detach() */
        closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
        lockdep_assert_held(&bch_register_lock);

        if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return;

        if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                return;

        /*
         * Block the device from being closed and freed until we're finished
         * detaching
         */
        closure_get(&dc->disk.cl);

        bch_writeback_queue(dc);
        cached_dev_put(dc);
}

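/*
 * Attach a backing device to a cache set. The subtle part is the uuid slot
 * handling: a slot whose device comes back in state STALE or NONE is
 * invalidated and a fresh one allocated; a DIRTY device must find its old
 * slot or the attach is refused (its cached data would be lost); and a
 * brand new pairing writes the set's UUID into the backing device's
 * superblock so it survives reboots. The writeback thread is started while
 * writeback_lock is held for writing, and only released once the device is
 * fully wired up.
 */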
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
        uint32_t rtime = cpu_to_le32(get_seconds());
        struct uuid_entry *u;
        char buf[BDEVNAME_SIZE];

        bdevname(dc->bdev, buf);

        if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
                return -ENOENT;

        if (dc->disk.c) {
                pr_err("Can't attach %s: already attached", buf);
                return -EINVAL;
        }

        if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
                pr_err("Can't attach %s: shutting down", buf);
                return -EINVAL;
        }

        if (dc->sb.block_size < c->sb.block_size) {
                /* Will die */
                pr_err("Couldn't attach %s: block size less than set's block size",
                       buf);
                return -EINVAL;
        }

        u = uuid_find(c, dc->sb.uuid);

        if (u &&
            (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
             BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
                memcpy(u->uuid, invalid_uuid, 16);
                u->invalidated = cpu_to_le32(get_seconds());
                u = NULL;
        }

        if (!u) {
                if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
                        pr_err("Couldn't find uuid for %s in set", buf);
                        return -ENOENT;
                }

                u = uuid_find_empty(c);
                if (!u) {
                        pr_err("Not caching %s, no room for UUID", buf);
                        return -EINVAL;
                }
        }

        /* Deadlocks since we're called via sysfs...
        sysfs_remove_file(&dc->kobj, &sysfs_attach);
         */

        if (bch_is_zero(u->uuid, 16)) {
                struct closure cl;
                closure_init_stack(&cl);

                memcpy(u->uuid, dc->sb.uuid, 16);
                memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
                u->first_reg = u->last_reg = rtime;
                bch_uuid_write(c);

                memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
                SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

                bch_write_bdev_super(dc, &cl);
                closure_sync(&cl);
        } else {
                u->last_reg = rtime;
                bch_uuid_write(c);
        }

        bcache_device_attach(&dc->disk, c, u - c->uuids);
        list_move(&dc->list, &c->cached_devs);
        calc_cached_dev_sectors(c);

        smp_wmb();
        /*
         * dc->c must be set before dc->count != 0 - paired with the mb in
         * cached_dev_get()
         */
        atomic_set(&dc->count, 1);

        /* Block writeback thread, but spawn it */
        down_write(&dc->writeback_lock);
        if (bch_cached_dev_writeback_start(dc)) {
                up_write(&dc->writeback_lock);
                return -ENOMEM;
        }

        if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
                bch_sectors_dirty_init(dc);
                atomic_set(&dc->has_dirty, 1);
                atomic_inc(&dc->count);
                bch_writeback_queue(dc);
        }

        bch_cached_dev_run(dc);
        bcache_device_link(&dc->disk, c, "bdev");

        /* Allow the writeback thread to proceed */
        up_write(&dc->writeback_lock);

        pr_info("Caching %s as %s on set %pU",
                bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
                dc->disk.c->sb.set_uuid);
        return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        kfree(dc);
        module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

        cancel_delayed_work_sync(&dc->writeback_rate_update);
        if (!IS_ERR_OR_NULL(dc->writeback_thread))
                kthread_stop(dc->writeback_thread);

        mutex_lock(&bch_register_lock);

        if (atomic_read(&dc->running))
                bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
        bcache_device_free(&dc->disk);
        list_del(&dc->list);

        mutex_unlock(&bch_register_lock);

        if (!IS_ERR_OR_NULL(dc->bdev))
                blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

        wake_up(&unregister_wait);

        kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
        struct bcache_device *d = &dc->disk;

        mutex_lock(&bch_register_lock);
        bcache_device_unlink(d);
        mutex_unlock(&bch_register_lock);

        bch_cache_accounting_destroy(&dc->accounting);
        kobject_del(&d->kobj);

        continue_at(cl, cached_dev_free, system_wq);
}

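/*
 * One-time initialization of a struct cached_dev: lifetime closure and
 * kobject, detach work, the recent-IO tracking used for sequential IO
 * detection (io_lru/io_hash, with a 4 MB default sequential_cutoff), and
 * the generic bcache_device sized to the backing partition minus the data
 * offset. The stripe size is inherited from the backing queue's io_opt so
 * writeback can try to respect RAID stripe boundaries.
 */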
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
        int ret;
        struct io *io;
        struct request_queue *q = bdev_get_queue(dc->bdev);

        __module_get(THIS_MODULE);
        INIT_LIST_HEAD(&dc->list);
        closure_init(&dc->disk.cl, NULL);
        set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
        kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
        INIT_WORK(&dc->detach, cached_dev_detach_finish);
        sema_init(&dc->sb_write_mutex, 1);
        INIT_LIST_HEAD(&dc->io_lru);
        spin_lock_init(&dc->io_lock);
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

        dc->sequential_cutoff           = 4 << 20;

        for (io = dc->io; io < dc->io + RECENT_IO; io++) {
                list_add(&io->lru, &dc->io_lru);
                hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
        }

        dc->disk.stripe_size = q->limits.io_opt >> 9;

        if (dc->disk.stripe_size)
                dc->partial_stripes_expensive =
                        q->limits.raid_partial_stripes_expensive;

        ret = bcache_device_init(&dc->disk, block_size,
                         dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
        if (ret)
                return ret;

        set_capacity(dc->disk.disk,
                     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

        dc->disk.disk->queue->backing_dev_info.ra_pages =
                max(dc->disk.disk->queue->backing_dev_info.ra_pages,
                    q->backing_dev_info.ra_pages);

        bch_cached_dev_request_init(dc);
        bch_cached_dev_writeback_init(dc);
        return 0;
}

/* Cached device - bcache superblock */

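/*
 * Called from the registration path once a superblock has been read and
 * identified as a backing device. Takes ownership of the bdev and grabs an
 * extra reference on the superblock page since sb_bio keeps pointing at it,
 * then offers the device to every known cache set -
 * bch_cached_dev_attach() quietly returns -ENOENT for sets whose UUID
 * doesn't match. Devices in state NONE or STALE can run right away in
 * passthrough mode; CLEAN/DIRTY ones wait for their cache set to appear.
 */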
static void register_bdev(struct cache_sb *sb, struct page *sb_page,
                                 struct block_device *bdev,
                                 struct cached_dev *dc)
{
        char name[BDEVNAME_SIZE];
        const char *err = "cannot allocate memory";
        struct cache_set *c;

        memcpy(&dc->sb, sb, sizeof(struct cache_sb));
        dc->bdev = bdev;
        dc->bdev->bd_holder = dc;

        bio_init(&dc->sb_bio);
        dc->sb_bio.bi_max_vecs  = 1;
        dc->sb_bio.bi_io_vec    = dc->sb_bio.bi_inline_vecs;
        dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
        get_page(sb_page);

        if (cached_dev_init(dc, sb->block_size << 9))
                goto err;

        err = "error creating kobject";
        if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
                        "bcache"))
                goto err;
        if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
                goto err;

        pr_info("registered backing device %s", bdevname(bdev, name));

        list_add(&dc->list, &uncached_devices);
        list_for_each_entry(c, &bch_cache_sets, list)
                bch_cached_dev_attach(dc, c);

        if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
            BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
                bch_cached_dev_run(dc);

        return;
err:
        pr_notice("error opening %s: %s", bdevname(bdev, name), err);
        bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
        struct bcache_device *d = container_of(cl, struct bcache_device, cl);
        mutex_lock(&bch_register_lock);
        bcache_device_free(d);
        mutex_unlock(&bch_register_lock);
        kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
        struct bcache_device *d = container_of(cl, struct bcache_device, cl);

        mutex_lock(&bch_register_lock);
        bcache_device_unlink(d);
        mutex_unlock(&bch_register_lock);
        kobject_del(&d->kobj);
        continue_at(cl, flash_dev_free, system_wq);
}

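/*
 * Flash-only volumes are bcache devices backed purely by cache set space:
 * no backing device, just a uuid_entry flagged UUID_FLASH_ONLY with a size
 * in sectors. flash_dev_run() instantiates the block device for one such
 * entry; flash_devs_run() does so for every flagged entry when the cache
 * set starts up.
 */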
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
        struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
                                          GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        closure_init(&d->cl, NULL);
        set_closure_fn(&d->cl, flash_dev_flush, system_wq);

        kobject_init(&d->kobj, &bch_flash_dev_ktype);

        if (bcache_device_init(d, block_bytes(c), u->sectors))
                goto err;

        bcache_device_attach(d, c, u - c->uuids);
        bch_flash_dev_request_init(d);
        add_disk(d->disk);

        if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
                goto err;

        bcache_device_link(d, c, "volume");

        return 0;
err:
        kobject_put(&d->kobj);
        return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
        int ret = 0;
        struct uuid_entry *u;

        for (u = c->uuids;
             u < c->uuids + c->nr_uuids && !ret;
             u++)
                if (UUID_FLASH_ONLY(u))
                        ret = flash_dev_run(c, u);

        return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
        struct uuid_entry *u;

        if (test_bit(CACHE_SET_STOPPING, &c->flags))
                return -EINTR;

        if (!test_bit(CACHE_SET_RUNNING, &c->flags))
                return -EPERM;

        u = uuid_find_empty(c);
        if (!u) {
                pr_err("Can't create volume, no room for UUID");
                return -EINVAL;
        }

        get_random_bytes(u->uuid, 16);
        memset(u->label, 0, 32);
        u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

        SET_UUID_FLASH_ONLY(u, 1);
        u->sectors = size >> 9;

        bch_uuid_write(c);

        return flash_dev_run(c, u);
}

/* Cache set */

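/*
 * Central error path for a cache set: log the printf-style reason and tear
 * the whole set down, or panic outright if the on_error policy is
 * ON_ERROR_PANIC. Returns false when a shutdown is already in progress so
 * callers can avoid redundant teardown.
 */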
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
        va_list args;

        if (c->on_error != ON_ERROR_PANIC &&
            test_bit(CACHE_SET_STOPPING, &c->flags))
                return false;

        /* XXX: we can be called from atomic context
        acquire_console_sem();
        */

        printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);

        printk(", disabling caching\n");

        if (c->on_error == ON_ERROR_PANIC)
                panic("panic forced after error\n");

        bch_cache_set_unregister(c);
        return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);
        kfree(c);
        module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, cl);
        struct cache *ca;
        unsigned i;

        if (!IS_ERR_OR_NULL(c->debug))
                debugfs_remove(c->debug);

        bch_open_buckets_free(c);
        bch_btree_cache_free(c);
        bch_journal_free(c);

        for_each_cache(ca, c, i)
                if (ca) {
                        ca->set = NULL;
                        c->cache[ca->sb.nr_this_dev] = NULL;
                        kobject_put(&ca->kobj);
                }

        bch_bset_sort_state_free(&c->sort);
        free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

        if (c->moving_gc_wq)
                destroy_workqueue(c->moving_gc_wq);
        if (c->bio_split)
                bioset_free(c->bio_split);
        if (c->fill_iter)
                mempool_destroy(c->fill_iter);
        if (c->bio_meta)
                mempool_destroy(c->bio_meta);
        if (c->search)
                mempool_destroy(c->search);
        kfree(c->devices);

        mutex_lock(&bch_register_lock);
        list_del(&c->list);
        mutex_unlock(&bch_register_lock);

        pr_info("Cache set %pU unregistered", c->sb.set_uuid);
        wake_up(&unregister_wait);

        closure_debug_destroy(&c->cl);
        kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, caching);
        struct cache *ca;
        struct btree *b;
        unsigned i;

        if (!c)
                closure_return(cl);

        bch_cache_accounting_destroy(&c->accounting);

        kobject_put(&c->internal);
        kobject_del(&c->kobj);

        if (c->gc_thread)
                kthread_stop(c->gc_thread);

        if (!IS_ERR_OR_NULL(c->root))
                list_add(&c->root->list, &c->btree_cache);

        /* Should skip this if we're unregistering because of an error */
        list_for_each_entry(b, &c->btree_cache, list) {
                mutex_lock(&b->write_lock);
                if (btree_node_dirty(b))
                        __bch_btree_node_write(b, NULL);
                mutex_unlock(&b->write_lock);
        }

        for_each_cache(ca, c, i)
                if (ca->alloc_thread)
                        kthread_stop(ca->alloc_thread);

        if (c->journal.cur) {
                cancel_delayed_work_sync(&c->journal.work);
                /* flush last journal entry if needed */
                c->journal.work.work.func(&c->journal.work.work);
        }

        closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, caching);
        struct cached_dev *dc;
        size_t i;

        mutex_lock(&bch_register_lock);

        for (i = 0; i < c->nr_uuids; i++)
                if (c->devices[i]) {
                        if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
                            test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
                                dc = container_of(c->devices[i],
                                                  struct cached_dev, disk);
                                bch_cached_dev_detach(dc);
                        } else {
                                bcache_device_stop(c->devices[i]);
                        }
                }

        mutex_unlock(&bch_register_lock);

        continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
        if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
                closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
        set_bit(CACHE_SET_UNREGISTERING, &c->flags);
        bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)                      \
        ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

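/*
 * Allocate and initialize a cache_set from a validated superblock. Two
 * nested closures govern its lifetime: c->caching runs
 * __cache_set_unregister -> cache_set_flush when the set is stopped
 * (detaching or stopping member devices, flushing dirty btree nodes and the
 * journal), and only when that finishes does the parent c->cl run
 * cache_set_free to release everything. Nothing is read from disk here;
 * that's run_cache_set()'s job.
 */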
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
        int iter_size;
        struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
        if (!c)
                return NULL;

        __module_get(THIS_MODULE);
        closure_init(&c->cl, NULL);
        set_closure_fn(&c->cl, cache_set_free, system_wq);

        closure_init(&c->caching, &c->cl);
        set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

        /* Maybe create continue_at_noreturn() and use it here? */
        closure_set_stopped(&c->cl);
        closure_put(&c->cl);

        kobject_init(&c->kobj, &bch_cache_set_ktype);
        kobject_init(&c->internal, &bch_cache_set_internal_ktype);

        bch_cache_accounting_init(&c->accounting, &c->cl);

        memcpy(c->sb.set_uuid, sb->set_uuid, 16);
        c->sb.block_size        = sb->block_size;
        c->sb.bucket_size       = sb->bucket_size;
        c->sb.nr_in_set         = sb->nr_in_set;
        c->sb.last_mount        = sb->last_mount;
        c->bucket_bits          = ilog2(sb->bucket_size);
        c->block_bits           = ilog2(sb->block_size);
        c->nr_uuids             = bucket_bytes(c) / sizeof(struct uuid_entry);

        c->btree_pages          = bucket_pages(c);
        if (c->btree_pages > BTREE_MAX_PAGES)
                c->btree_pages = max_t(int, c->btree_pages / 4,
                                       BTREE_MAX_PAGES);

        sema_init(&c->sb_write_mutex, 1);
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->btree_cache_wait);
        init_waitqueue_head(&c->bucket_wait);
        sema_init(&c->uuid_write_mutex, 1);

        spin_lock_init(&c->btree_gc_time.lock);
        spin_lock_init(&c->btree_split_time.lock);
        spin_lock_init(&c->btree_read_time.lock);

        bch_moving_init_cache_set(c);

        INIT_LIST_HEAD(&c->list);
        INIT_LIST_HEAD(&c->cached_devs);
        INIT_LIST_HEAD(&c->btree_cache);
        INIT_LIST_HEAD(&c->btree_cache_freeable);
        INIT_LIST_HEAD(&c->btree_cache_freed);
        INIT_LIST_HEAD(&c->data_buckets);

        c->search = mempool_create_slab_pool(32, bch_search_cache);
        if (!c->search)
                goto err;

        iter_size = (sb->bucket_size / sb->block_size + 1) *
                sizeof(struct btree_iter_set);

        if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
            !(c->bio_meta = mempool_create_kmalloc_pool(2,
                                sizeof(struct bbio) + sizeof(struct bio_vec) *
                                bucket_pages(c))) ||
            !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
            !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
            !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
            !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||
            bch_journal_alloc(c) ||
            bch_btree_cache_alloc(c) ||
            bch_open_buckets_alloc(c) ||
            bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
                goto err;

        c->congested_read_threshold_us  = 2000;
        c->congested_write_threshold_us = 20000;
        c->error_limit  = 8 << IO_ERROR_SHIFT;

        return c;
err:
        bch_cache_set_unregister(c);
        return NULL;
}

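/*
 * Bring a cache set online. Two paths: if CACHE_SYNC is set there is valid
 * persistent metadata, so read the journal, load bucket prios/gens, fetch
 * the btree root and uuids, check the btree and replay the journal.
 * Otherwise the cache is fresh: invalidate everything, pick journal
 * buckets, write initial prios, allocate a uuid bucket and an empty btree
 * root, and only then flip CACHE_SYNC on so that the first journal entry is
 * written with everything already in place. The ordering constraints are
 * spelled out in the comments within.
 */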
1540 static void run_cache_set(struct cache_set *c)
1541 {
1542         const char *err = "cannot allocate memory";
1543         struct cached_dev *dc, *t;
1544         struct cache *ca;
1545         struct closure cl;
1546         unsigned i;
1547
1548         closure_init_stack(&cl);
1549
1550         for_each_cache(ca, c, i)
1551                 c->nbuckets += ca->sb.nbuckets;
1552
1553         if (CACHE_SYNC(&c->sb)) {
1554                 LIST_HEAD(journal);
1555                 struct bkey *k;
1556                 struct jset *j;
1557
1558                 err = "cannot allocate memory for journal";
1559                 if (bch_journal_read(c, &journal))
1560                         goto err;
1561
1562                 pr_debug("btree_journal_read() done");
1563
1564                 err = "no journal entries found";
1565                 if (list_empty(&journal))
1566                         goto err;
1567
1568                 j = &list_entry(journal.prev, struct journal_replay, list)->j;
1569
1570                 err = "IO error reading priorities";
1571                 for_each_cache(ca, c, i)
1572                         prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
1573
1574                 /*
1575                  * If prio_read() fails it'll call cache_set_error and we'll
1576                  * tear everything down right away, but if we perhaps checked
1577                  * sooner we could avoid journal replay.
1578                  */
1579
1580                 k = &j->btree_root;
1581
1582                 err = "bad btree root";
1583                 if (__bch_btree_ptr_invalid(c, k))
1584                         goto err;
1585
1586                 err = "error reading btree root";
1587                 c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
1588                 if (IS_ERR_OR_NULL(c->root))
1589                         goto err;
1590
1591                 list_del_init(&c->root->list);
1592                 rw_unlock(true, c->root);
1593
1594                 err = uuid_read(c, j, &cl);
1595                 if (err)
1596                         goto err;
1597
1598                 err = "error in recovery";
1599                 if (bch_btree_check(c))
1600                         goto err;
1601
1602                 bch_journal_mark(c, &journal);
1603                 bch_initial_gc_finish(c);
1604                 pr_debug("btree_check() done");
1605
1606                 /*
1607                  * bcache_journal_next() can't happen sooner, or
1608                  * btree_gc_finish() will give spurious errors about last_gc >
1609                  * gc_gen - this is a hack but oh well.
1610                  */
1611                 bch_journal_next(&c->journal);
1612
1613                 err = "error starting allocator thread";
1614                 for_each_cache(ca, c, i)
1615                         if (bch_cache_allocator_start(ca))
1616                                 goto err;
1617
1618                 /*
1619                  * First place it's safe to allocate: bch_btree_check() and
1620                  * bch_initial_gc_finish() have to run before we have buckets
1621                  * to allocate, and bch_bucket_alloc_set() might cause a
1622                  * journal entry to be written, so bch_journal_next() has to
1623                  * be called first.
1624                  *
1625                  * If the uuids were in the old format we have to rewrite them
1626                  * before the next journal entry is written:
1627                  */
1628                 if (j->version < BCACHE_JSET_VERSION_UUID)
1629                         __uuid_write(c);
1630
1631                 bch_journal_replay(c, &journal);
1632         } else {
1633                 pr_notice("invalidating existing data");
1634
1635                 for_each_cache(ca, c, i) {
1636                         unsigned j;
1637
1638                         ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
1639                                               2, SB_JOURNAL_BUCKETS);
1640
1641                         for (j = 0; j < ca->sb.keys; j++)
1642                                 ca->sb.d[j] = ca->sb.first_bucket + j;
1643                 }
1644
1645                 bch_initial_gc_finish(c);
1646
1647                 err = "error starting allocator thread";
1648                 for_each_cache(ca, c, i)
1649                         if (bch_cache_allocator_start(ca))
1650                                 goto err;
1651
1652                 mutex_lock(&c->bucket_lock);
1653                 for_each_cache(ca, c, i)
1654                         bch_prio_write(ca);
1655                 mutex_unlock(&c->bucket_lock);
1656
1657                 err = "cannot allocate new UUID bucket";
1658                 if (__uuid_write(c))
1659                         goto err;
1660
1661                 err = "cannot allocate new btree root";
1662                 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
1663                 if (IS_ERR_OR_NULL(c->root))
1664                         goto err;
1665
1666                 mutex_lock(&c->root->write_lock);
1667                 bkey_copy_key(&c->root->key, &MAX_KEY);
1668                 bch_btree_node_write(c->root, &cl);
1669                 mutex_unlock(&c->root->write_lock);
1670
1671                 bch_btree_set_root(c->root);
1672                 rw_unlock(true, c->root);
1673
1674                 /*
1675                  * We don't want to write the first journal entry until
1676                  * everything is set up - fortunately journal entries won't be
1677                  * written until the SET_CACHE_SYNC() here:
1678                  */
1679                 SET_CACHE_SYNC(&c->sb, true);
1680
1681                 bch_journal_next(&c->journal);
1682                 bch_journal_meta(c, &cl);
1683         }
1684
1685         err = "error starting gc thread";
1686         if (bch_gc_thread_start(c))
1687                 goto err;
1688
1689         closure_sync(&cl);
1690         c->sb.last_mount = get_seconds();
1691         bcache_write_super(c);
1692
1693         list_for_each_entry_safe(dc, t, &uncached_devices, list)
1694                 bch_cached_dev_attach(dc, c);
1695
1696         flash_devs_run(c);
1697
1698         set_bit(CACHE_SET_RUNNING, &c->flags);
1699         return;
1700 err:
1701         closure_sync(&cl);
1702         /* XXX: test this, it's broken */
1703         bch_cache_set_error(c, "%s", err);
1704 }
1705
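/*
 * A cache device can only join a set whose superblock agrees on the basic
 * geometry: block size, bucket size and the number of devices in the set.
 */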
1706 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
1707 {
1708         return ca->sb.block_size        == c->sb.block_size &&
1709                 ca->sb.bucket_size      == c->sb.bucket_size &&
1710                 ca->sb.nr_in_set        == c->sb.nr_in_set;
1711 }
1712
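/*
 * Find the cache_set matching ca's set_uuid, or allocate a new one (and
 * publish it in sysfs) if this is the first member seen.  The set's
 * in-memory superblock is taken from the member with the highest sequence
 * number, and run_cache_set() is kicked off once all sb.nr_in_set members
 * have registered.  Returns NULL on success, an error string on failure.
 */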
1713 static const char *register_cache_set(struct cache *ca)
1714 {
1715         char buf[12];
1716         const char *err = "cannot allocate memory";
1717         struct cache_set *c;
1718
1719         list_for_each_entry(c, &bch_cache_sets, list)
1720                 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
1721                         if (c->cache[ca->sb.nr_this_dev])
1722                                 return "duplicate cache set member";
1723
1724                         if (!can_attach_cache(ca, c))
1725                                 return "cache sb does not match set";
1726
1727                         if (!CACHE_SYNC(&ca->sb))
1728                                 SET_CACHE_SYNC(&c->sb, false);
1729
1730                         goto found;
1731                 }
1732
1733         c = bch_cache_set_alloc(&ca->sb);
1734         if (!c)
1735                 return err;
1736
1737         err = "error creating kobject";
1738         if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
1739             kobject_add(&c->internal, &c->kobj, "internal"))
1740                 goto err;
1741
1742         if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
1743                 goto err;
1744
1745         bch_debug_init_cache_set(c);
1746
1747         list_add(&c->list, &bch_cache_sets);
1748 found:
1749         sprintf(buf, "cache%i", ca->sb.nr_this_dev);
1750         if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
1751             sysfs_create_link(&c->kobj, &ca->kobj, buf))
1752                 goto err;
1753
1754         if (ca->sb.seq > c->sb.seq) {
1755                 c->sb.version           = ca->sb.version;
1756                 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
1757                 c->sb.flags             = ca->sb.flags;
1758                 c->sb.seq               = ca->sb.seq;
1759                 pr_debug("set version = %llu", c->sb.version);
1760         }
1761
1762         kobject_get(&ca->kobj);
1763         ca->set = c;
1764         ca->set->cache[ca->sb.nr_this_dev] = ca;
1765         c->cache_by_alloc[c->caches_loaded++] = ca;
1766
1767         if (c->caches_loaded == c->sb.nr_in_set)
1768                 run_cache_set(c);
1769
1770         return NULL;
1771 err:
1772         bch_cache_set_unregister(c);
1773         return err;
1774 }
1775
1776 /* Cache device */
1777
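/*
 * Kobject release callback for struct cache: detach from the cache set,
 * free everything cache_alloc() set up, drop the superblock page and the
 * exclusive bdev reference, then free the cache itself and the module
 * reference taken in cache_alloc().
 */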
1778 void bch_cache_release(struct kobject *kobj)
1779 {
1780         struct cache *ca = container_of(kobj, struct cache, kobj);
1781         unsigned i;
1782
1783         if (ca->set) {
1784                 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
1785                 ca->set->cache[ca->sb.nr_this_dev] = NULL;
1786         }
1787
1788         free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
1789         kfree(ca->prio_buckets);
1790         vfree(ca->buckets);
1791
1792         free_heap(&ca->heap);
1793         free_fifo(&ca->free_inc);
1794
1795         for (i = 0; i < RESERVE_NR; i++)
1796                 free_fifo(&ca->free[i]);
1797
1798         if (ca->sb_bio.bi_inline_vecs[0].bv_page)
1799                 put_page(ca->sb_bio.bi_inline_vecs[0].bv_page);
1800
1801         if (!IS_ERR_OR_NULL(ca->bdev))
1802                 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1803
1804         kfree(ca);
1805         module_put(THIS_MODULE);
1806 }
1807
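/*
 * Allocate the in-memory structures for a cache device, all sized from
 * ca->sb.nbuckets.  The base reserve size is the next power of two above
 * nbuckets divided by 1024: for example, 500000 buckets round up to
 * 524288, giving movinggc/none reserves of 512 entries, a free_inc fifo
 * of 2048 and a heap of 4096.  Returns 0 or -ENOMEM; a partially
 * allocated cache is safe to tear down via bch_cache_release().
 */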
1808 static int cache_alloc(struct cache *ca)
1809 {
1810         size_t free;
1811         struct bucket *b;
1812
1813         __module_get(THIS_MODULE);
1814         kobject_init(&ca->kobj, &bch_cache_ktype);
1815
1816         bio_init(&ca->journal.bio);
1817         ca->journal.bio.bi_max_vecs = 8;
1818         ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
1819
1820         free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
1821
1822         if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
1823             !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
1824             !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
1825             !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
1826             !init_fifo(&ca->free_inc,   free << 2, GFP_KERNEL) ||
1827             !init_heap(&ca->heap,       free << 3, GFP_KERNEL) ||
1828             !(ca->buckets       = vzalloc(sizeof(struct bucket) *
1829                                           ca->sb.nbuckets)) ||
1830             !(ca->prio_buckets  = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
1831                                           2, GFP_KERNEL)) ||
1832             !(ca->disk_buckets  = alloc_bucket_pages(GFP_KERNEL, ca)))
1833                 return -ENOMEM;
1834
1835         ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
1836
1837         for_each_bucket(b, ca)
1838                 atomic_set(&b->pin, 0);
1839
1840         return 0;
1841 }
1842
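/*
 * register_cache() takes ownership of the exclusive bdev reference and
 * takes its own reference on the superblock page; from here on,
 * bch_cache_release() is responsible for dropping both, on the failure
 * paths as well as at unregister time.
 */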
1843 static int register_cache(struct cache_sb *sb, struct page *sb_page,
1844                                 struct block_device *bdev, struct cache *ca)
1845 {
1846         char name[BDEVNAME_SIZE];
1847         const char *err = NULL;
1848         int ret = 0;
1849
1850         memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1851         ca->bdev = bdev;
1852         ca->bdev->bd_holder = ca;
1853
1854         bio_init(&ca->sb_bio);
1855         ca->sb_bio.bi_max_vecs  = 1;
1856         ca->sb_bio.bi_io_vec    = ca->sb_bio.bi_inline_vecs;
1857         ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1858         get_page(sb_page);
1859
1860         if (blk_queue_discard(bdev_get_queue(ca->bdev)))
1861                 ca->discard = CACHE_DISCARD(&ca->sb);
1862
1863         ret = cache_alloc(ca);
1864         if (ret != 0) {
                /*
                 * Unwind through kobject_put(): bch_cache_release() copes
                 * with a partially initialized cache and drops the bdev
                 * and module references for us.
                 */
                err = "error allocating cache structures";
                goto out;
        }
1866
1867         if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj,
                        "bcache")) {
1868                 err = "error calling kobject_add";
1869                 ret = -ENOMEM;
1870                 goto out;
1871         }
1872
1873         mutex_lock(&bch_register_lock);
1874         err = register_cache_set(ca);
1875         mutex_unlock(&bch_register_lock);
1876
1877         if (err) {
1878                 ret = -ENODEV;
1879                 goto out;
1880         }
1881
1882         pr_info("registered cache device %s", bdevname(bdev, name));
1883
1884 out:
1885         kobject_put(&ca->kobj);
1886
1887 err:
1888         if (err)
1889                 pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1890
1891         return ret;
1892 }
1893
1894 /* Global interfaces/init */
1895
1896 static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
1897                                const char *, size_t);
1898
1899 kobj_attribute_write(register,          register_bcache);
1900 kobj_attribute_write(register_quiet,    register_bcache);
1901
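/*
 * Helpers for the -EBUSY path in register_bcache(): decide whether a
 * device that could not be opened exclusively is busy because bcache
 * itself already holds it, as a backing device or as a cache.  Called
 * under bch_register_lock.
 */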
1902 static bool bch_is_open_backing(struct block_device *bdev)
{
1903         struct cache_set *c, *tc;
1904         struct cached_dev *dc, *t;
1905
1906         list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
1907                 list_for_each_entry_safe(dc, t, &c->cached_devs, list)
1908                         if (dc->bdev == bdev)
1909                                 return true;
1910         list_for_each_entry_safe(dc, t, &uncached_devices, list)
1911                 if (dc->bdev == bdev)
1912                         return true;
1913         return false;
1914 }
1915
1916 static bool bch_is_open_cache(struct block_device *bdev)
{
1917         struct cache_set *c, *tc;
1918         struct cache *ca;
1919         unsigned i;
1920
1921         list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
1922                 for_each_cache(ca, c, i)
1923                         if (ca->bdev == bdev)
1924                                 return true;
1925         return false;
1926 }
1927
1928 static bool bch_is_open(struct block_device *bdev)
{
1929         return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
1930 }
1931
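/*
 * Write handler for /sys/fs/bcache/register (and register_quiet):
 * userspace registers a cache or backing device by writing its path,
 * e.g.
 *
 *	echo /dev/sdb > /sys/fs/bcache/register
 *
 * The superblock decides what the device is: SB_IS_BDEV() means a
 * backing device, anything else a cache.  register_quiet differs only
 * in staying silent (and reporting success) when the device is already
 * open.
 */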
1932 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1933                                const char *buffer, size_t size)
1934 {
1935         ssize_t ret = size;
1936         const char *err = "cannot allocate memory";
1937         char *path = NULL;
1938         struct cache_sb *sb = NULL;
1939         struct block_device *bdev = NULL;
1940         struct page *sb_page = NULL;
1941
1942         if (!try_module_get(THIS_MODULE))
1943                 return -EBUSY;
1944
1945         if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
1946             !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
1947                 goto err;
1948
1949         err = "failed to open device";
1950         bdev = blkdev_get_by_path(strim(path),
1951                                   FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1952                                   sb);
1953         if (IS_ERR(bdev)) {
1954                 if (bdev == ERR_PTR(-EBUSY)) {
1955                         bdev = lookup_bdev(strim(path));
1956                         mutex_lock(&bch_register_lock);
1957                         if (!IS_ERR(bdev) && bch_is_open(bdev))
1958                                 err = "device already registered";
1959                         else
1960                                 err = "device busy";
1961                         mutex_unlock(&bch_register_lock);
                        /* drop the extra reference from lookup_bdev() */
                        if (!IS_ERR(bdev))
                                bdput(bdev);
1962                         if (attr == &ksysfs_register_quiet)
1963                                 goto out;
1964                 }
1965                 goto err;
1966         }
1967
1968         err = "failed to set blocksize";
1969         if (set_blocksize(bdev, 4096))
1970                 goto err_close;
1971
1972         err = read_super(sb, bdev, &sb_page);
1973         if (err)
1974                 goto err_close;
1975
        err = "cannot allocate memory";
1976         if (SB_IS_BDEV(sb)) {
1977                 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1978                 if (!dc)
1979                         goto err_close;
1980
1981                 mutex_lock(&bch_register_lock);
1982                 register_bdev(sb, sb_page, bdev, dc);
1983                 mutex_unlock(&bch_register_lock);
1984         } else {
1985                 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
1986                 if (!ca)
1987                         goto err_close;
1988
                /*
                 * register_cache() owns the bdev reference and drops it
                 * on every failure path, so don't release the device
                 * again here.
                 */
1989                 if (register_cache(sb, sb_page, bdev, ca) != 0) {
                        ret = -EINVAL;
                        goto out;
                }
1991         }
1992 out:
1993         if (sb_page)
1994                 put_page(sb_page);
1995         kfree(sb);
1996         kfree(path);
1997         module_put(THIS_MODULE);
1998         return ret;
1999
2000 err_close:
2001         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2002 err:
2003         pr_info("error opening %s: %s", path, err);
2004         ret = -EINVAL;
2005         goto out;
2006 }
2007
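/*
 * Reboot notifier: on shutdown, halt or power-off, stop every cache set
 * and every not-yet-attached backing device, then wait up to two seconds
 * for them to finish closing before letting the reboot proceed.
 */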
2008 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
2009 {
2010         if (code == SYS_DOWN ||
2011             code == SYS_HALT ||
2012             code == SYS_POWER_OFF) {
2013                 DEFINE_WAIT(wait);
2014                 unsigned long start = jiffies;
2015                 bool stopped = false;
2016
2017                 struct cache_set *c, *tc;
2018                 struct cached_dev *dc, *tdc;
2019
2020                 mutex_lock(&bch_register_lock);
2021
2022                 if (list_empty(&bch_cache_sets) &&
2023                     list_empty(&uncached_devices))
2024                         goto out;
2025
2026                 pr_info("Stopping all devices:");
2027
2028                 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2029                         bch_cache_set_stop(c);
2030
2031                 list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
2032                         bcache_device_stop(&dc->disk);
2033
2034                 /*
                 * What's a condition variable?  Poll until all devices
                 * have stopped or the two-second timeout expires.
                 */
2035                 while (1) {
2036                         long timeout = start + 2 * HZ - jiffies;
2037
2038                         stopped = list_empty(&bch_cache_sets) &&
2039                                 list_empty(&uncached_devices);
2040
2041                         if (timeout < 0 || stopped)
2042                                 break;
2043
2044                         prepare_to_wait(&unregister_wait, &wait,
2045                                         TASK_UNINTERRUPTIBLE);
2046
2047                         mutex_unlock(&bch_register_lock);
2048                         schedule_timeout(timeout);
2049                         mutex_lock(&bch_register_lock);
2050                 }
2051
2052                 finish_wait(&unregister_wait, &wait);
2053
2054                 if (stopped)
2055                         pr_info("All devices stopped");
2056                 else
2057                         pr_notice("Timeout waiting for devices to be closed");
2058 out:
2059                 mutex_unlock(&bch_register_lock);
2060         }
2061
2062         return NOTIFY_DONE;
2063 }
2064
2065 static struct notifier_block reboot = {
2066         .notifier_call  = bcache_reboot,
2067         .priority       = INT_MAX, /* before any real devices */
2068 };
2069
2070 static void bcache_exit(void)
2071 {
2072         bch_debug_exit();
2073         bch_request_exit();
2074         if (bcache_kobj)
2075                 kobject_put(bcache_kobj);
2076         if (bcache_wq)
2077                 destroy_workqueue(bcache_wq);
2078         if (bcache_major)
2079                 unregister_blkdev(bcache_major, "bcache");
2080         unregister_reboot_notifier(&reboot);
2081 }
2082
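/*
 * Module init: the reboot notifier and the block major are registered
 * first; any later failure funnels through bcache_exit(), which checks
 * each resource before tearing it down, so partial setup unwinds safely.
 */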
2083 static int __init bcache_init(void)
2084 {
2085         static const struct attribute *files[] = {
2086                 &ksysfs_register.attr,
2087                 &ksysfs_register_quiet.attr,
2088                 NULL
2089         };
2090
2091         mutex_init(&bch_register_lock);
2092         init_waitqueue_head(&unregister_wait);
2093         register_reboot_notifier(&reboot);
2094         closure_debug_init();
2095
2096         bcache_major = register_blkdev(0, "bcache");
2097         if (bcache_major < 0) {
2098                 unregister_reboot_notifier(&reboot);
2099                 return bcache_major;
2100         }
2101
2102         if (!(bcache_wq = create_workqueue("bcache")) ||
2103             !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
2104             sysfs_create_files(bcache_kobj, files) ||
2105             bch_request_init() ||
2106             bch_debug_init(bcache_kobj))
2107                 goto err;
2108
2109         return 0;
2110 err:
2111         bcache_exit();
2112         return -ENOMEM;
2113 }
2114
2115 module_exit(bcache_exit);
2116 module_init(bcache_init);