2 * Copyright (C) 2012 Fusion-io All rights reserved.
3 * Copyright (C) 2012 Intel Corp. All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/wait.h>
21 #include <linux/bio.h>
22 #include <linux/slab.h>
23 #include <linux/buffer_head.h>
24 #include <linux/blkdev.h>
25 #include <linux/random.h>
26 #include <linux/iocontext.h>
27 #include <linux/capability.h>
28 #include <linux/ratelimit.h>
29 #include <linux/kthread.h>
30 #include <linux/raid/pq.h>
31 #include <linux/hash.h>
32 #include <linux/list_sort.h>
33 #include <linux/raid/xor.h>
34 #include <linux/vmalloc.h>
35 #include <asm/div64.h>
37 #include "extent_map.h"
39 #include "transaction.h"
40 #include "print-tree.h"
43 #include "async-thread.h"
44 #include "check-integrity.h"
45 #include "rcu-string.h"
47 /* set when additional merges to this rbio are not allowed */
48 #define RBIO_RMW_LOCKED_BIT 1
51 * set when this rbio is sitting in the hash, but it is just a cache
54 #define RBIO_CACHE_BIT 2
57 * set when it is safe to trust the stripe_pages for caching
59 #define RBIO_CACHE_READY_BIT 3
62 * bbio and raid_map are managed by the caller, so we shouldn't free
63 * them here. Besides that, all rbios with this flag set must not
64 * be cached, because we need raid_map to check whether two rbios
65 * cover the same stripe, but it is very likely that the caller has
66 * already freed the raid_map, so don't cache those rbios.
68 #define RBIO_HOLD_BBIO_MAP_BIT 4
70 #define RBIO_CACHE_SIZE 1024
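/*
 * cap on the number of rbios kept on the per-fs stripe cache LRU.
 * Once cache_rbio() pushes the size past this, the least recently
 * used entry is evicted with __remove_rbio_from_cache().
 */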
74 BTRFS_RBIO_READ_REBUILD = 1,
75 BTRFS_RBIO_PARITY_SCRUB = 2,
78 struct btrfs_raid_bio {
79 struct btrfs_fs_info *fs_info;
80 struct btrfs_bio *bbio;
83 * logical block numbers for the start of each stripe
84 * The last one or two are p/q. These are sorted,
85 * so raid_map[0] is the start of our full stripe
89 /* while we're doing rmw on a stripe
90 * we put it into a hash table so we can
91 * lock the stripe and merge more rbios
94 struct list_head hash_list;
97 * LRU list for the stripe cache
99 struct list_head stripe_cache;
102 * for scheduling work in the helper threads
104 struct btrfs_work work;
107 * bio list and bio_list_lock are used
108 * to add more bios into the stripe
109 * in hopes of avoiding the full rmw
111 struct bio_list bio_list;
112 spinlock_t bio_list_lock;
114 /* also protected by the bio_list_lock, the
115 * plug list is used by the plugging code
116 * to collect partial bios while plugged. The
117 * stripe locking code also uses it to hand off
118 * the stripe lock to the next pending IO
120 struct list_head plug_list;
123 * flags that tell us if it is safe to
124 * merge with this bio
128 /* size of each individual stripe on disk */
131 /* number of data stripes (no p/q) */
138 * set if we're doing a parity rebuild
139 * for a read from higher up, which is handled
140 * differently from a parity rebuild as part of rmw.
143 enum btrfs_rbio_ops operation;
145 /* first bad stripe */
148 /* second bad stripe (for raid6 use) */
153 * number of pages needed to represent the full
159 * size of all the bios in the bio_list. This
160 * helps us decide if the rbio maps to a full
167 atomic_t stripes_pending;
171 * these are two arrays of pointers. We allocate the
172 * rbio big enough to hold them both and setup their
173 * locations when the rbio is allocated
176 /* pointers to pages that we allocated for
177 * reading/writing stripes directly from the disk (including P/Q)
179 struct page **stripe_pages;
182 * pointers to the pages in the bio_list. Stored
183 * here for faster lookup
185 struct page **bio_pages;
188 * bitmap to record which horizontal stripe has data
190 unsigned long *dbitmap;
193 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
194 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
195 static void rmw_work(struct btrfs_work *work);
196 static void read_rebuild_work(struct btrfs_work *work);
197 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
198 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
199 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
200 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
201 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
202 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
203 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
205 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
207 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
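/*
 * high level flow for a write: raid56_parity_write() collects bios
 * (partial stripes may sit on a plug list first), lock_stripe_add()
 * serializes work per full stripe, raid56_rmw_stripe() reads in any
 * pages the bio_list does not cover, finish_rmw() recomputes P/Q and
 * submits the writes, and raid_write_end_io() completes the original
 * bios.
 */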
210 * the stripe hash table is used for locking, and to collect
211 * bios in hopes of making a full stripe
213 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
215 struct btrfs_stripe_hash_table *table;
216 struct btrfs_stripe_hash_table *x;
217 struct btrfs_stripe_hash *cur;
218 struct btrfs_stripe_hash *h;
219 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
223 if (info->stripe_hash_table)
227 * The table is large: it starts at order 4 and can go as high as
228 * order 7 in case lock debugging is turned on.
230 * Try harder to allocate and fallback to vmalloc to lower the chance
231 * of a failing mount.
233 table_size = sizeof(*table) + sizeof(*h) * num_entries;
234 table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
236 table = vzalloc(table_size);
241 spin_lock_init(&table->cache_lock);
242 INIT_LIST_HEAD(&table->stripe_cache);
246 for (i = 0; i < num_entries; i++) {
248 INIT_LIST_HEAD(&cur->hash_list);
249 spin_lock_init(&cur->lock);
250 init_waitqueue_head(&cur->wait);
253 x = cmpxchg(&info->stripe_hash_table, NULL, table);
255 if (is_vmalloc_addr(x))
264 * caching an rbio means copying everything from the
265 * bio_pages array into the stripe_pages array. We
266 * use the page uptodate bit in the stripe cache array
267 * to indicate if it has valid data
269 * once the caching is done, we set the cache ready bit.
272 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
279 ret = alloc_rbio_pages(rbio);
283 for (i = 0; i < rbio->nr_pages; i++) {
284 if (!rbio->bio_pages[i])
287 s = kmap(rbio->bio_pages[i]);
288 d = kmap(rbio->stripe_pages[i]);
290 memcpy(d, s, PAGE_CACHE_SIZE);
292 kunmap(rbio->bio_pages[i]);
293 kunmap(rbio->stripe_pages[i]);
294 SetPageUptodate(rbio->stripe_pages[i]);
296 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
300 * we hash on the first logical address of the stripe
302 static int rbio_bucket(struct btrfs_raid_bio *rbio)
304 u64 num = rbio->raid_map[0];
307 * we shift down quite a bit. We're using byte
308 * addressing, and most of the lower bits are zeros.
309 * This tends to upset hash_64, and it consistently
310 * returns just one or two different values.
312 * shifting off the lower bits fixes things.
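 * For example, two full stripes that start 64K apart differ only at
 * bit 16 and above; after the shift that difference lands in the low
 * bits where hash_64() can spread it across buckets.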
314 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
318 * stealing an rbio means taking all the uptodate pages from the stripe
319 * array in the source rbio and putting them into the destination rbio
321 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
327 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
330 for (i = 0; i < dest->nr_pages; i++) {
331 s = src->stripe_pages[i];
332 if (!s || !PageUptodate(s)) {
336 d = dest->stripe_pages[i];
340 dest->stripe_pages[i] = s;
341 src->stripe_pages[i] = NULL;
346 * merging means we take the bio_list from the victim and
347 * splice it into the destination. The victim should
348 * be discarded afterwards.
350 * must be called with dest->rbio_list_lock held
352 static void merge_rbio(struct btrfs_raid_bio *dest,
353 struct btrfs_raid_bio *victim)
355 bio_list_merge(&dest->bio_list, &victim->bio_list);
356 dest->bio_list_bytes += victim->bio_list_bytes;
357 bio_list_init(&victim->bio_list);
361 * used to prune items that are in the cache. The caller
362 * must hold the hash table lock.
364 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
366 int bucket = rbio_bucket(rbio);
367 struct btrfs_stripe_hash_table *table;
368 struct btrfs_stripe_hash *h;
372 * check the bit again under the hash table lock.
374 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
377 table = rbio->fs_info->stripe_hash_table;
378 h = table->table + bucket;
380 /* hold the lock for the bucket because we may be
381 * removing it from the hash table
386 * hold the lock for the bio list because we need
387 * to make sure the bio list is empty
389 spin_lock(&rbio->bio_list_lock);
391 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
392 list_del_init(&rbio->stripe_cache);
393 table->cache_size -= 1;
396 /* if the bio list isn't empty, this rbio is
397 * still involved in an IO. We take it out
398 * of the cache list, and drop the ref that
399 * was held for the list.
401 * If the bio_list was empty, we also remove
402 * the rbio from the hash_table, and drop
403 * the corresponding ref
405 if (bio_list_empty(&rbio->bio_list)) {
406 if (!list_empty(&rbio->hash_list)) {
407 list_del_init(&rbio->hash_list);
408 atomic_dec(&rbio->refs);
409 BUG_ON(!list_empty(&rbio->plug_list));
414 spin_unlock(&rbio->bio_list_lock);
415 spin_unlock(&h->lock);
418 __free_raid_bio(rbio);
422 * prune a given rbio from the cache
424 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
426 struct btrfs_stripe_hash_table *table;
429 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
432 table = rbio->fs_info->stripe_hash_table;
434 spin_lock_irqsave(&table->cache_lock, flags);
435 __remove_rbio_from_cache(rbio);
436 spin_unlock_irqrestore(&table->cache_lock, flags);
440 * remove everything in the cache
442 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
444 struct btrfs_stripe_hash_table *table;
446 struct btrfs_raid_bio *rbio;
448 table = info->stripe_hash_table;
450 spin_lock_irqsave(&table->cache_lock, flags);
451 while (!list_empty(&table->stripe_cache)) {
452 rbio = list_entry(table->stripe_cache.next,
453 struct btrfs_raid_bio,
455 __remove_rbio_from_cache(rbio);
457 spin_unlock_irqrestore(&table->cache_lock, flags);
461 * remove all cached entries and free the hash table
464 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
466 if (!info->stripe_hash_table)
468 btrfs_clear_rbio_cache(info);
469 if (is_vmalloc_addr(info->stripe_hash_table))
470 vfree(info->stripe_hash_table);
472 kfree(info->stripe_hash_table);
473 info->stripe_hash_table = NULL;
477 * insert an rbio into the stripe cache. It
478 * must have already been prepared by calling cache_rbio_pages().
481 * If this rbio was already cached, it gets
482 * moved to the front of the lru.
484 * If the size of the rbio cache is too big, we prune the oldest item.
487 static void cache_rbio(struct btrfs_raid_bio *rbio)
489 struct btrfs_stripe_hash_table *table;
492 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
495 table = rbio->fs_info->stripe_hash_table;
497 spin_lock_irqsave(&table->cache_lock, flags);
498 spin_lock(&rbio->bio_list_lock);
500 /* bump our ref if we were not in the list before */
501 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
502 atomic_inc(&rbio->refs);
504 if (!list_empty(&rbio->stripe_cache)){
505 list_move(&rbio->stripe_cache, &table->stripe_cache);
507 list_add(&rbio->stripe_cache, &table->stripe_cache);
508 table->cache_size += 1;
511 spin_unlock(&rbio->bio_list_lock);
513 if (table->cache_size > RBIO_CACHE_SIZE) {
514 struct btrfs_raid_bio *found;
516 found = list_entry(table->stripe_cache.prev,
517 struct btrfs_raid_bio,
521 __remove_rbio_from_cache(found);
524 spin_unlock_irqrestore(&table->cache_lock, flags);
529 * helper function to run the xor_blocks api. It is only
530 * able to do MAX_XOR_BLOCKS at a time, so we need to loop through.
533 static void run_xor(void **pages, int src_cnt, ssize_t len)
537 void *dest = pages[src_cnt];
540 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
541 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
543 src_cnt -= xor_src_cnt;
544 src_off += xor_src_cnt;
549 * returns true if the bio list inside this rbio
550 * covers an entire stripe (no rmw required).
551 * Must be called with the bio list lock held, or
552 * at a time when you know it is impossible to add
553 * new bios into the list
555 static int __rbio_is_full(struct btrfs_raid_bio *rbio)
557 unsigned long size = rbio->bio_list_bytes;
560 if (size != rbio->nr_data * rbio->stripe_len)
563 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
567 static int rbio_is_full(struct btrfs_raid_bio *rbio)
572 spin_lock_irqsave(&rbio->bio_list_lock, flags);
573 ret = __rbio_is_full(rbio);
574 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
579 * returns 1 if it is safe to merge two rbios together.
580 * The merging is safe if the two rbios correspond to
581 * the same stripe and if they are both going in the same
582 * direction (read vs write), and if neither one is
583 * locked for final IO
585 * The caller is responsible for locking such that
586 * rmw_locked is safe to test
588 static int rbio_can_merge(struct btrfs_raid_bio *last,
589 struct btrfs_raid_bio *cur)
591 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
592 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
596 * we can't merge with cached rbios, since the
597 * idea is that when we merge the destination
598 * rbio is going to run our IO for us. We can
599 * steal from cached rbios though, other functions handle that.
602 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
603 test_bit(RBIO_CACHE_BIT, &cur->flags))
606 if (last->raid_map[0] != cur->raid_map[0])
return 0;
610 /* we can't merge with different operations */
611 if (last->operation != cur->operation)
614 * We need to read the full stripe from the drive, check and
615 * repair the parity and write the new results.
617 * We're not allowed to add any new bios to the
618 * bio list here, anyone else that wants to
619 * change this stripe needs to do their own rmw.
621 if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
622 cur->operation == BTRFS_RBIO_PARITY_SCRUB)
629 * helper to index into the pstripe
631 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
633 index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
634 return rbio->stripe_pages[index];
638 * helper to index into the qstripe, returns null
639 * if there is no qstripe
641 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
643 if (rbio->nr_data + 1 == rbio->real_stripes)
return NULL;
646 index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
648 return rbio->stripe_pages[index];
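/*
 * stripe_pages is laid out stripe after stripe: all of the pages for
 * data stripe 0, then data stripe 1, ..., then P and (for raid6) Q.
 * With, say, a 64K stripe_len and 4K pages, each stripe takes 16
 * slots, so the P pages of a 3 data stripe rbio start at index 48.
 */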
652 * The first stripe in the table for a logical address
653 * has the lock. rbios are added in one of three ways:
655 * 1) Nobody has the stripe locked yet. The rbio is given
656 * the lock and 0 is returned. The caller must start the IO
659 * 2) Someone has the stripe locked, but we're able to merge
660 * with the lock owner. The rbio is freed and the IO will
661 * start automatically along with the existing rbio. 1 is returned.
663 * 3) Someone has the stripe locked, but we're not able to merge.
664 * The rbio is added to the lock owner's plug list, or merged into
665 * an rbio already on the plug list. When the lock owner unlocks,
666 * the next rbio on the list is run and the IO is started automatically.
669 * If we return 0, the caller still owns the rbio and must continue with
670 * IO submission. If we return 1, the caller must assume the rbio has
671 * already been freed.
673 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
675 int bucket = rbio_bucket(rbio);
676 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
677 struct btrfs_raid_bio *cur;
678 struct btrfs_raid_bio *pending;
681 struct btrfs_raid_bio *freeit = NULL;
682 struct btrfs_raid_bio *cache_drop = NULL;
686 spin_lock_irqsave(&h->lock, flags);
687 list_for_each_entry(cur, &h->hash_list, hash_list) {
689 if (cur->raid_map[0] == rbio->raid_map[0]) {
690 spin_lock(&cur->bio_list_lock);
692 /* can we steal this cached rbio's pages? */
693 if (bio_list_empty(&cur->bio_list) &&
694 list_empty(&cur->plug_list) &&
695 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
696 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
697 list_del_init(&cur->hash_list);
698 atomic_dec(&cur->refs);
700 steal_rbio(cur, rbio);
702 spin_unlock(&cur->bio_list_lock);
707 /* can we merge into the lock owner? */
708 if (rbio_can_merge(cur, rbio)) {
709 merge_rbio(cur, rbio);
710 spin_unlock(&cur->bio_list_lock);
718 * we couldn't merge with the running
719 * rbio, see if we can merge with the
720 * pending ones. We don't have to
721 * check for rmw_locked because there
722 * is no way they are inside finish_rmw right now.
725 list_for_each_entry(pending, &cur->plug_list,
727 if (rbio_can_merge(pending, rbio)) {
728 merge_rbio(pending, rbio);
729 spin_unlock(&cur->bio_list_lock);
736 /* no merging, put us on the tail of the plug list,
737 * our rbio will be started when the currently
738 * running rbio unlocks
740 list_add_tail(&rbio->plug_list, &cur->plug_list);
741 spin_unlock(&cur->bio_list_lock);
747 atomic_inc(&rbio->refs);
748 list_add(&rbio->hash_list, &h->hash_list);
750 spin_unlock_irqrestore(&h->lock, flags);
752 remove_rbio_from_cache(cache_drop);
754 __free_raid_bio(freeit);
759 * called as rmw or parity rebuild is completed. If the plug list has more
760 * rbios waiting for this stripe, the next one on the list will be started
762 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
765 struct btrfs_stripe_hash *h;
769 bucket = rbio_bucket(rbio);
770 h = rbio->fs_info->stripe_hash_table->table + bucket;
772 if (list_empty(&rbio->plug_list))
775 spin_lock_irqsave(&h->lock, flags);
776 spin_lock(&rbio->bio_list_lock);
778 if (!list_empty(&rbio->hash_list)) {
780 * if we're still cached and there is no other IO
781 * to perform, just leave this rbio here for others
782 * to steal from later
784 if (list_empty(&rbio->plug_list) &&
785 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
787 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
788 BUG_ON(!bio_list_empty(&rbio->bio_list));
792 list_del_init(&rbio->hash_list);
793 atomic_dec(&rbio->refs);
796 * we use the plug list to hold all the rbios
797 * waiting for the chance to lock this stripe.
798 * hand the lock over to one of them.
800 if (!list_empty(&rbio->plug_list)) {
801 struct btrfs_raid_bio *next;
802 struct list_head *head = rbio->plug_list.next;
804 next = list_entry(head, struct btrfs_raid_bio,
807 list_del_init(&rbio->plug_list);
809 list_add(&next->hash_list, &h->hash_list);
810 atomic_inc(&next->refs);
811 spin_unlock(&rbio->bio_list_lock);
812 spin_unlock_irqrestore(&h->lock, flags);
814 if (next->operation == BTRFS_RBIO_READ_REBUILD)
815 async_read_rebuild(next);
816 else if (next->operation == BTRFS_RBIO_WRITE) {
817 steal_rbio(rbio, next);
818 async_rmw_stripe(next);
819 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
820 steal_rbio(rbio, next);
821 async_scrub_parity(next);
825 } else if (waitqueue_active(&h->wait)) {
826 spin_unlock(&rbio->bio_list_lock);
827 spin_unlock_irqrestore(&h->lock, flags);
833 spin_unlock(&rbio->bio_list_lock);
834 spin_unlock_irqrestore(&h->lock, flags);
838 remove_rbio_from_cache(rbio);
842 __free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
850 static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
852 __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
853 !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
856 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
860 WARN_ON(atomic_read(&rbio->refs) < 0);
861 if (!atomic_dec_and_test(&rbio->refs))
864 WARN_ON(!list_empty(&rbio->stripe_cache));
865 WARN_ON(!list_empty(&rbio->hash_list));
866 WARN_ON(!bio_list_empty(&rbio->bio_list));
868 for (i = 0; i < rbio->nr_pages; i++) {
869 if (rbio->stripe_pages[i]) {
870 __free_page(rbio->stripe_pages[i]);
871 rbio->stripe_pages[i] = NULL;
875 free_bbio_and_raid_map(rbio);
880 static void free_raid_bio(struct btrfs_raid_bio *rbio)
883 __free_raid_bio(rbio);
887 * this frees the rbio and runs through all the bios in the
888 * bio_list and calls end_io on them
890 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
892 struct bio *cur = bio_list_get(&rbio->bio_list);
900 set_bit(BIO_UPTODATE, &cur->bi_flags);
907 * end io function used by finish_rmw. When we finally
908 * get here, we've written a full stripe
910 static void raid_write_end_io(struct bio *bio, int err)
912 struct btrfs_raid_bio *rbio = bio->bi_private;
915 fail_bio_stripe(rbio, bio);
919 if (!atomic_dec_and_test(&rbio->stripes_pending))
924 /* OK, all the writes for this stripe have completed. */
925 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
928 rbio_orig_end_io(rbio, err, 0);
933 * the read/modify/write code wants to use the original bio for
934 * any pages it included, and then use the rbio for everything
935 * else. This function decides if a given index (stripe number)
936 * and page number in that stripe fall inside the original bio
939 * if you set bio_list_only, you'll get a NULL back for any ranges
940 * that are outside the bio_list
942 * This doesn't take any refs on anything, you get a bare page pointer
943 * and the caller must bump refs as required.
945 * You must call index_rbio_pages once before you can trust
946 * the answers from this function.
948 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
949 int index, int pagenr, int bio_list_only)
952 struct page *p = NULL;
954 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
956 spin_lock_irq(&rbio->bio_list_lock);
957 p = rbio->bio_pages[chunk_page];
958 spin_unlock_irq(&rbio->bio_list_lock);
960 if (p || bio_list_only)
963 return rbio->stripe_pages[chunk_page];
967 * number of pages we need for the entire stripe across all the
970 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
972 unsigned long nr = stripe_len * nr_stripes;
973 return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
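/*
 * e.g. a 3 data + 1 parity rbio with a 64K stripe_len needs
 * DIV_ROUND_UP(256K, 4K) = 64 pages when PAGE_CACHE_SIZE is 4K
 */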
977 * allocation and initial setup for the btrfs_raid_bio. Note that
978 * this does not allocate any pages for rbio->stripe_pages.
980 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
981 struct btrfs_bio *bbio, u64 *raid_map,
984 struct btrfs_raid_bio *rbio;
986 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
987 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
988 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
991 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
992 DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
995 return ERR_PTR(-ENOMEM);
997 bio_list_init(&rbio->bio_list);
998 INIT_LIST_HEAD(&rbio->plug_list);
999 spin_lock_init(&rbio->bio_list_lock);
1000 INIT_LIST_HEAD(&rbio->stripe_cache);
1001 INIT_LIST_HEAD(&rbio->hash_list);
1003 rbio->raid_map = raid_map;
1004 rbio->fs_info = root->fs_info;
1005 rbio->stripe_len = stripe_len;
1006 rbio->nr_pages = num_pages;
1007 rbio->real_stripes = real_stripes;
1008 rbio->stripe_npages = stripe_npages;
1011 atomic_set(&rbio->refs, 1);
1012 atomic_set(&rbio->error, 0);
1013 atomic_set(&rbio->stripes_pending, 0);
1016 * the stripe_pages and bio_pages array point to the extra
1017 * memory we allocated past the end of the rbio
1020 rbio->stripe_pages = p;
1021 rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1022 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
1024 if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
1025 nr_data = real_stripes - 2;
1027 nr_data = real_stripes - 1;
1029 rbio->nr_data = nr_data;
1033 /* allocate pages for all the stripes in the bio, including parity */
1034 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1039 for (i = 0; i < rbio->nr_pages; i++) {
1040 if (rbio->stripe_pages[i])
1042 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1045 rbio->stripe_pages[i] = page;
1046 ClearPageUptodate(page);
1051 /* allocate pages for just the p/q stripes */
1052 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1057 i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
1059 for (; i < rbio->nr_pages; i++) {
1060 if (rbio->stripe_pages[i])
1062 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1065 rbio->stripe_pages[i] = page;
1071 * add a single page from a specific stripe into our list of bios for IO
1072 * this will try to merge into existing bios if possible, and returns
1073 * zero if all went well.
1075 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1076 struct bio_list *bio_list,
1079 unsigned long page_index,
1080 unsigned long bio_max_len)
1082 struct bio *last = bio_list->tail;
1086 struct btrfs_bio_stripe *stripe;
1089 stripe = &rbio->bbio->stripes[stripe_nr];
1090 disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
1092 /* if the device is missing, just fail this stripe */
1093 if (!stripe->dev->bdev)
1094 return fail_rbio_index(rbio, stripe_nr);
1096 /* see if we can add this page onto our existing bio */
1098 last_end = (u64)last->bi_iter.bi_sector << 9;
1099 last_end += last->bi_iter.bi_size;
1102 * we can't merge these if they are from different
1103 * devices or if they are not contiguous
1105 if (last_end == disk_start && stripe->dev->bdev &&
1106 test_bit(BIO_UPTODATE, &last->bi_flags) &&
1107 last->bi_bdev == stripe->dev->bdev) {
1108 ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
1109 if (ret == PAGE_CACHE_SIZE)
1114 /* put a new bio on the list */
1115 bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
1119 bio->bi_iter.bi_size = 0;
1120 bio->bi_bdev = stripe->dev->bdev;
1121 bio->bi_iter.bi_sector = disk_start >> 9;
1122 set_bit(BIO_UPTODATE, &bio->bi_flags);
1124 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
1125 bio_list_add(bio_list, bio);
1130 * while we're doing the read/modify/write cycle, we could
1131 * have errors in reading pages off the disk. This checks
1132 * for errors and if we're not able to read the page it'll
1133 * trigger parity reconstruction. The rmw will be finished
1134 * after we've reconstructed the failed stripes
1136 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1138 if (rbio->faila >= 0 || rbio->failb >= 0) {
1139 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1140 __raid56_parity_recover(rbio);
1147 * these are just the pages from the rbio array, not from anything
1148 * the FS sent down to us
1150 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
1153 index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
1155 return rbio->stripe_pages[index];
1159 * helper function to walk our bio list and populate the bio_pages array with
1160 * the result. This seems expensive, but it is faster than constantly
1161 * searching through the bio list as we set up the IO in finish_rmw or stripe reconstruction.
1164 * This must be called before you trust the answers from page_in_rbio
1166 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1170 unsigned long stripe_offset;
1171 unsigned long page_index;
1175 spin_lock_irq(&rbio->bio_list_lock);
1176 bio_list_for_each(bio, &rbio->bio_list) {
1177 start = (u64)bio->bi_iter.bi_sector << 9;
1178 stripe_offset = start - rbio->raid_map[0];
1179 page_index = stripe_offset >> PAGE_CACHE_SHIFT;
1181 for (i = 0; i < bio->bi_vcnt; i++) {
1182 p = bio->bi_io_vec[i].bv_page;
1183 rbio->bio_pages[page_index + i] = p;
1186 spin_unlock_irq(&rbio->bio_list_lock);
1190 * this is called from one of two situations. We either
1191 * have a full stripe from the higher layers, or we've read all
1192 * the missing bits off disk.
1194 * This will calculate the parity and then send down any
1197 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1199 struct btrfs_bio *bbio = rbio->bbio;
1200 void *pointers[rbio->real_stripes];
1201 int stripe_len = rbio->stripe_len;
1202 int nr_data = rbio->nr_data;
1207 struct bio_list bio_list;
1209 int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
1212 bio_list_init(&bio_list);
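/*
 * the parity stripes always sit at the end of the rbio: for raid5
 * the last stripe is P, for raid6 the last two are P followed by Q
 */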
1214 if (rbio->real_stripes - rbio->nr_data == 1) {
1215 p_stripe = rbio->real_stripes - 1;
1216 } else if (rbio->real_stripes - rbio->nr_data == 2) {
1217 p_stripe = rbio->real_stripes - 2;
1218 q_stripe = rbio->real_stripes - 1;
1223 /* at this point we either have a full stripe,
1224 * or we've read the full stripe from the drive.
1225 * recalculate the parity and write the new results.
1227 * We're not allowed to add any new bios to the
1228 * bio list here, anyone else that wants to
1229 * change this stripe needs to do their own rmw.
1231 spin_lock_irq(&rbio->bio_list_lock);
1232 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1233 spin_unlock_irq(&rbio->bio_list_lock);
1235 atomic_set(&rbio->error, 0);
1238 * now that we've set rmw_locked, run through the
1239 * bio list one last time and map the page pointers
1241 * We don't cache full rbios because we're assuming
1242 * the higher layers are unlikely to use this area of
1243 * the disk again soon. If they do use it again,
1244 * hopefully they will send another full bio.
1246 index_rbio_pages(rbio);
1247 if (!rbio_is_full(rbio))
1248 cache_rbio_pages(rbio);
else
1250 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1252 for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1254 /* first collect one page from each data stripe */
1255 for (stripe = 0; stripe < nr_data; stripe++) {
1256 p = page_in_rbio(rbio, stripe, pagenr, 0);
1257 pointers[stripe] = kmap(p);
1260 /* then add the parity stripe */
1261 p = rbio_pstripe_page(rbio, pagenr);
1263 pointers[stripe++] = kmap(p);
1265 if (q_stripe != -1) {
1268 * raid6, add the qstripe and call the
1269 * library function to fill in our p/q
1271 p = rbio_qstripe_page(rbio, pagenr);
1273 pointers[stripe++] = kmap(p);
1275 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1279 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1280 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
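/*
 * the two lines above compute the raid5 parity in place:
 * P = D0 ^ D1 ^ ... ^ Dn-1, seeded with a copy of D0 and then
 * xor-ed with the remaining nr_data - 1 data pages
 */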
1284 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1285 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1289 * time to start writing. Make bios for everything from the
1290 * higher layers (the bio_list in our rbio) and our p/q. Ignore everything else.
1293 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1294 for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1296 if (stripe < rbio->nr_data) {
1297 page = page_in_rbio(rbio, stripe, pagenr, 1);
1301 page = rbio_stripe_page(rbio, stripe, pagenr);
1304 ret = rbio_add_io_page(rbio, &bio_list,
1305 page, stripe, pagenr, rbio->stripe_len);
1311 if (likely(!bbio->num_tgtdevs))
1314 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1315 if (!bbio->tgtdev_map[stripe])
1318 for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1320 if (stripe < rbio->nr_data) {
1321 page = page_in_rbio(rbio, stripe, pagenr, 1);
1325 page = rbio_stripe_page(rbio, stripe, pagenr);
1328 ret = rbio_add_io_page(rbio, &bio_list, page,
1329 rbio->bbio->tgtdev_map[stripe],
1330 pagenr, rbio->stripe_len);
1337 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1338 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1341 bio = bio_list_pop(&bio_list);
1345 bio->bi_private = rbio;
1346 bio->bi_end_io = raid_write_end_io;
1347 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1348 submit_bio(WRITE, bio);
1353 rbio_orig_end_io(rbio, -EIO, 0);
1357 * helper to find the stripe number for a given bio. Used to figure out which
1358 * stripe has failed. This expects the bio to correspond to a physical disk,
1359 * so it looks up based on physical sector numbers.
1361 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1364 u64 physical = bio->bi_iter.bi_sector;
1367 struct btrfs_bio_stripe *stripe;
1371 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1372 stripe = &rbio->bbio->stripes[i];
1373 stripe_start = stripe->physical;
1374 if (physical >= stripe_start &&
1375 physical < stripe_start + rbio->stripe_len &&
1376 bio->bi_bdev == stripe->dev->bdev) {
1384 * helper to find the stripe number for a given
1385 * bio (before mapping). Used to figure out which stripe has
1386 * failed. This looks up based on logical block numbers.
1388 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1391 u64 logical = bio->bi_iter.bi_sector;
1397 for (i = 0; i < rbio->nr_data; i++) {
1398 stripe_start = rbio->raid_map[i];
1399 if (logical >= stripe_start &&
1400 logical < stripe_start + rbio->stripe_len) {
1408 * returns -EIO if we had too many failures
1410 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1412 unsigned long flags;
1415 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1417 /* we already know this stripe is bad, move on */
1418 if (rbio->faila == failed || rbio->failb == failed)
1421 if (rbio->faila == -1) {
1422 /* first failure on this rbio */
1423 rbio->faila = failed;
1424 atomic_inc(&rbio->error);
1425 } else if (rbio->failb == -1) {
1426 /* second failure on this rbio */
1427 rbio->failb = failed;
1428 atomic_inc(&rbio->error);
1433 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1439 * helper to fail a stripe based on a physical disk
1442 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1445 int failed = find_bio_stripe(rbio, bio);
1450 return fail_rbio_index(rbio, failed);
1454 * this sets each page in the bio uptodate. It should only be used on private
1455 * rbio pages, nothing that comes in from the higher layers
1457 static void set_bio_pages_uptodate(struct bio *bio)
1462 for (i = 0; i < bio->bi_vcnt; i++) {
1463 p = bio->bi_io_vec[i].bv_page;
1469 * end io for the read phase of the rmw cycle. All the bios here are physical
1470 * stripe bios we've read from the disk so we can recalculate the parity of the stripe.
1473 * This will usually kick off finish_rmw once all the bios are read in, but it
1474 * may trigger parity reconstruction if we had any errors along the way
1476 static void raid_rmw_end_io(struct bio *bio, int err)
1478 struct btrfs_raid_bio *rbio = bio->bi_private;
1481 fail_bio_stripe(rbio, bio);
1483 set_bio_pages_uptodate(bio);
1487 if (!atomic_dec_and_test(&rbio->stripes_pending))
1491 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1495 * this will normally call finish_rmw to start our write
1496 * but if there are any failed stripes we'll reconstruct from parity first.
1499 validate_rbio_for_rmw(rbio);
1504 rbio_orig_end_io(rbio, -EIO, 0);
1507 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1509 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1510 rmw_work, NULL, NULL);
1512 btrfs_queue_work(rbio->fs_info->rmw_workers,
1516 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1518 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1519 read_rebuild_work, NULL, NULL);
1521 btrfs_queue_work(rbio->fs_info->rmw_workers,
1526 * the stripe must be locked by the caller. It will
1527 * unlock after all the writes are done
1529 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1531 int bios_to_read = 0;
1532 struct bio_list bio_list;
1534 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
1539 bio_list_init(&bio_list);
1541 ret = alloc_rbio_pages(rbio);
1545 index_rbio_pages(rbio);
1547 atomic_set(&rbio->error, 0);
1549 * build a list of bios to read all the missing parts of this
1552 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1553 for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1556 * we want to find all the pages missing from
1557 * the rbio and read them from the disk. If
1558 * page_in_rbio finds a page in the bio list
1559 * we don't need to read it off the stripe.
1561 page = page_in_rbio(rbio, stripe, pagenr, 1);
1565 page = rbio_stripe_page(rbio, stripe, pagenr);
1567 * the bio cache may have handed us an uptodate
1568 * page. If so, be happy and use it
1570 if (PageUptodate(page))
1573 ret = rbio_add_io_page(rbio, &bio_list, page,
1574 stripe, pagenr, rbio->stripe_len);
1580 bios_to_read = bio_list_size(&bio_list);
1581 if (!bios_to_read) {
1583 * this can happen if others have merged with
1584 * us, it means there is nothing left to read.
1585 * But if there are missing devices it may not be
1586 * safe to do the full stripe write yet.
1592 * the bbio may be freed once we submit the last bio. Make sure
1593 * not to touch it after that
1595 atomic_set(&rbio->stripes_pending, bios_to_read);
1597 bio = bio_list_pop(&bio_list);
1601 bio->bi_private = rbio;
1602 bio->bi_end_io = raid_rmw_end_io;
1604 btrfs_bio_wq_end_io(rbio->fs_info, bio,
1605 BTRFS_WQ_ENDIO_RAID56);
1607 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1608 submit_bio(READ, bio);
1610 /* the actual write will happen once the reads are done */
1614 rbio_orig_end_io(rbio, -EIO, 0);
1618 validate_rbio_for_rmw(rbio);
1623 * if the upper layers pass in a full stripe, we thank them by only allocating
1624 * enough pages to hold the parity, and sending it all down quickly.
1626 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1630 ret = alloc_rbio_parity_pages(rbio);
1632 __free_raid_bio(rbio);
1636 ret = lock_stripe_add(rbio);
1643 * partial stripe writes get handed over to async helpers.
1644 * We're really hoping to merge a few more writes into this
1645 * rbio before calculating new parity
1647 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1651 ret = lock_stripe_add(rbio);
1653 async_rmw_stripe(rbio);
1658 * sometimes while we were reading from the drive to
1659 * recalculate parity, enough new bios come in to create
1660 * a full stripe. So we do a check here to see if we can
1661 * go directly to finish_rmw
1663 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1665 /* head off into rmw land if we don't have a full stripe */
1666 if (!rbio_is_full(rbio))
1667 return partial_stripe_write(rbio);
1668 return full_stripe_write(rbio);
1672 * We use plugging call backs to collect full stripes.
1673 * Any time we get a partial stripe write while plugged
1674 * we collect it into a list. When the unplug comes down,
1675 * we sort the list by logical block number and merge
1676 * everything we can into the same rbios
1678 struct btrfs_plug_cb {
1679 struct blk_plug_cb cb;
1680 struct btrfs_fs_info *info;
1681 struct list_head rbio_list;
1682 struct btrfs_work work;
1686 * rbios on the plug list are sorted for easier merging.
1688 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1690 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1692 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1694 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1695 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1697 if (a_sector < b_sector)
1699 if (a_sector > b_sector)
1704 static void run_plug(struct btrfs_plug_cb *plug)
1706 struct btrfs_raid_bio *cur;
1707 struct btrfs_raid_bio *last = NULL;
1710 * sort our plug list then try to merge
1711 * everything we can in hopes of creating full stripes.
1714 list_sort(NULL, &plug->rbio_list, plug_cmp);
1715 while (!list_empty(&plug->rbio_list)) {
1716 cur = list_entry(plug->rbio_list.next,
1717 struct btrfs_raid_bio, plug_list);
1718 list_del_init(&cur->plug_list);
1720 if (rbio_is_full(cur)) {
1721 /* we have a full stripe, send it down */
1722 full_stripe_write(cur);
1726 if (rbio_can_merge(last, cur)) {
1727 merge_rbio(last, cur);
1728 __free_raid_bio(cur);
1732 __raid56_parity_write(last);
1737 __raid56_parity_write(last);
1743 * if the unplug comes from schedule, we have to push the
1744 * work off to a helper thread
1746 static void unplug_work(struct btrfs_work *work)
1748 struct btrfs_plug_cb *plug;
1749 plug = container_of(work, struct btrfs_plug_cb, work);
1753 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1755 struct btrfs_plug_cb *plug;
1756 plug = container_of(cb, struct btrfs_plug_cb, cb);
1758 if (from_schedule) {
1759 btrfs_init_work(&plug->work, btrfs_rmw_helper,
1760 unplug_work, NULL, NULL);
1761 btrfs_queue_work(plug->info->rmw_workers,
1769 * our main entry point for writes from the rest of the FS.
1771 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1772 struct btrfs_bio *bbio, u64 *raid_map,
1775 struct btrfs_raid_bio *rbio;
1776 struct btrfs_plug_cb *plug = NULL;
1777 struct blk_plug_cb *cb;
1779 rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
1781 __free_bbio_and_raid_map(bbio, raid_map, 1);
1782 return PTR_ERR(rbio);
1784 bio_list_add(&rbio->bio_list, bio);
1785 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1786 rbio->operation = BTRFS_RBIO_WRITE;
1789 * don't plug on full rbios, just get them out the door
1790 * as quickly as we can
1792 if (rbio_is_full(rbio))
1793 return full_stripe_write(rbio);
1795 cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
1798 plug = container_of(cb, struct btrfs_plug_cb, cb);
1800 plug->info = root->fs_info;
1801 INIT_LIST_HEAD(&plug->rbio_list);
1803 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1805 return __raid56_parity_write(rbio);
1811 * all parity reconstruction happens here. We've read in everything
1812 * we can find from the drives and this does the heavy lifting of
1813 * sorting the good from the bad.
1815 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1819 int faila = -1, failb = -1;
1820 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
1825 pointers = kzalloc(rbio->real_stripes * sizeof(void *),
1832 faila = rbio->faila;
1833 failb = rbio->failb;
1835 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1836 spin_lock_irq(&rbio->bio_list_lock);
1837 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1838 spin_unlock_irq(&rbio->bio_list_lock);
1841 index_rbio_pages(rbio);
1843 for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1845 * Now we just use the bitmap to mark the horizontal stripes in
1846 * which we have data when doing parity scrub.
1848 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1849 !test_bit(pagenr, rbio->dbitmap))
1852 /* set up our array of pointers with pages from each stripe
1855 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1857 * if we're rebuilding a read, we have to use
1858 * pages from the bio list
1860 if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
1861 (stripe == faila || stripe == failb)) {
1862 page = page_in_rbio(rbio, stripe, pagenr, 0);
1864 page = rbio_stripe_page(rbio, stripe, pagenr);
1866 pointers[stripe] = kmap(page);
1869 /* all raid6 handling here */
1870 if (rbio->raid_map[rbio->real_stripes - 1] ==
1874 * single failure, rebuild from parity raid5
1878 if (faila == rbio->nr_data) {
1880 * Just the P stripe has failed, without
1881 * a bad data or Q stripe.
1882 * TODO, we should redo the xor here.
1888 * a single failure in raid6 is rebuilt
1889 * in the pstripe code below
1894 /* make sure our ps and qs are in order */
1895 if (faila > failb) {
1901 /* if the q stripe failed, do a pstripe reconstruction from the xors.
1903 * If both the q stripe and the P stripe failed, we're
1904 * here due to a crc mismatch and we can't give them the data they want.
1907 if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
1908 if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
1913 * otherwise we have one bad data stripe and
1914 * a good P stripe. raid5!
1919 if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
1920 raid6_datap_recov(rbio->real_stripes,
1921 PAGE_SIZE, faila, pointers);
1923 raid6_2data_recov(rbio->real_stripes,
1924 PAGE_SIZE, faila, failb,
1930 /* rebuild from P stripe here (raid5 or raid6) */
1931 BUG_ON(failb != -1);
1933 /* Copy parity block into failed block to start with */
1934 memcpy(pointers[faila],
1935 pointers[rbio->nr_data],
1938 /* rearrange the pointer array */
1939 p = pointers[faila];
1940 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1941 pointers[stripe] = pointers[stripe + 1];
1942 pointers[rbio->nr_data - 1] = p;
1944 /* xor in the rest */
1945 run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
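/*
 * the failed block now holds P ^ (all surviving data blocks): we
 * copied the parity into the failed slot, rotated it to the end of
 * the pointer array and xor-ed the other nr_data - 1 blocks into it
 */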
1947 /* if we're doing this rebuild as part of an rmw, go through
1948 * and set all of our private rbio pages in the
1949 * failed stripes as uptodate. This way finish_rmw will
1950 * know they can be trusted. If this was a read reconstruction,
1951 * other endio functions will fiddle the uptodate bits
1953 if (rbio->operation == BTRFS_RBIO_WRITE) {
1954 for (i = 0; i < nr_pages; i++) {
1956 page = rbio_stripe_page(rbio, faila, i);
1957 SetPageUptodate(page);
1960 page = rbio_stripe_page(rbio, failb, i);
1961 SetPageUptodate(page);
1965 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1967 * if we're rebuilding a read, we have to use
1968 * pages from the bio list
1970 if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
1971 (stripe == faila || stripe == failb)) {
1972 page = page_in_rbio(rbio, stripe, pagenr, 0);
1974 page = rbio_stripe_page(rbio, stripe, pagenr);
1985 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1987 !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags))
1988 cache_rbio_pages(rbio);
1990 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1992 rbio_orig_end_io(rbio, err, err == 0);
1993 } else if (err == 0) {
1997 if (rbio->operation == BTRFS_RBIO_WRITE)
1999 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2000 finish_parity_scrub(rbio, 0);
2004 rbio_orig_end_io(rbio, err, 0);
2009 * This is called only for stripes we've read from disk to
2010 * reconstruct the parity.
2012 static void raid_recover_end_io(struct bio *bio, int err)
2014 struct btrfs_raid_bio *rbio = bio->bi_private;
2017 * we only read stripe pages off the disk, set them
2018 * up to date if there were no errors
2021 fail_bio_stripe(rbio, bio);
2023 set_bio_pages_uptodate(bio);
2026 if (!atomic_dec_and_test(&rbio->stripes_pending))
2029 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2030 rbio_orig_end_io(rbio, -EIO, 0);
2032 __raid_recover_end_io(rbio);
2036 * reads everything we need off the disk to reconstruct
2037 * the parity. endio handlers trigger final reconstruction
2038 * when the IO is done.
2040 * This is used both for reads from the higher layers and for
2041 * parity construction required to finish a rmw cycle.
2043 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2045 int bios_to_read = 0;
2046 struct bio_list bio_list;
2048 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
2053 bio_list_init(&bio_list);
2055 ret = alloc_rbio_pages(rbio);
2059 atomic_set(&rbio->error, 0);
2062 * read everything that hasn't failed. Thanks to the
2063 * stripe cache, it is possible that some or all of these
2064 * pages are going to be uptodate.
2066 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2067 if (rbio->faila == stripe || rbio->failb == stripe) {
2068 atomic_inc(&rbio->error);
2072 for (pagenr = 0; pagenr < nr_pages; pagenr++) {
2076 * the rmw code may have already read this
2079 p = rbio_stripe_page(rbio, stripe, pagenr);
2080 if (PageUptodate(p))
2083 ret = rbio_add_io_page(rbio, &bio_list,
2084 rbio_stripe_page(rbio, stripe, pagenr),
2085 stripe, pagenr, rbio->stripe_len);
2091 bios_to_read = bio_list_size(&bio_list);
2092 if (!bios_to_read) {
2094 * we might have no bios to read just because the pages
2095 * were up to date, or we might have no bios to read because
2096 * the devices were gone.
2098 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2099 __raid_recover_end_io(rbio);
2107 * the bbio may be freed once we submit the last bio. Make sure
2108 * not to touch it after that
2110 atomic_set(&rbio->stripes_pending, bios_to_read);
2112 bio = bio_list_pop(&bio_list);
2116 bio->bi_private = rbio;
2117 bio->bi_end_io = raid_recover_end_io;
2119 btrfs_bio_wq_end_io(rbio->fs_info, bio,
2120 BTRFS_WQ_ENDIO_RAID56);
2122 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2123 submit_bio(READ, bio);
2129 if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
2130 rbio_orig_end_io(rbio, -EIO, 0);
2135 * the main entry point for reads from the higher layers. This
2136 * is really only called when the normal read path had a failure,
2137 * so we assume the bio they send down corresponds to a failed part of the filesystem.
2140 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2141 struct btrfs_bio *bbio, u64 *raid_map,
2142 u64 stripe_len, int mirror_num, int hold_bbio)
2144 struct btrfs_raid_bio *rbio;
2147 rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
2149 __free_bbio_and_raid_map(bbio, raid_map, !hold_bbio);
2150 return PTR_ERR(rbio);
2154 set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags);
2155 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2156 bio_list_add(&rbio->bio_list, bio);
2157 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2159 rbio->faila = find_logical_bio_stripe(rbio, bio);
2160 if (rbio->faila == -1) {
2162 __free_bbio_and_raid_map(bbio, raid_map, !hold_bbio);
2168 * reconstruct from the q stripe if they are
2169 * asking for mirror 3
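 * (marking the P stripe, real_stripes - 2, as failed below forces
 * the recovery code to rebuild the data from the Q stripe)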
2171 if (mirror_num == 3)
2172 rbio->failb = rbio->real_stripes - 2;
2174 ret = lock_stripe_add(rbio);
2177 * __raid56_parity_recover will end the bio with
2178 * any errors it hits. We don't want to return
2179 * its error value up the stack because our caller
2180 * will end up calling bio_endio with any nonzero return.
2184 __raid56_parity_recover(rbio);
2186 * our rbio has been added to the list of
2187 * rbios that will be handled after the
2188 * current lock owner is done
2194 static void rmw_work(struct btrfs_work *work)
2196 struct btrfs_raid_bio *rbio;
2198 rbio = container_of(work, struct btrfs_raid_bio, work);
2199 raid56_rmw_stripe(rbio);
2202 static void read_rebuild_work(struct btrfs_work *work)
2204 struct btrfs_raid_bio *rbio;
2206 rbio = container_of(work, struct btrfs_raid_bio, work);
2207 __raid56_parity_recover(rbio);
2211 * The following code is used to scrub/replace the parity stripe
2213 * Note: We need to make sure all the pages added to the scrub/replace
2214 * raid bio are correct and will not be changed during the scrub/replace. That
2215 * is, those pages only hold metadata or file data with checksums.
2218 struct btrfs_raid_bio *
2219 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
2220 struct btrfs_bio *bbio, u64 *raid_map,
2221 u64 stripe_len, struct btrfs_device *scrub_dev,
2222 unsigned long *dbitmap, int stripe_nsectors)
2224 struct btrfs_raid_bio *rbio;
2227 rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
2230 bio_list_add(&rbio->bio_list, bio);
2232 * This is a special bio which is used to hold the completion handler
2233 * and make the scrub rbio similar to the other types
2235 ASSERT(!bio->bi_iter.bi_size);
2236 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2238 for (i = 0; i < rbio->real_stripes; i++) {
2239 if (bbio->stripes[i].dev == scrub_dev) {
2245 /* For now we only support a sectorsize equal to the page size */
2246 ASSERT(root->sectorsize == PAGE_SIZE);
2247 ASSERT(rbio->stripe_npages == stripe_nsectors);
2248 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2253 void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
2254 struct page *page, u64 logical)
2259 ASSERT(logical >= rbio->raid_map[0]);
2260 ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] +
2261 rbio->stripe_len * rbio->nr_data);
2262 stripe_offset = (int)(logical - rbio->raid_map[0]);
2263 index = stripe_offset >> PAGE_CACHE_SHIFT;
2264 rbio->bio_pages[index] = page;
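/*
 * index is the page offset from the start of the full stripe, the
 * same indexing scheme index_rbio_pages() and page_in_rbio() use
 * for bio_pages
 */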
2268 * We only scrub the parity for the horizontal stripes where we have
2269 * correct data, so we needn't allocate pages for all the stripes.
2271 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2278 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2279 for (i = 0; i < rbio->real_stripes; i++) {
2280 index = i * rbio->stripe_npages + bit;
2281 if (rbio->stripe_pages[index])
2284 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2287 rbio->stripe_pages[index] = page;
2288 ClearPageUptodate(page);
2295 * end io function used by finish_parity_scrub. When we finally
2296 * get here, we've written out the repaired parity pages
2298 static void raid_write_parity_end_io(struct bio *bio, int err)
2300 struct btrfs_raid_bio *rbio = bio->bi_private;
2303 fail_bio_stripe(rbio, bio);
2307 if (!atomic_dec_and_test(&rbio->stripes_pending))
2312 if (atomic_read(&rbio->error))
2315 rbio_orig_end_io(rbio, err, 0);
2318 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2321 struct btrfs_bio *bbio = rbio->bbio;
2322 void *pointers[rbio->real_stripes];
2323 DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2324 int nr_data = rbio->nr_data;
2329 struct page *p_page = NULL;
2330 struct page *q_page = NULL;
2331 struct bio_list bio_list;
2336 bio_list_init(&bio_list);
2338 if (rbio->real_stripes - rbio->nr_data == 1) {
2339 p_stripe = rbio->real_stripes - 1;
2340 } else if (rbio->real_stripes - rbio->nr_data == 2) {
2341 p_stripe = rbio->real_stripes - 2;
2342 q_stripe = rbio->real_stripes - 1;
2347 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2349 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2353 * Because the higher layers (the scrubber) are unlikely to
2354 * use this area of the disk again soon, don't cache it.
2357 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2362 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2365 SetPageUptodate(p_page);
2367 if (q_stripe != -1) {
2368 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2370 __free_page(p_page);
2373 SetPageUptodate(q_page);
2376 atomic_set(&rbio->error, 0);
2378 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2381 /* first collect one page from each data stripe */
2382 for (stripe = 0; stripe < nr_data; stripe++) {
2383 p = page_in_rbio(rbio, stripe, pagenr, 0);
2384 pointers[stripe] = kmap(p);
2387 /* then add the parity stripe */
2388 pointers[stripe++] = kmap(p_page);
2390 if (q_stripe != -1) {
2393 * raid6, add the qstripe and call the
2394 * library function to fill in our p/q
2396 pointers[stripe++] = kmap(q_page);
2398 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2402 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2403 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
2406 /* Check the scrubbed parity and repair it */
2407 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2409 if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
2410 memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
else
2412 /* Parity is right, no need to write it back */
2413 bitmap_clear(rbio->dbitmap, pagenr, 1);
2416 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2417 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2420 __free_page(p_page);
2422 __free_page(q_page);
2426 * time to start writing. Make bios for the repaired parity pages
2427 * (the bits still set in dbitmap) and ignore everything else.
2430 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2433 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2434 ret = rbio_add_io_page(rbio, &bio_list,
2435 page, rbio->scrubp, pagenr, rbio->stripe_len);
2443 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2446 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2447 ret = rbio_add_io_page(rbio, &bio_list, page,
2448 bbio->tgtdev_map[rbio->scrubp],
2449 pagenr, rbio->stripe_len);
2455 nr_data = bio_list_size(&bio_list);
2457 /* Every parity is right */
2458 rbio_orig_end_io(rbio, 0, 0);
2462 atomic_set(&rbio->stripes_pending, nr_data);
2465 bio = bio_list_pop(&bio_list);
2469 bio->bi_private = rbio;
2470 bio->bi_end_io = raid_write_parity_end_io;
2471 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2472 submit_bio(WRITE, bio);
2477 rbio_orig_end_io(rbio, -EIO, 0);
2480 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2482 if (stripe >= 0 && stripe < rbio->nr_data)
2488 * While we're doing the parity check and repair, we could have errors
2489 * in reading pages off the disk. This checks for errors and if we're
2490 * not able to read the page it'll trigger parity reconstruction. The
2491 * parity scrub will be finished after we've reconstructed the failed stripes.
2494 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2496 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2499 if (rbio->faila >= 0 || rbio->failb >= 0) {
2500 int dfail = 0, failp = -1;
2502 if (is_data_stripe(rbio, rbio->faila))
2504 else if (is_parity_stripe(rbio->faila))
2505 failp = rbio->faila;
2507 if (is_data_stripe(rbio, rbio->failb))
2509 else if (is_parity_stripe(rbio->failb))
2510 failp = rbio->failb;
2513 * Because we cannot use the parity that is being scrubbed to
2514 * repair the data, our ability to repair is reduced.
2515 * (In the case of RAID5, we cannot repair anything.)
2517 if (dfail > rbio->bbio->max_errors - 1)
2521 * If all the data is good and only the parity is bad, just
2522 * repair the parity.
2525 finish_parity_scrub(rbio, 0);
2530 * Here we have one corrupted data stripe and one corrupted
2531 * parity on RAID6. If the corrupted parity is the one being
2532 * scrubbed, luckily we can use the other parity to repair
2533 * the data; otherwise, we cannot repair the data stripe.
2535 if (failp != rbio->scrubp)
2538 __raid_recover_end_io(rbio);
2540 finish_parity_scrub(rbio, 1);
2545 rbio_orig_end_io(rbio, -EIO, 0);
2549 * end io for the read phase of the scrub cycle. All the bios here are physical
2550 * stripe bios we've read from the disk so we can recalculate the parity of the stripe.
2553 * This will usually kick off finish_parity_scrub once all the bios are read in, but it
2554 * may trigger parity reconstruction if we had any errors along the way
2556 static void raid56_parity_scrub_end_io(struct bio *bio, int err)
2558 struct btrfs_raid_bio *rbio = bio->bi_private;
2561 fail_bio_stripe(rbio, bio);
2563 set_bio_pages_uptodate(bio);
2567 if (!atomic_dec_and_test(&rbio->stripes_pending))
2571 * this will normally call finish_parity_scrub to start our write
2572 * but if there are any failed stripes we'll reconstruct from parity first
2575 validate_rbio_for_parity_scrub(rbio);
2578 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2580 int bios_to_read = 0;
2581 struct bio_list bio_list;
2587 ret = alloc_rbio_essential_pages(rbio);
2591 bio_list_init(&bio_list);
2593 atomic_set(&rbio->error, 0);
2595 * build a list of bios to read all the missing parts of this
2598 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2599 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2602 * we want to find all the pages missing from
2603 * the rbio and read them from the disk. If
2604 * page_in_rbio finds a page in the bio list
2605 * we don't need to read it off the stripe.
2607 page = page_in_rbio(rbio, stripe, pagenr, 1);
2611 page = rbio_stripe_page(rbio, stripe, pagenr);
2613 * the bio cache may have handed us an uptodate
2614 * page. If so, be happy and use it
2616 if (PageUptodate(page))
2619 ret = rbio_add_io_page(rbio, &bio_list, page,
2620 stripe, pagenr, rbio->stripe_len);
2626 bios_to_read = bio_list_size(&bio_list);
2627 if (!bios_to_read) {
2629 * this can happen if others have merged with
2630 * us, it means there is nothing left to read.
2631 * But if there are missing devices it may not be
2632 * safe to do the full stripe write yet.
2638 * the bbio may be freed once we submit the last bio. Make sure
2639 * not to touch it after that
2641 atomic_set(&rbio->stripes_pending, bios_to_read);
2643 bio = bio_list_pop(&bio_list);
2647 bio->bi_private = rbio;
2648 bio->bi_end_io = raid56_parity_scrub_end_io;
2650 btrfs_bio_wq_end_io(rbio->fs_info, bio,
2651 BTRFS_WQ_ENDIO_RAID56);
2653 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2654 submit_bio(READ, bio);
2656 /* the actual write will happen once the reads are done */
2660 rbio_orig_end_io(rbio, -EIO, 0);
2664 validate_rbio_for_parity_scrub(rbio);
2667 static void scrub_parity_work(struct btrfs_work *work)
2669 struct btrfs_raid_bio *rbio;
2671 rbio = container_of(work, struct btrfs_raid_bio, work);
2672 raid56_parity_scrub_stripe(rbio);
2675 static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2677 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2678 scrub_parity_work, NULL, NULL);
2680 btrfs_queue_work(rbio->fs_info->rmw_workers,
2684 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2686 if (!lock_stripe_add(rbio))
2687 async_scrub_parity(rbio);