]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - fs/btrfs/scrub.c
btrfs: scrub: setup all fields for sblock_to_check
[karo-tx-linux.git] / fs / btrfs / scrub.c
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
31 #include "raid56.h"
32
33 /*
34  * This is only the first step towards a full-features scrub. It reads all
35  * extent and super block and verifies the checksums. In case a bad checksum
36  * is found or the extent cannot be read, good data will be written back if
37  * any can be found.
38  *
39  * Future enhancements:
40  *  - In case an unrepairable extent is encountered, track which files are
41  *    affected and report them
42  *  - track and record media errors, throw out bad devices
43  *  - add a mode to also read unallocated space
44  */
45
46 struct scrub_block;
47 struct scrub_ctx;
48
49 /*
50  * the following three values only influence the performance.
51  * The last one configures the number of parallel and outstanding I/O
52  * operations. The first two values configure an upper limit for the number
53  * of (dynamically allocated) pages that are added to a bio.
54  */
55 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
58
59 /*
60  * the following value times PAGE_SIZE needs to be large enough to match the
61  * largest node/leaf/sector size that shall be supported.
62  * Values larger than BTRFS_STRIPE_LEN are not supported.
63  */
64 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
65
66 struct scrub_recover {
67         atomic_t                refs;
68         struct btrfs_bio        *bbio;
69         u64                     map_length;
70 };
71
72 struct scrub_page {
73         struct scrub_block      *sblock;
74         struct page             *page;
75         struct btrfs_device     *dev;
76         struct list_head        list;
77         u64                     flags;  /* extent flags */
78         u64                     generation;
79         u64                     logical;
80         u64                     physical;
81         u64                     physical_for_dev_replace;
82         atomic_t                refs;
83         struct {
84                 unsigned int    mirror_num:8;
85                 unsigned int    have_csum:1;
86                 unsigned int    io_error:1;
87         };
88         u8                      csum[BTRFS_CSUM_SIZE];
89
90         struct scrub_recover    *recover;
91 };
92
93 struct scrub_bio {
94         int                     index;
95         struct scrub_ctx        *sctx;
96         struct btrfs_device     *dev;
97         struct bio              *bio;
98         int                     err;
99         u64                     logical;
100         u64                     physical;
101 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
102         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
103 #else
104         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
105 #endif
106         int                     page_count;
107         int                     next_free;
108         struct btrfs_work       work;
109 };
110
111 struct scrub_block {
112         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
113         int                     page_count;
114         atomic_t                outstanding_pages;
115         atomic_t                refs; /* free mem on transition to zero */
116         struct scrub_ctx        *sctx;
117         struct scrub_parity     *sparity;
118         struct {
119                 unsigned int    header_error:1;
120                 unsigned int    checksum_error:1;
121                 unsigned int    no_io_error_seen:1;
122                 unsigned int    generation_error:1; /* also sets header_error */
123
124                 /* The following is for the data used to check parity */
125                 /* It is for the data with checksum */
126                 unsigned int    data_corrected:1;
127         };
128         struct btrfs_work       work;
129 };
130
131 /* Used for the chunks with parity stripe such RAID5/6 */
132 struct scrub_parity {
133         struct scrub_ctx        *sctx;
134
135         struct btrfs_device     *scrub_dev;
136
137         u64                     logic_start;
138
139         u64                     logic_end;
140
141         int                     nsectors;
142
143         int                     stripe_len;
144
145         atomic_t                refs;
146
147         struct list_head        spages;
148
149         /* Work of parity check and repair */
150         struct btrfs_work       work;
151
152         /* Mark the parity blocks which have data */
153         unsigned long           *dbitmap;
154
155         /*
156          * Mark the parity blocks which have data, but errors happen when
157          * read data or check data
158          */
159         unsigned long           *ebitmap;
160
161         unsigned long           bitmap[0];
162 };
163
164 struct scrub_wr_ctx {
165         struct scrub_bio *wr_curr_bio;
166         struct btrfs_device *tgtdev;
167         int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
168         atomic_t flush_all_writes;
169         struct mutex wr_lock;
170 };
171
172 struct scrub_ctx {
173         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
174         struct btrfs_root       *dev_root;
175         int                     first_free;
176         int                     curr;
177         atomic_t                bios_in_flight;
178         atomic_t                workers_pending;
179         spinlock_t              list_lock;
180         wait_queue_head_t       list_wait;
181         u16                     csum_size;
182         struct list_head        csum_list;
183         atomic_t                cancel_req;
184         int                     readonly;
185         int                     pages_per_rd_bio;
186         u32                     sectorsize;
187         u32                     nodesize;
188
189         int                     is_dev_replace;
190         struct scrub_wr_ctx     wr_ctx;
191
192         /*
193          * statistics
194          */
195         struct btrfs_scrub_progress stat;
196         spinlock_t              stat_lock;
197
198         /*
199          * Use a ref counter to avoid use-after-free issues. Scrub workers
200          * decrement bios_in_flight and workers_pending and then do a wakeup
201          * on the list_wait wait queue. We must ensure the main scrub task
202          * doesn't free the scrub context before or while the workers are
203          * doing the wakeup() call.
204          */
205         atomic_t                refs;
206 };
207
208 struct scrub_fixup_nodatasum {
209         struct scrub_ctx        *sctx;
210         struct btrfs_device     *dev;
211         u64                     logical;
212         struct btrfs_root       *root;
213         struct btrfs_work       work;
214         int                     mirror_num;
215 };
216
217 struct scrub_nocow_inode {
218         u64                     inum;
219         u64                     offset;
220         u64                     root;
221         struct list_head        list;
222 };
223
224 struct scrub_copy_nocow_ctx {
225         struct scrub_ctx        *sctx;
226         u64                     logical;
227         u64                     len;
228         int                     mirror_num;
229         u64                     physical_for_dev_replace;
230         struct list_head        inodes;
231         struct btrfs_work       work;
232 };
233
234 struct scrub_warning {
235         struct btrfs_path       *path;
236         u64                     extent_item_size;
237         const char              *errstr;
238         sector_t                sector;
239         u64                     logical;
240         struct btrfs_device     *dev;
241 };
242
243 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
244 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
245 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
246 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
247 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
248 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
249                                      struct scrub_block *sblocks_for_recheck);
250 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
251                                 struct scrub_block *sblock, int is_metadata,
252                                 int have_csum, u8 *csum, u64 generation,
253                                 u16 csum_size, int retry_failed_mirror);
254 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
255                                          struct scrub_block *sblock,
256                                          int is_metadata, int have_csum,
257                                          const u8 *csum, u64 generation,
258                                          u16 csum_size);
259 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
260                                              struct scrub_block *sblock_good);
261 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
262                                             struct scrub_block *sblock_good,
263                                             int page_num, int force_write);
264 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
265 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
266                                            int page_num);
267 static int scrub_checksum_data(struct scrub_block *sblock);
268 static int scrub_checksum_tree_block(struct scrub_block *sblock);
269 static int scrub_checksum_super(struct scrub_block *sblock);
270 static void scrub_block_get(struct scrub_block *sblock);
271 static void scrub_block_put(struct scrub_block *sblock);
272 static void scrub_page_get(struct scrub_page *spage);
273 static void scrub_page_put(struct scrub_page *spage);
274 static void scrub_parity_get(struct scrub_parity *sparity);
275 static void scrub_parity_put(struct scrub_parity *sparity);
276 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
277                                     struct scrub_page *spage);
278 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
279                        u64 physical, struct btrfs_device *dev, u64 flags,
280                        u64 gen, int mirror_num, u8 *csum, int force,
281                        u64 physical_for_dev_replace);
282 static void scrub_bio_end_io(struct bio *bio);
283 static void scrub_bio_end_io_worker(struct btrfs_work *work);
284 static void scrub_block_complete(struct scrub_block *sblock);
285 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
286                                u64 extent_logical, u64 extent_len,
287                                u64 *extent_physical,
288                                struct btrfs_device **extent_dev,
289                                int *extent_mirror_num);
290 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
291                               struct scrub_wr_ctx *wr_ctx,
292                               struct btrfs_fs_info *fs_info,
293                               struct btrfs_device *dev,
294                               int is_dev_replace);
295 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
296 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
297                                     struct scrub_page *spage);
298 static void scrub_wr_submit(struct scrub_ctx *sctx);
299 static void scrub_wr_bio_end_io(struct bio *bio);
300 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
301 static int write_page_nocow(struct scrub_ctx *sctx,
302                             u64 physical_for_dev_replace, struct page *page);
303 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
304                                       struct scrub_copy_nocow_ctx *ctx);
305 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
306                             int mirror_num, u64 physical_for_dev_replace);
307 static void copy_nocow_pages_worker(struct btrfs_work *work);
308 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
309 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
310 static void scrub_put_ctx(struct scrub_ctx *sctx);
311
312
313 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
314 {
315         atomic_inc(&sctx->refs);
316         atomic_inc(&sctx->bios_in_flight);
317 }
318
319 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
320 {
321         atomic_dec(&sctx->bios_in_flight);
322         wake_up(&sctx->list_wait);
323         scrub_put_ctx(sctx);
324 }
325
326 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
327 {
328         while (atomic_read(&fs_info->scrub_pause_req)) {
329                 mutex_unlock(&fs_info->scrub_lock);
330                 wait_event(fs_info->scrub_pause_wait,
331                    atomic_read(&fs_info->scrub_pause_req) == 0);
332                 mutex_lock(&fs_info->scrub_lock);
333         }
334 }
335
336 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
337 {
338         atomic_inc(&fs_info->scrubs_paused);
339         wake_up(&fs_info->scrub_pause_wait);
340 }
341
342 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
343 {
344         mutex_lock(&fs_info->scrub_lock);
345         __scrub_blocked_if_needed(fs_info);
346         atomic_dec(&fs_info->scrubs_paused);
347         mutex_unlock(&fs_info->scrub_lock);
348
349         wake_up(&fs_info->scrub_pause_wait);
350 }
351
352 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
353 {
354         scrub_pause_on(fs_info);
355         scrub_pause_off(fs_info);
356 }
357
358 /*
359  * used for workers that require transaction commits (i.e., for the
360  * NOCOW case)
361  */
362 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
363 {
364         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
365
366         atomic_inc(&sctx->refs);
367         /*
368          * increment scrubs_running to prevent cancel requests from
369          * completing as long as a worker is running. we must also
370          * increment scrubs_paused to prevent deadlocking on pause
371          * requests used for transactions commits (as the worker uses a
372          * transaction context). it is safe to regard the worker
373          * as paused for all matters practical. effectively, we only
374          * avoid cancellation requests from completing.
375          */
376         mutex_lock(&fs_info->scrub_lock);
377         atomic_inc(&fs_info->scrubs_running);
378         atomic_inc(&fs_info->scrubs_paused);
379         mutex_unlock(&fs_info->scrub_lock);
380
381         /*
382          * check if @scrubs_running=@scrubs_paused condition
383          * inside wait_event() is not an atomic operation.
384          * which means we may inc/dec @scrub_running/paused
385          * at any time. Let's wake up @scrub_pause_wait as
386          * much as we can to let commit transaction blocked less.
387          */
388         wake_up(&fs_info->scrub_pause_wait);
389
390         atomic_inc(&sctx->workers_pending);
391 }
392
393 /* used for workers that require transaction commits */
394 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
395 {
396         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
397
398         /*
399          * see scrub_pending_trans_workers_inc() why we're pretending
400          * to be paused in the scrub counters
401          */
402         mutex_lock(&fs_info->scrub_lock);
403         atomic_dec(&fs_info->scrubs_running);
404         atomic_dec(&fs_info->scrubs_paused);
405         mutex_unlock(&fs_info->scrub_lock);
406         atomic_dec(&sctx->workers_pending);
407         wake_up(&fs_info->scrub_pause_wait);
408         wake_up(&sctx->list_wait);
409         scrub_put_ctx(sctx);
410 }
411
412 static void scrub_free_csums(struct scrub_ctx *sctx)
413 {
414         while (!list_empty(&sctx->csum_list)) {
415                 struct btrfs_ordered_sum *sum;
416                 sum = list_first_entry(&sctx->csum_list,
417                                        struct btrfs_ordered_sum, list);
418                 list_del(&sum->list);
419                 kfree(sum);
420         }
421 }
422
423 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
424 {
425         int i;
426
427         if (!sctx)
428                 return;
429
430         scrub_free_wr_ctx(&sctx->wr_ctx);
431
432         /* this can happen when scrub is cancelled */
433         if (sctx->curr != -1) {
434                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
435
436                 for (i = 0; i < sbio->page_count; i++) {
437                         WARN_ON(!sbio->pagev[i]->page);
438                         scrub_block_put(sbio->pagev[i]->sblock);
439                 }
440                 bio_put(sbio->bio);
441         }
442
443         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
444                 struct scrub_bio *sbio = sctx->bios[i];
445
446                 if (!sbio)
447                         break;
448                 kfree(sbio);
449         }
450
451         scrub_free_csums(sctx);
452         kfree(sctx);
453 }
454
455 static void scrub_put_ctx(struct scrub_ctx *sctx)
456 {
457         if (atomic_dec_and_test(&sctx->refs))
458                 scrub_free_ctx(sctx);
459 }
460
461 static noinline_for_stack
462 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
463 {
464         struct scrub_ctx *sctx;
465         int             i;
466         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
467         int ret;
468
469         sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
470         if (!sctx)
471                 goto nomem;
472         atomic_set(&sctx->refs, 1);
473         sctx->is_dev_replace = is_dev_replace;
474         sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
475         sctx->curr = -1;
476         sctx->dev_root = dev->dev_root;
477         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
478                 struct scrub_bio *sbio;
479
480                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
481                 if (!sbio)
482                         goto nomem;
483                 sctx->bios[i] = sbio;
484
485                 sbio->index = i;
486                 sbio->sctx = sctx;
487                 sbio->page_count = 0;
488                 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
489                                 scrub_bio_end_io_worker, NULL, NULL);
490
491                 if (i != SCRUB_BIOS_PER_SCTX - 1)
492                         sctx->bios[i]->next_free = i + 1;
493                 else
494                         sctx->bios[i]->next_free = -1;
495         }
496         sctx->first_free = 0;
497         sctx->nodesize = dev->dev_root->nodesize;
498         sctx->sectorsize = dev->dev_root->sectorsize;
499         atomic_set(&sctx->bios_in_flight, 0);
500         atomic_set(&sctx->workers_pending, 0);
501         atomic_set(&sctx->cancel_req, 0);
502         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
503         INIT_LIST_HEAD(&sctx->csum_list);
504
505         spin_lock_init(&sctx->list_lock);
506         spin_lock_init(&sctx->stat_lock);
507         init_waitqueue_head(&sctx->list_wait);
508
509         ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
510                                  fs_info->dev_replace.tgtdev, is_dev_replace);
511         if (ret) {
512                 scrub_free_ctx(sctx);
513                 return ERR_PTR(ret);
514         }
515         return sctx;
516
517 nomem:
518         scrub_free_ctx(sctx);
519         return ERR_PTR(-ENOMEM);
520 }
521
522 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
523                                      void *warn_ctx)
524 {
525         u64 isize;
526         u32 nlink;
527         int ret;
528         int i;
529         struct extent_buffer *eb;
530         struct btrfs_inode_item *inode_item;
531         struct scrub_warning *swarn = warn_ctx;
532         struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
533         struct inode_fs_paths *ipath = NULL;
534         struct btrfs_root *local_root;
535         struct btrfs_key root_key;
536         struct btrfs_key key;
537
538         root_key.objectid = root;
539         root_key.type = BTRFS_ROOT_ITEM_KEY;
540         root_key.offset = (u64)-1;
541         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
542         if (IS_ERR(local_root)) {
543                 ret = PTR_ERR(local_root);
544                 goto err;
545         }
546
547         /*
548          * this makes the path point to (inum INODE_ITEM ioff)
549          */
550         key.objectid = inum;
551         key.type = BTRFS_INODE_ITEM_KEY;
552         key.offset = 0;
553
554         ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
555         if (ret) {
556                 btrfs_release_path(swarn->path);
557                 goto err;
558         }
559
560         eb = swarn->path->nodes[0];
561         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
562                                         struct btrfs_inode_item);
563         isize = btrfs_inode_size(eb, inode_item);
564         nlink = btrfs_inode_nlink(eb, inode_item);
565         btrfs_release_path(swarn->path);
566
567         ipath = init_ipath(4096, local_root, swarn->path);
568         if (IS_ERR(ipath)) {
569                 ret = PTR_ERR(ipath);
570                 ipath = NULL;
571                 goto err;
572         }
573         ret = paths_from_inode(inum, ipath);
574
575         if (ret < 0)
576                 goto err;
577
578         /*
579          * we deliberately ignore the bit ipath might have been too small to
580          * hold all of the paths here
581          */
582         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
583                 btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
584                         "%s, sector %llu, root %llu, inode %llu, offset %llu, "
585                         "length %llu, links %u (path: %s)", swarn->errstr,
586                         swarn->logical, rcu_str_deref(swarn->dev->name),
587                         (unsigned long long)swarn->sector, root, inum, offset,
588                         min(isize - offset, (u64)PAGE_SIZE), nlink,
589                         (char *)(unsigned long)ipath->fspath->val[i]);
590
591         free_ipath(ipath);
592         return 0;
593
594 err:
595         btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
596                 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
597                 "resolving failed with ret=%d", swarn->errstr,
598                 swarn->logical, rcu_str_deref(swarn->dev->name),
599                 (unsigned long long)swarn->sector, root, inum, offset, ret);
600
601         free_ipath(ipath);
602         return 0;
603 }
604
605 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
606 {
607         struct btrfs_device *dev;
608         struct btrfs_fs_info *fs_info;
609         struct btrfs_path *path;
610         struct btrfs_key found_key;
611         struct extent_buffer *eb;
612         struct btrfs_extent_item *ei;
613         struct scrub_warning swarn;
614         unsigned long ptr = 0;
615         u64 extent_item_pos;
616         u64 flags = 0;
617         u64 ref_root;
618         u32 item_size;
619         u8 ref_level;
620         int ret;
621
622         WARN_ON(sblock->page_count < 1);
623         dev = sblock->pagev[0]->dev;
624         fs_info = sblock->sctx->dev_root->fs_info;
625
626         path = btrfs_alloc_path();
627         if (!path)
628                 return;
629
630         swarn.sector = (sblock->pagev[0]->physical) >> 9;
631         swarn.logical = sblock->pagev[0]->logical;
632         swarn.errstr = errstr;
633         swarn.dev = NULL;
634
635         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
636                                   &flags);
637         if (ret < 0)
638                 goto out;
639
640         extent_item_pos = swarn.logical - found_key.objectid;
641         swarn.extent_item_size = found_key.offset;
642
643         eb = path->nodes[0];
644         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
645         item_size = btrfs_item_size_nr(eb, path->slots[0]);
646
647         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
648                 do {
649                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
650                                                       item_size, &ref_root,
651                                                       &ref_level);
652                         btrfs_warn_in_rcu(fs_info,
653                                 "%s at logical %llu on dev %s, "
654                                 "sector %llu: metadata %s (level %d) in tree "
655                                 "%llu", errstr, swarn.logical,
656                                 rcu_str_deref(dev->name),
657                                 (unsigned long long)swarn.sector,
658                                 ref_level ? "node" : "leaf",
659                                 ret < 0 ? -1 : ref_level,
660                                 ret < 0 ? -1 : ref_root);
661                 } while (ret != 1);
662                 btrfs_release_path(path);
663         } else {
664                 btrfs_release_path(path);
665                 swarn.path = path;
666                 swarn.dev = dev;
667                 iterate_extent_inodes(fs_info, found_key.objectid,
668                                         extent_item_pos, 1,
669                                         scrub_print_warning_inode, &swarn);
670         }
671
672 out:
673         btrfs_free_path(path);
674 }
675
676 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
677 {
678         struct page *page = NULL;
679         unsigned long index;
680         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
681         int ret;
682         int corrected = 0;
683         struct btrfs_key key;
684         struct inode *inode = NULL;
685         struct btrfs_fs_info *fs_info;
686         u64 end = offset + PAGE_SIZE - 1;
687         struct btrfs_root *local_root;
688         int srcu_index;
689
690         key.objectid = root;
691         key.type = BTRFS_ROOT_ITEM_KEY;
692         key.offset = (u64)-1;
693
694         fs_info = fixup->root->fs_info;
695         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
696
697         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
698         if (IS_ERR(local_root)) {
699                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
700                 return PTR_ERR(local_root);
701         }
702
703         key.type = BTRFS_INODE_ITEM_KEY;
704         key.objectid = inum;
705         key.offset = 0;
706         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
707         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
708         if (IS_ERR(inode))
709                 return PTR_ERR(inode);
710
711         index = offset >> PAGE_CACHE_SHIFT;
712
713         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
714         if (!page) {
715                 ret = -ENOMEM;
716                 goto out;
717         }
718
719         if (PageUptodate(page)) {
720                 if (PageDirty(page)) {
721                         /*
722                          * we need to write the data to the defect sector. the
723                          * data that was in that sector is not in memory,
724                          * because the page was modified. we must not write the
725                          * modified page to that sector.
726                          *
727                          * TODO: what could be done here: wait for the delalloc
728                          *       runner to write out that page (might involve
729                          *       COW) and see whether the sector is still
730                          *       referenced afterwards.
731                          *
732                          * For the meantime, we'll treat this error
733                          * incorrectable, although there is a chance that a
734                          * later scrub will find the bad sector again and that
735                          * there's no dirty page in memory, then.
736                          */
737                         ret = -EIO;
738                         goto out;
739                 }
740                 ret = repair_io_failure(inode, offset, PAGE_SIZE,
741                                         fixup->logical, page,
742                                         offset - page_offset(page),
743                                         fixup->mirror_num);
744                 unlock_page(page);
745                 corrected = !ret;
746         } else {
747                 /*
748                  * we need to get good data first. the general readpage path
749                  * will call repair_io_failure for us, we just have to make
750                  * sure we read the bad mirror.
751                  */
752                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
753                                         EXTENT_DAMAGED, GFP_NOFS);
754                 if (ret) {
755                         /* set_extent_bits should give proper error */
756                         WARN_ON(ret > 0);
757                         if (ret > 0)
758                                 ret = -EFAULT;
759                         goto out;
760                 }
761
762                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
763                                                 btrfs_get_extent,
764                                                 fixup->mirror_num);
765                 wait_on_page_locked(page);
766
767                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
768                                                 end, EXTENT_DAMAGED, 0, NULL);
769                 if (!corrected)
770                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
771                                                 EXTENT_DAMAGED, GFP_NOFS);
772         }
773
774 out:
775         if (page)
776                 put_page(page);
777
778         iput(inode);
779
780         if (ret < 0)
781                 return ret;
782
783         if (ret == 0 && corrected) {
784                 /*
785                  * we only need to call readpage for one of the inodes belonging
786                  * to this extent. so make iterate_extent_inodes stop
787                  */
788                 return 1;
789         }
790
791         return -EIO;
792 }
793
794 static void scrub_fixup_nodatasum(struct btrfs_work *work)
795 {
796         int ret;
797         struct scrub_fixup_nodatasum *fixup;
798         struct scrub_ctx *sctx;
799         struct btrfs_trans_handle *trans = NULL;
800         struct btrfs_path *path;
801         int uncorrectable = 0;
802
803         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
804         sctx = fixup->sctx;
805
806         path = btrfs_alloc_path();
807         if (!path) {
808                 spin_lock(&sctx->stat_lock);
809                 ++sctx->stat.malloc_errors;
810                 spin_unlock(&sctx->stat_lock);
811                 uncorrectable = 1;
812                 goto out;
813         }
814
815         trans = btrfs_join_transaction(fixup->root);
816         if (IS_ERR(trans)) {
817                 uncorrectable = 1;
818                 goto out;
819         }
820
821         /*
822          * the idea is to trigger a regular read through the standard path. we
823          * read a page from the (failed) logical address by specifying the
824          * corresponding copynum of the failed sector. thus, that readpage is
825          * expected to fail.
826          * that is the point where on-the-fly error correction will kick in
827          * (once it's finished) and rewrite the failed sector if a good copy
828          * can be found.
829          */
830         ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
831                                                 path, scrub_fixup_readpage,
832                                                 fixup);
833         if (ret < 0) {
834                 uncorrectable = 1;
835                 goto out;
836         }
837         WARN_ON(ret != 1);
838
839         spin_lock(&sctx->stat_lock);
840         ++sctx->stat.corrected_errors;
841         spin_unlock(&sctx->stat_lock);
842
843 out:
844         if (trans && !IS_ERR(trans))
845                 btrfs_end_transaction(trans, fixup->root);
846         if (uncorrectable) {
847                 spin_lock(&sctx->stat_lock);
848                 ++sctx->stat.uncorrectable_errors;
849                 spin_unlock(&sctx->stat_lock);
850                 btrfs_dev_replace_stats_inc(
851                         &sctx->dev_root->fs_info->dev_replace.
852                         num_uncorrectable_read_errors);
853                 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
854                     "unable to fixup (nodatasum) error at logical %llu on dev %s",
855                         fixup->logical, rcu_str_deref(fixup->dev->name));
856         }
857
858         btrfs_free_path(path);
859         kfree(fixup);
860
861         scrub_pending_trans_workers_dec(sctx);
862 }
863
864 static inline void scrub_get_recover(struct scrub_recover *recover)
865 {
866         atomic_inc(&recover->refs);
867 }
868
869 static inline void scrub_put_recover(struct scrub_recover *recover)
870 {
871         if (atomic_dec_and_test(&recover->refs)) {
872                 btrfs_put_bbio(recover->bbio);
873                 kfree(recover);
874         }
875 }
876
877 /*
878  * scrub_handle_errored_block gets called when either verification of the
879  * pages failed or the bio failed to read, e.g. with EIO. In the latter
880  * case, this function handles all pages in the bio, even though only one
881  * may be bad.
882  * The goal of this function is to repair the errored block by using the
883  * contents of one of the mirrors.
884  */
885 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
886 {
887         struct scrub_ctx *sctx = sblock_to_check->sctx;
888         struct btrfs_device *dev;
889         struct btrfs_fs_info *fs_info;
890         u64 length;
891         u64 logical;
892         u64 generation;
893         unsigned int failed_mirror_index;
894         unsigned int is_metadata;
895         unsigned int have_csum;
896         u8 *csum;
897         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
898         struct scrub_block *sblock_bad;
899         int ret;
900         int mirror_index;
901         int page_num;
902         int success;
903         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
904                                       DEFAULT_RATELIMIT_BURST);
905
906         BUG_ON(sblock_to_check->page_count < 1);
907         fs_info = sctx->dev_root->fs_info;
908         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
909                 /*
910                  * if we find an error in a super block, we just report it.
911                  * They will get written with the next transaction commit
912                  * anyway
913                  */
914                 spin_lock(&sctx->stat_lock);
915                 ++sctx->stat.super_errors;
916                 spin_unlock(&sctx->stat_lock);
917                 return 0;
918         }
919         length = sblock_to_check->page_count * PAGE_SIZE;
920         logical = sblock_to_check->pagev[0]->logical;
921         generation = sblock_to_check->pagev[0]->generation;
922         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
923         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
924         is_metadata = !(sblock_to_check->pagev[0]->flags &
925                         BTRFS_EXTENT_FLAG_DATA);
926         have_csum = sblock_to_check->pagev[0]->have_csum;
927         csum = sblock_to_check->pagev[0]->csum;
928         dev = sblock_to_check->pagev[0]->dev;
929
930         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
931                 sblocks_for_recheck = NULL;
932                 goto nodatasum_case;
933         }
934
935         /*
936          * read all mirrors one after the other. This includes to
937          * re-read the extent or metadata block that failed (that was
938          * the cause that this fixup code is called) another time,
939          * page by page this time in order to know which pages
940          * caused I/O errors and which ones are good (for all mirrors).
941          * It is the goal to handle the situation when more than one
942          * mirror contains I/O errors, but the errors do not
943          * overlap, i.e. the data can be repaired by selecting the
944          * pages from those mirrors without I/O error on the
945          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
946          * would be that mirror #1 has an I/O error on the first page,
947          * the second page is good, and mirror #2 has an I/O error on
948          * the second page, but the first page is good.
949          * Then the first page of the first mirror can be repaired by
950          * taking the first page of the second mirror, and the
951          * second page of the second mirror can be repaired by
952          * copying the contents of the 2nd page of the 1st mirror.
953          * One more note: if the pages of one mirror contain I/O
954          * errors, the checksum cannot be verified. In order to get
955          * the best data for repairing, the first attempt is to find
956          * a mirror without I/O errors and with a validated checksum.
957          * Only if this is not possible, the pages are picked from
958          * mirrors with I/O errors without considering the checksum.
959          * If the latter is the case, at the end, the checksum of the
960          * repaired area is verified in order to correctly maintain
961          * the statistics.
962          */
963
964         sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
965                                       sizeof(*sblocks_for_recheck), GFP_NOFS);
966         if (!sblocks_for_recheck) {
967                 spin_lock(&sctx->stat_lock);
968                 sctx->stat.malloc_errors++;
969                 sctx->stat.read_errors++;
970                 sctx->stat.uncorrectable_errors++;
971                 spin_unlock(&sctx->stat_lock);
972                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
973                 goto out;
974         }
975
976         /* setup the context, map the logical blocks and alloc the pages */
977         ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
978         if (ret) {
979                 spin_lock(&sctx->stat_lock);
980                 sctx->stat.read_errors++;
981                 sctx->stat.uncorrectable_errors++;
982                 spin_unlock(&sctx->stat_lock);
983                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
984                 goto out;
985         }
986         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
987         sblock_bad = sblocks_for_recheck + failed_mirror_index;
988
989         /* build and submit the bios for the failed mirror, check checksums */
990         scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
991                             csum, generation, sctx->csum_size, 1);
992
993         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
994             sblock_bad->no_io_error_seen) {
995                 /*
996                  * the error disappeared after reading page by page, or
997                  * the area was part of a huge bio and other parts of the
998                  * bio caused I/O errors, or the block layer merged several
999                  * read requests into one and the error is caused by a
1000                  * different bio (usually one of the two latter cases is
1001                  * the cause)
1002                  */
1003                 spin_lock(&sctx->stat_lock);
1004                 sctx->stat.unverified_errors++;
1005                 sblock_to_check->data_corrected = 1;
1006                 spin_unlock(&sctx->stat_lock);
1007
1008                 if (sctx->is_dev_replace)
1009                         scrub_write_block_to_dev_replace(sblock_bad);
1010                 goto out;
1011         }
1012
1013         if (!sblock_bad->no_io_error_seen) {
1014                 spin_lock(&sctx->stat_lock);
1015                 sctx->stat.read_errors++;
1016                 spin_unlock(&sctx->stat_lock);
1017                 if (__ratelimit(&_rs))
1018                         scrub_print_warning("i/o error", sblock_to_check);
1019                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1020         } else if (sblock_bad->checksum_error) {
1021                 spin_lock(&sctx->stat_lock);
1022                 sctx->stat.csum_errors++;
1023                 spin_unlock(&sctx->stat_lock);
1024                 if (__ratelimit(&_rs))
1025                         scrub_print_warning("checksum error", sblock_to_check);
1026                 btrfs_dev_stat_inc_and_print(dev,
1027                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
1028         } else if (sblock_bad->header_error) {
1029                 spin_lock(&sctx->stat_lock);
1030                 sctx->stat.verify_errors++;
1031                 spin_unlock(&sctx->stat_lock);
1032                 if (__ratelimit(&_rs))
1033                         scrub_print_warning("checksum/header error",
1034                                             sblock_to_check);
1035                 if (sblock_bad->generation_error)
1036                         btrfs_dev_stat_inc_and_print(dev,
1037                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1038                 else
1039                         btrfs_dev_stat_inc_and_print(dev,
1040                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1041         }
1042
1043         if (sctx->readonly) {
1044                 ASSERT(!sctx->is_dev_replace);
1045                 goto out;
1046         }
1047
1048         if (!is_metadata && !have_csum) {
1049                 struct scrub_fixup_nodatasum *fixup_nodatasum;
1050
1051                 WARN_ON(sctx->is_dev_replace);
1052
1053 nodatasum_case:
1054
1055                 /*
1056                  * !is_metadata and !have_csum, this means that the data
1057                  * might not be COW'ed, that it might be modified
1058                  * concurrently. The general strategy to work on the
1059                  * commit root does not help in the case when COW is not
1060                  * used.
1061                  */
1062                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1063                 if (!fixup_nodatasum)
1064                         goto did_not_correct_error;
1065                 fixup_nodatasum->sctx = sctx;
1066                 fixup_nodatasum->dev = dev;
1067                 fixup_nodatasum->logical = logical;
1068                 fixup_nodatasum->root = fs_info->extent_root;
1069                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1070                 scrub_pending_trans_workers_inc(sctx);
1071                 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1072                                 scrub_fixup_nodatasum, NULL, NULL);
1073                 btrfs_queue_work(fs_info->scrub_workers,
1074                                  &fixup_nodatasum->work);
1075                 goto out;
1076         }
1077
1078         /*
1079          * now build and submit the bios for the other mirrors, check
1080          * checksums.
1081          * First try to pick the mirror which is completely without I/O
1082          * errors and also does not have a checksum error.
1083          * If one is found, and if a checksum is present, the full block
1084          * that is known to contain an error is rewritten. Afterwards
1085          * the block is known to be corrected.
1086          * If a mirror is found which is completely correct, and no
1087          * checksum is present, only those pages are rewritten that had
1088          * an I/O error in the block to be repaired, since it cannot be
1089          * determined, which copy of the other pages is better (and it
1090          * could happen otherwise that a correct page would be
1091          * overwritten by a bad one).
1092          */
1093         for (mirror_index = 0;
1094              mirror_index < BTRFS_MAX_MIRRORS &&
1095              sblocks_for_recheck[mirror_index].page_count > 0;
1096              mirror_index++) {
1097                 struct scrub_block *sblock_other;
1098
1099                 if (mirror_index == failed_mirror_index)
1100                         continue;
1101                 sblock_other = sblocks_for_recheck + mirror_index;
1102
1103                 /* build and submit the bios, check checksums */
1104                 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1105                                     have_csum, csum, generation,
1106                                     sctx->csum_size, 0);
1107
1108                 if (!sblock_other->header_error &&
1109                     !sblock_other->checksum_error &&
1110                     sblock_other->no_io_error_seen) {
1111                         if (sctx->is_dev_replace) {
1112                                 scrub_write_block_to_dev_replace(sblock_other);
1113                                 goto corrected_error;
1114                         } else {
1115                                 ret = scrub_repair_block_from_good_copy(
1116                                                 sblock_bad, sblock_other);
1117                                 if (!ret)
1118                                         goto corrected_error;
1119                         }
1120                 }
1121         }
1122
1123         if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1124                 goto did_not_correct_error;
1125
1126         /*
1127          * In case of I/O errors in the area that is supposed to be
1128          * repaired, continue by picking good copies of those pages.
1129          * Select the good pages from mirrors to rewrite bad pages from
1130          * the area to fix. Afterwards verify the checksum of the block
1131          * that is supposed to be repaired. This verification step is
1132          * only done for the purpose of statistic counting and for the
1133          * final scrub report, whether errors remain.
1134          * A perfect algorithm could make use of the checksum and try
1135          * all possible combinations of pages from the different mirrors
1136          * until the checksum verification succeeds. For example, when
1137          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1138          * of mirror #2 is readable but the final checksum test fails,
1139          * then the 2nd page of mirror #3 could be tried, whether now
1140          * the final checksum succeedes. But this would be a rare
1141          * exception and is therefore not implemented. At least it is
1142          * avoided that the good copy is overwritten.
1143          * A more useful improvement would be to pick the sectors
1144          * without I/O error based on sector sizes (512 bytes on legacy
1145          * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
1146          * mirror could be repaired by taking 512 byte of a different
1147          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1148          * area are unreadable.
1149          */
1150         success = 1;
1151         for (page_num = 0; page_num < sblock_bad->page_count;
1152              page_num++) {
1153                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1154                 struct scrub_block *sblock_other = NULL;
1155
1156                 /* skip no-io-error page in scrub */
1157                 if (!page_bad->io_error && !sctx->is_dev_replace)
1158                         continue;
1159
1160                 /* try to find no-io-error page in mirrors */
1161                 if (page_bad->io_error) {
1162                         for (mirror_index = 0;
1163                              mirror_index < BTRFS_MAX_MIRRORS &&
1164                              sblocks_for_recheck[mirror_index].page_count > 0;
1165                              mirror_index++) {
1166                                 if (!sblocks_for_recheck[mirror_index].
1167                                     pagev[page_num]->io_error) {
1168                                         sblock_other = sblocks_for_recheck +
1169                                                        mirror_index;
1170                                         break;
1171                                 }
1172                         }
1173                         if (!sblock_other)
1174                                 success = 0;
1175                 }
1176
1177                 if (sctx->is_dev_replace) {
1178                         /*
1179                          * did not find a mirror to fetch the page
1180                          * from. scrub_write_page_to_dev_replace()
1181                          * handles this case (page->io_error), by
1182                          * filling the block with zeros before
1183                          * submitting the write request
1184                          */
1185                         if (!sblock_other)
1186                                 sblock_other = sblock_bad;
1187
1188                         if (scrub_write_page_to_dev_replace(sblock_other,
1189                                                             page_num) != 0) {
1190                                 btrfs_dev_replace_stats_inc(
1191                                         &sctx->dev_root->
1192                                         fs_info->dev_replace.
1193                                         num_write_errors);
1194                                 success = 0;
1195                         }
1196                 } else if (sblock_other) {
1197                         ret = scrub_repair_page_from_good_copy(sblock_bad,
1198                                                                sblock_other,
1199                                                                page_num, 0);
1200                         if (0 == ret)
1201                                 page_bad->io_error = 0;
1202                         else
1203                                 success = 0;
1204                 }
1205         }
1206
1207         if (success && !sctx->is_dev_replace) {
1208                 if (is_metadata || have_csum) {
1209                         /*
1210                          * need to verify the checksum now that all
1211                          * sectors on disk are repaired (the write
1212                          * request for data to be repaired is on its way).
1213                          * Just be lazy and use scrub_recheck_block()
1214                          * which re-reads the data before the checksum
1215                          * is verified, but most likely the data comes out
1216                          * of the page cache.
1217                          */
1218                         scrub_recheck_block(fs_info, sblock_bad,
1219                                             is_metadata, have_csum, csum,
1220                                             generation, sctx->csum_size, 1);
1221                         if (!sblock_bad->header_error &&
1222                             !sblock_bad->checksum_error &&
1223                             sblock_bad->no_io_error_seen)
1224                                 goto corrected_error;
1225                         else
1226                                 goto did_not_correct_error;
1227                 } else {
1228 corrected_error:
1229                         spin_lock(&sctx->stat_lock);
1230                         sctx->stat.corrected_errors++;
1231                         sblock_to_check->data_corrected = 1;
1232                         spin_unlock(&sctx->stat_lock);
1233                         btrfs_err_rl_in_rcu(fs_info,
1234                                 "fixed up error at logical %llu on dev %s",
1235                                 logical, rcu_str_deref(dev->name));
1236                 }
1237         } else {
1238 did_not_correct_error:
1239                 spin_lock(&sctx->stat_lock);
1240                 sctx->stat.uncorrectable_errors++;
1241                 spin_unlock(&sctx->stat_lock);
1242                 btrfs_err_rl_in_rcu(fs_info,
1243                         "unable to fixup (regular) error at logical %llu on dev %s",
1244                         logical, rcu_str_deref(dev->name));
1245         }
1246
1247 out:
1248         if (sblocks_for_recheck) {
1249                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1250                      mirror_index++) {
1251                         struct scrub_block *sblock = sblocks_for_recheck +
1252                                                      mirror_index;
1253                         struct scrub_recover *recover;
1254                         int page_index;
1255
1256                         for (page_index = 0; page_index < sblock->page_count;
1257                              page_index++) {
1258                                 sblock->pagev[page_index]->sblock = NULL;
1259                                 recover = sblock->pagev[page_index]->recover;
1260                                 if (recover) {
1261                                         scrub_put_recover(recover);
1262                                         sblock->pagev[page_index]->recover =
1263                                                                         NULL;
1264                                 }
1265                                 scrub_page_put(sblock->pagev[page_index]);
1266                         }
1267                 }
1268                 kfree(sblocks_for_recheck);
1269         }
1270
1271         return 0;
1272 }
1273
1274 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1275 {
1276         if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1277                 return 2;
1278         else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1279                 return 3;
1280         else
1281                 return (int)bbio->num_stripes;
1282 }
1283
1284 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1285                                                  u64 *raid_map,
1286                                                  u64 mapped_length,
1287                                                  int nstripes, int mirror,
1288                                                  int *stripe_index,
1289                                                  u64 *stripe_offset)
1290 {
1291         int i;
1292
1293         if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1294                 /* RAID5/6 */
1295                 for (i = 0; i < nstripes; i++) {
1296                         if (raid_map[i] == RAID6_Q_STRIPE ||
1297                             raid_map[i] == RAID5_P_STRIPE)
1298                                 continue;
1299
1300                         if (logical >= raid_map[i] &&
1301                             logical < raid_map[i] + mapped_length)
1302                                 break;
1303                 }
1304
1305                 *stripe_index = i;
1306                 *stripe_offset = logical - raid_map[i];
1307         } else {
1308                 /* The other RAID type */
1309                 *stripe_index = mirror;
1310                 *stripe_offset = 0;
1311         }
1312 }
1313
1314 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1315                                      struct scrub_block *sblocks_for_recheck)
1316 {
1317         struct scrub_ctx *sctx = original_sblock->sctx;
1318         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1319         u64 length = original_sblock->page_count * PAGE_SIZE;
1320         u64 logical = original_sblock->pagev[0]->logical;
1321         u64 generation = original_sblock->pagev[0]->generation;
1322         u64 flags = original_sblock->pagev[0]->flags;
1323         u64 have_csum = original_sblock->pagev[0]->have_csum;
1324         struct scrub_recover *recover;
1325         struct btrfs_bio *bbio;
1326         u64 sublen;
1327         u64 mapped_length;
1328         u64 stripe_offset;
1329         int stripe_index;
1330         int page_index = 0;
1331         int mirror_index;
1332         int nmirrors;
1333         int ret;
1334
1335         /*
1336          * note: the two members refs and outstanding_pages
1337          * are not used (and not set) in the blocks that are used for
1338          * the recheck procedure
1339          */
1340
1341         while (length > 0) {
1342                 sublen = min_t(u64, length, PAGE_SIZE);
1343                 mapped_length = sublen;
1344                 bbio = NULL;
1345
1346                 /*
1347                  * with a length of PAGE_SIZE, each returned stripe
1348                  * represents one mirror
1349                  */
1350                 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1351                                        &mapped_length, &bbio, 0, 1);
1352                 if (ret || !bbio || mapped_length < sublen) {
1353                         btrfs_put_bbio(bbio);
1354                         return -EIO;
1355                 }
1356
1357                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1358                 if (!recover) {
1359                         btrfs_put_bbio(bbio);
1360                         return -ENOMEM;
1361                 }
1362
1363                 atomic_set(&recover->refs, 1);
1364                 recover->bbio = bbio;
1365                 recover->map_length = mapped_length;
1366
1367                 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1368
1369                 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1370
1371                 for (mirror_index = 0; mirror_index < nmirrors;
1372                      mirror_index++) {
1373                         struct scrub_block *sblock;
1374                         struct scrub_page *page;
1375
1376                         sblock = sblocks_for_recheck + mirror_index;
1377                         sblock->sctx = sctx;
1378
1379                         page = kzalloc(sizeof(*page), GFP_NOFS);
1380                         if (!page) {
1381 leave_nomem:
1382                                 spin_lock(&sctx->stat_lock);
1383                                 sctx->stat.malloc_errors++;
1384                                 spin_unlock(&sctx->stat_lock);
1385                                 scrub_put_recover(recover);
1386                                 return -ENOMEM;
1387                         }
1388                         scrub_page_get(page);
1389                         sblock->pagev[page_index] = page;
1390                         page->sblock = sblock;
1391                         page->flags = flags;
1392                         page->generation = generation;
1393                         page->logical = logical;
1394                         page->have_csum = have_csum;
1395                         if (have_csum)
1396                                 memcpy(page->csum,
1397                                        original_sblock->pagev[0]->csum,
1398                                        sctx->csum_size);
1399
1400                         scrub_stripe_index_and_offset(logical,
1401                                                       bbio->map_type,
1402                                                       bbio->raid_map,
1403                                                       mapped_length,
1404                                                       bbio->num_stripes -
1405                                                       bbio->num_tgtdevs,
1406                                                       mirror_index,
1407                                                       &stripe_index,
1408                                                       &stripe_offset);
1409                         page->physical = bbio->stripes[stripe_index].physical +
1410                                          stripe_offset;
1411                         page->dev = bbio->stripes[stripe_index].dev;
1412
1413                         BUG_ON(page_index >= original_sblock->page_count);
1414                         page->physical_for_dev_replace =
1415                                 original_sblock->pagev[page_index]->
1416                                 physical_for_dev_replace;
1417                         /* for missing devices, dev->bdev is NULL */
1418                         page->mirror_num = mirror_index + 1;
1419                         sblock->page_count++;
1420                         page->page = alloc_page(GFP_NOFS);
1421                         if (!page->page)
1422                                 goto leave_nomem;
1423
1424                         scrub_get_recover(recover);
1425                         page->recover = recover;
1426                 }
1427                 scrub_put_recover(recover);
1428                 length -= sublen;
1429                 logical += sublen;
1430                 page_index++;
1431         }
1432
1433         return 0;
1434 }
1435
1436 struct scrub_bio_ret {
1437         struct completion event;
1438         int error;
1439 };
1440
1441 static void scrub_bio_wait_endio(struct bio *bio)
1442 {
1443         struct scrub_bio_ret *ret = bio->bi_private;
1444
1445         ret->error = bio->bi_error;
1446         complete(&ret->event);
1447 }
1448
1449 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1450 {
1451         return page->recover &&
1452                (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1453 }
1454
1455 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1456                                         struct bio *bio,
1457                                         struct scrub_page *page)
1458 {
1459         struct scrub_bio_ret done;
1460         int ret;
1461
1462         init_completion(&done.event);
1463         done.error = 0;
1464         bio->bi_iter.bi_sector = page->logical >> 9;
1465         bio->bi_private = &done;
1466         bio->bi_end_io = scrub_bio_wait_endio;
1467
1468         ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1469                                     page->recover->map_length,
1470                                     page->mirror_num, 0);
1471         if (ret)
1472                 return ret;
1473
1474         wait_for_completion(&done.event);
1475         if (done.error)
1476                 return -EIO;
1477
1478         return 0;
1479 }
1480
1481 /*
1482  * this function will check the on disk data for checksum errors, header
1483  * errors and read I/O errors. If any I/O errors happen, the exact pages
1484  * which are errored are marked as being bad. The goal is to enable scrub
1485  * to take those pages that are not errored from all the mirrors so that
1486  * the pages that are errored in the just handled mirror can be repaired.
1487  */
1488 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1489                                 struct scrub_block *sblock, int is_metadata,
1490                                 int have_csum, u8 *csum, u64 generation,
1491                                 u16 csum_size, int retry_failed_mirror)
1492 {
1493         int page_num;
1494
1495         sblock->no_io_error_seen = 1;
1496         sblock->header_error = 0;
1497         sblock->checksum_error = 0;
1498
1499         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1500                 struct bio *bio;
1501                 struct scrub_page *page = sblock->pagev[page_num];
1502
1503                 if (page->dev->bdev == NULL) {
1504                         page->io_error = 1;
1505                         sblock->no_io_error_seen = 0;
1506                         continue;
1507                 }
1508
1509                 WARN_ON(!page->page);
1510                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1511                 if (!bio) {
1512                         page->io_error = 1;
1513                         sblock->no_io_error_seen = 0;
1514                         continue;
1515                 }
1516                 bio->bi_bdev = page->dev->bdev;
1517
1518                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1519                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1520                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1521                                 sblock->no_io_error_seen = 0;
1522                 } else {
1523                         bio->bi_iter.bi_sector = page->physical >> 9;
1524
1525                         if (btrfsic_submit_bio_wait(READ, bio))
1526                                 sblock->no_io_error_seen = 0;
1527                 }
1528
1529                 bio_put(bio);
1530         }
1531
1532         if (sblock->no_io_error_seen)
1533                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1534                                              have_csum, csum, generation,
1535                                              csum_size);
1536
1537         return;
1538 }
1539
1540 static inline int scrub_check_fsid(u8 fsid[],
1541                                    struct scrub_page *spage)
1542 {
1543         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1544         int ret;
1545
1546         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1547         return !ret;
1548 }
1549
1550 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1551                                          struct scrub_block *sblock,
1552                                          int is_metadata, int have_csum,
1553                                          const u8 *csum, u64 generation,
1554                                          u16 csum_size)
1555 {
1556         int page_num;
1557         u8 calculated_csum[BTRFS_CSUM_SIZE];
1558         u32 crc = ~(u32)0;
1559         void *mapped_buffer;
1560
1561         WARN_ON(!sblock->pagev[0]->page);
1562         if (is_metadata) {
1563                 struct btrfs_header *h;
1564
1565                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1566                 h = (struct btrfs_header *)mapped_buffer;
1567
1568                 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
1569                     !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
1570                     memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1571                            BTRFS_UUID_SIZE)) {
1572                         sblock->header_error = 1;
1573                 } else if (generation != btrfs_stack_header_generation(h)) {
1574                         sblock->header_error = 1;
1575                         sblock->generation_error = 1;
1576                 }
1577                 csum = h->csum;
1578         } else {
1579                 if (!have_csum)
1580                         return;
1581
1582                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1583         }
1584
1585         for (page_num = 0;;) {
1586                 if (page_num == 0 && is_metadata)
1587                         crc = btrfs_csum_data(
1588                                 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1589                                 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1590                 else
1591                         crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1592
1593                 kunmap_atomic(mapped_buffer);
1594                 page_num++;
1595                 if (page_num >= sblock->page_count)
1596                         break;
1597                 WARN_ON(!sblock->pagev[page_num]->page);
1598
1599                 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1600         }
1601
1602         btrfs_csum_final(crc, calculated_csum);
1603         if (memcmp(calculated_csum, csum, csum_size))
1604                 sblock->checksum_error = 1;
1605 }
1606
1607 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1608                                              struct scrub_block *sblock_good)
1609 {
1610         int page_num;
1611         int ret = 0;
1612
1613         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1614                 int ret_sub;
1615
1616                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1617                                                            sblock_good,
1618                                                            page_num, 1);
1619                 if (ret_sub)
1620                         ret = ret_sub;
1621         }
1622
1623         return ret;
1624 }
1625
1626 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1627                                             struct scrub_block *sblock_good,
1628                                             int page_num, int force_write)
1629 {
1630         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1631         struct scrub_page *page_good = sblock_good->pagev[page_num];
1632
1633         BUG_ON(page_bad->page == NULL);
1634         BUG_ON(page_good->page == NULL);
1635         if (force_write || sblock_bad->header_error ||
1636             sblock_bad->checksum_error || page_bad->io_error) {
1637                 struct bio *bio;
1638                 int ret;
1639
1640                 if (!page_bad->dev->bdev) {
1641                         btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
1642                                 "scrub_repair_page_from_good_copy(bdev == NULL) "
1643                                 "is unexpected");
1644                         return -EIO;
1645                 }
1646
1647                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1648                 if (!bio)
1649                         return -EIO;
1650                 bio->bi_bdev = page_bad->dev->bdev;
1651                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1652
1653                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1654                 if (PAGE_SIZE != ret) {
1655                         bio_put(bio);
1656                         return -EIO;
1657                 }
1658
1659                 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1660                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1661                                 BTRFS_DEV_STAT_WRITE_ERRS);
1662                         btrfs_dev_replace_stats_inc(
1663                                 &sblock_bad->sctx->dev_root->fs_info->
1664                                 dev_replace.num_write_errors);
1665                         bio_put(bio);
1666                         return -EIO;
1667                 }
1668                 bio_put(bio);
1669         }
1670
1671         return 0;
1672 }
1673
1674 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1675 {
1676         int page_num;
1677
1678         /*
1679          * This block is used for the check of the parity on the source device,
1680          * so the data needn't be written into the destination device.
1681          */
1682         if (sblock->sparity)
1683                 return;
1684
1685         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1686                 int ret;
1687
1688                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1689                 if (ret)
1690                         btrfs_dev_replace_stats_inc(
1691                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1692                                 num_write_errors);
1693         }
1694 }
1695
1696 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1697                                            int page_num)
1698 {
1699         struct scrub_page *spage = sblock->pagev[page_num];
1700
1701         BUG_ON(spage->page == NULL);
1702         if (spage->io_error) {
1703                 void *mapped_buffer = kmap_atomic(spage->page);
1704
1705                 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1706                 flush_dcache_page(spage->page);
1707                 kunmap_atomic(mapped_buffer);
1708         }
1709         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1710 }
1711
1712 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1713                                     struct scrub_page *spage)
1714 {
1715         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1716         struct scrub_bio *sbio;
1717         int ret;
1718
1719         mutex_lock(&wr_ctx->wr_lock);
1720 again:
1721         if (!wr_ctx->wr_curr_bio) {
1722                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1723                                               GFP_NOFS);
1724                 if (!wr_ctx->wr_curr_bio) {
1725                         mutex_unlock(&wr_ctx->wr_lock);
1726                         return -ENOMEM;
1727                 }
1728                 wr_ctx->wr_curr_bio->sctx = sctx;
1729                 wr_ctx->wr_curr_bio->page_count = 0;
1730         }
1731         sbio = wr_ctx->wr_curr_bio;
1732         if (sbio->page_count == 0) {
1733                 struct bio *bio;
1734
1735                 sbio->physical = spage->physical_for_dev_replace;
1736                 sbio->logical = spage->logical;
1737                 sbio->dev = wr_ctx->tgtdev;
1738                 bio = sbio->bio;
1739                 if (!bio) {
1740                         bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1741                         if (!bio) {
1742                                 mutex_unlock(&wr_ctx->wr_lock);
1743                                 return -ENOMEM;
1744                         }
1745                         sbio->bio = bio;
1746                 }
1747
1748                 bio->bi_private = sbio;
1749                 bio->bi_end_io = scrub_wr_bio_end_io;
1750                 bio->bi_bdev = sbio->dev->bdev;
1751                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1752                 sbio->err = 0;
1753         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1754                    spage->physical_for_dev_replace ||
1755                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1756                    spage->logical) {
1757                 scrub_wr_submit(sctx);
1758                 goto again;
1759         }
1760
1761         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1762         if (ret != PAGE_SIZE) {
1763                 if (sbio->page_count < 1) {
1764                         bio_put(sbio->bio);
1765                         sbio->bio = NULL;
1766                         mutex_unlock(&wr_ctx->wr_lock);
1767                         return -EIO;
1768                 }
1769                 scrub_wr_submit(sctx);
1770                 goto again;
1771         }
1772
1773         sbio->pagev[sbio->page_count] = spage;
1774         scrub_page_get(spage);
1775         sbio->page_count++;
1776         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1777                 scrub_wr_submit(sctx);
1778         mutex_unlock(&wr_ctx->wr_lock);
1779
1780         return 0;
1781 }
1782
1783 static void scrub_wr_submit(struct scrub_ctx *sctx)
1784 {
1785         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1786         struct scrub_bio *sbio;
1787
1788         if (!wr_ctx->wr_curr_bio)
1789                 return;
1790
1791         sbio = wr_ctx->wr_curr_bio;
1792         wr_ctx->wr_curr_bio = NULL;
1793         WARN_ON(!sbio->bio->bi_bdev);
1794         scrub_pending_bio_inc(sctx);
1795         /* process all writes in a single worker thread. Then the block layer
1796          * orders the requests before sending them to the driver which
1797          * doubled the write performance on spinning disks when measured
1798          * with Linux 3.5 */
1799         btrfsic_submit_bio(WRITE, sbio->bio);
1800 }
1801
1802 static void scrub_wr_bio_end_io(struct bio *bio)
1803 {
1804         struct scrub_bio *sbio = bio->bi_private;
1805         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1806
1807         sbio->err = bio->bi_error;
1808         sbio->bio = bio;
1809
1810         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1811                          scrub_wr_bio_end_io_worker, NULL, NULL);
1812         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1813 }
1814
1815 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1816 {
1817         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1818         struct scrub_ctx *sctx = sbio->sctx;
1819         int i;
1820
1821         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1822         if (sbio->err) {
1823                 struct btrfs_dev_replace *dev_replace =
1824                         &sbio->sctx->dev_root->fs_info->dev_replace;
1825
1826                 for (i = 0; i < sbio->page_count; i++) {
1827                         struct scrub_page *spage = sbio->pagev[i];
1828
1829                         spage->io_error = 1;
1830                         btrfs_dev_replace_stats_inc(&dev_replace->
1831                                                     num_write_errors);
1832                 }
1833         }
1834
1835         for (i = 0; i < sbio->page_count; i++)
1836                 scrub_page_put(sbio->pagev[i]);
1837
1838         bio_put(sbio->bio);
1839         kfree(sbio);
1840         scrub_pending_bio_dec(sctx);
1841 }
1842
1843 static int scrub_checksum(struct scrub_block *sblock)
1844 {
1845         u64 flags;
1846         int ret;
1847
1848         WARN_ON(sblock->page_count < 1);
1849         flags = sblock->pagev[0]->flags;
1850         ret = 0;
1851         if (flags & BTRFS_EXTENT_FLAG_DATA)
1852                 ret = scrub_checksum_data(sblock);
1853         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1854                 ret = scrub_checksum_tree_block(sblock);
1855         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1856                 (void)scrub_checksum_super(sblock);
1857         else
1858                 WARN_ON(1);
1859         if (ret)
1860                 scrub_handle_errored_block(sblock);
1861
1862         return ret;
1863 }
1864
1865 static int scrub_checksum_data(struct scrub_block *sblock)
1866 {
1867         struct scrub_ctx *sctx = sblock->sctx;
1868         u8 csum[BTRFS_CSUM_SIZE];
1869         u8 *on_disk_csum;
1870         struct page *page;
1871         void *buffer;
1872         u32 crc = ~(u32)0;
1873         int fail = 0;
1874         u64 len;
1875         int index;
1876
1877         BUG_ON(sblock->page_count < 1);
1878         if (!sblock->pagev[0]->have_csum)
1879                 return 0;
1880
1881         on_disk_csum = sblock->pagev[0]->csum;
1882         page = sblock->pagev[0]->page;
1883         buffer = kmap_atomic(page);
1884
1885         len = sctx->sectorsize;
1886         index = 0;
1887         for (;;) {
1888                 u64 l = min_t(u64, len, PAGE_SIZE);
1889
1890                 crc = btrfs_csum_data(buffer, crc, l);
1891                 kunmap_atomic(buffer);
1892                 len -= l;
1893                 if (len == 0)
1894                         break;
1895                 index++;
1896                 BUG_ON(index >= sblock->page_count);
1897                 BUG_ON(!sblock->pagev[index]->page);
1898                 page = sblock->pagev[index]->page;
1899                 buffer = kmap_atomic(page);
1900         }
1901
1902         btrfs_csum_final(crc, csum);
1903         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1904                 fail = 1;
1905
1906         return fail;
1907 }
1908
1909 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1910 {
1911         struct scrub_ctx *sctx = sblock->sctx;
1912         struct btrfs_header *h;
1913         struct btrfs_root *root = sctx->dev_root;
1914         struct btrfs_fs_info *fs_info = root->fs_info;
1915         u8 calculated_csum[BTRFS_CSUM_SIZE];
1916         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1917         struct page *page;
1918         void *mapped_buffer;
1919         u64 mapped_size;
1920         void *p;
1921         u32 crc = ~(u32)0;
1922         int fail = 0;
1923         int crc_fail = 0;
1924         u64 len;
1925         int index;
1926
1927         BUG_ON(sblock->page_count < 1);
1928         page = sblock->pagev[0]->page;
1929         mapped_buffer = kmap_atomic(page);
1930         h = (struct btrfs_header *)mapped_buffer;
1931         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1932
1933         /*
1934          * we don't use the getter functions here, as we
1935          * a) don't have an extent buffer and
1936          * b) the page is already kmapped
1937          */
1938
1939         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1940                 ++fail;
1941
1942         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
1943                 ++fail;
1944
1945         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1946                 ++fail;
1947
1948         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1949                    BTRFS_UUID_SIZE))
1950                 ++fail;
1951
1952         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1953         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1954         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1955         index = 0;
1956         for (;;) {
1957                 u64 l = min_t(u64, len, mapped_size);
1958
1959                 crc = btrfs_csum_data(p, crc, l);
1960                 kunmap_atomic(mapped_buffer);
1961                 len -= l;
1962                 if (len == 0)
1963                         break;
1964                 index++;
1965                 BUG_ON(index >= sblock->page_count);
1966                 BUG_ON(!sblock->pagev[index]->page);
1967                 page = sblock->pagev[index]->page;
1968                 mapped_buffer = kmap_atomic(page);
1969                 mapped_size = PAGE_SIZE;
1970                 p = mapped_buffer;
1971         }
1972
1973         btrfs_csum_final(crc, calculated_csum);
1974         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1975                 ++crc_fail;
1976
1977         return fail || crc_fail;
1978 }
1979
1980 static int scrub_checksum_super(struct scrub_block *sblock)
1981 {
1982         struct btrfs_super_block *s;
1983         struct scrub_ctx *sctx = sblock->sctx;
1984         u8 calculated_csum[BTRFS_CSUM_SIZE];
1985         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1986         struct page *page;
1987         void *mapped_buffer;
1988         u64 mapped_size;
1989         void *p;
1990         u32 crc = ~(u32)0;
1991         int fail_gen = 0;
1992         int fail_cor = 0;
1993         u64 len;
1994         int index;
1995
1996         BUG_ON(sblock->page_count < 1);
1997         page = sblock->pagev[0]->page;
1998         mapped_buffer = kmap_atomic(page);
1999         s = (struct btrfs_super_block *)mapped_buffer;
2000         memcpy(on_disk_csum, s->csum, sctx->csum_size);
2001
2002         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
2003                 ++fail_cor;
2004
2005         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
2006                 ++fail_gen;
2007
2008         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
2009                 ++fail_cor;
2010
2011         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2012         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2013         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2014         index = 0;
2015         for (;;) {
2016                 u64 l = min_t(u64, len, mapped_size);
2017
2018                 crc = btrfs_csum_data(p, crc, l);
2019                 kunmap_atomic(mapped_buffer);
2020                 len -= l;
2021                 if (len == 0)
2022                         break;
2023                 index++;
2024                 BUG_ON(index >= sblock->page_count);
2025                 BUG_ON(!sblock->pagev[index]->page);
2026                 page = sblock->pagev[index]->page;
2027                 mapped_buffer = kmap_atomic(page);
2028                 mapped_size = PAGE_SIZE;
2029                 p = mapped_buffer;
2030         }
2031
2032         btrfs_csum_final(crc, calculated_csum);
2033         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2034                 ++fail_cor;
2035
2036         if (fail_cor + fail_gen) {
2037                 /*
2038                  * if we find an error in a super block, we just report it.
2039                  * They will get written with the next transaction commit
2040                  * anyway
2041                  */
2042                 spin_lock(&sctx->stat_lock);
2043                 ++sctx->stat.super_errors;
2044                 spin_unlock(&sctx->stat_lock);
2045                 if (fail_cor)
2046                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2047                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2048                 else
2049                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2050                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2051         }
2052
2053         return fail_cor + fail_gen;
2054 }
2055
2056 static void scrub_block_get(struct scrub_block *sblock)
2057 {
2058         atomic_inc(&sblock->refs);
2059 }
2060
2061 static void scrub_block_put(struct scrub_block *sblock)
2062 {
2063         if (atomic_dec_and_test(&sblock->refs)) {
2064                 int i;
2065
2066                 if (sblock->sparity)
2067                         scrub_parity_put(sblock->sparity);
2068
2069                 for (i = 0; i < sblock->page_count; i++)
2070                         scrub_page_put(sblock->pagev[i]);
2071                 kfree(sblock);
2072         }
2073 }
2074
2075 static void scrub_page_get(struct scrub_page *spage)
2076 {
2077         atomic_inc(&spage->refs);
2078 }
2079
2080 static void scrub_page_put(struct scrub_page *spage)
2081 {
2082         if (atomic_dec_and_test(&spage->refs)) {
2083                 if (spage->page)
2084                         __free_page(spage->page);
2085                 kfree(spage);
2086         }
2087 }
2088
2089 static void scrub_submit(struct scrub_ctx *sctx)
2090 {
2091         struct scrub_bio *sbio;
2092
2093         if (sctx->curr == -1)
2094                 return;
2095
2096         sbio = sctx->bios[sctx->curr];
2097         sctx->curr = -1;
2098         scrub_pending_bio_inc(sctx);
2099         btrfsic_submit_bio(READ, sbio->bio);
2100 }
2101
2102 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2103                                     struct scrub_page *spage)
2104 {
2105         struct scrub_block *sblock = spage->sblock;
2106         struct scrub_bio *sbio;
2107         int ret;
2108
2109 again:
2110         /*
2111          * grab a fresh bio or wait for one to become available
2112          */
2113         while (sctx->curr == -1) {
2114                 spin_lock(&sctx->list_lock);
2115                 sctx->curr = sctx->first_free;
2116                 if (sctx->curr != -1) {
2117                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2118                         sctx->bios[sctx->curr]->next_free = -1;
2119                         sctx->bios[sctx->curr]->page_count = 0;
2120                         spin_unlock(&sctx->list_lock);
2121                 } else {
2122                         spin_unlock(&sctx->list_lock);
2123                         wait_event(sctx->list_wait, sctx->first_free != -1);
2124                 }
2125         }
2126         sbio = sctx->bios[sctx->curr];
2127         if (sbio->page_count == 0) {
2128                 struct bio *bio;
2129
2130                 sbio->physical = spage->physical;
2131                 sbio->logical = spage->logical;
2132                 sbio->dev = spage->dev;
2133                 bio = sbio->bio;
2134                 if (!bio) {
2135                         bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
2136                         if (!bio)
2137                                 return -ENOMEM;
2138                         sbio->bio = bio;
2139                 }
2140
2141                 bio->bi_private = sbio;
2142                 bio->bi_end_io = scrub_bio_end_io;
2143                 bio->bi_bdev = sbio->dev->bdev;
2144                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2145                 sbio->err = 0;
2146         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2147                    spage->physical ||
2148                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2149                    spage->logical ||
2150                    sbio->dev != spage->dev) {
2151                 scrub_submit(sctx);
2152                 goto again;
2153         }
2154
2155         sbio->pagev[sbio->page_count] = spage;
2156         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2157         if (ret != PAGE_SIZE) {
2158                 if (sbio->page_count < 1) {
2159                         bio_put(sbio->bio);
2160                         sbio->bio = NULL;
2161                         return -EIO;
2162                 }
2163                 scrub_submit(sctx);
2164                 goto again;
2165         }
2166
2167         scrub_block_get(sblock); /* one for the page added to the bio */
2168         atomic_inc(&sblock->outstanding_pages);
2169         sbio->page_count++;
2170         if (sbio->page_count == sctx->pages_per_rd_bio)
2171                 scrub_submit(sctx);
2172
2173         return 0;
2174 }
2175
2176 static void scrub_missing_raid56_end_io(struct bio *bio)
2177 {
2178         struct scrub_block *sblock = bio->bi_private;
2179         struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
2180
2181         if (bio->bi_error)
2182                 sblock->no_io_error_seen = 0;
2183
2184         btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2185 }
2186
2187 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2188 {
2189         struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2190         struct scrub_ctx *sctx = sblock->sctx;
2191         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2192         unsigned int is_metadata;
2193         unsigned int have_csum;
2194         u8 *csum;
2195         u64 generation;
2196         u64 logical;
2197         struct btrfs_device *dev;
2198
2199         is_metadata = !(sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA);
2200         have_csum = sblock->pagev[0]->have_csum;
2201         csum = sblock->pagev[0]->csum;
2202         generation = sblock->pagev[0]->generation;
2203         logical = sblock->pagev[0]->logical;
2204         dev = sblock->pagev[0]->dev;
2205
2206         if (sblock->no_io_error_seen) {
2207                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
2208                                              have_csum, csum, generation,
2209                                              sctx->csum_size);
2210         }
2211
2212         if (!sblock->no_io_error_seen) {
2213                 spin_lock(&sctx->stat_lock);
2214                 sctx->stat.read_errors++;
2215                 spin_unlock(&sctx->stat_lock);
2216                 btrfs_err_rl_in_rcu(fs_info,
2217                         "IO error rebuilding logical %llu for dev %s",
2218                         logical, rcu_str_deref(dev->name));
2219         } else if (sblock->header_error || sblock->checksum_error) {
2220                 spin_lock(&sctx->stat_lock);
2221                 sctx->stat.uncorrectable_errors++;
2222                 spin_unlock(&sctx->stat_lock);
2223                 btrfs_err_rl_in_rcu(fs_info,
2224                         "failed to rebuild valid logical %llu for dev %s",
2225                         logical, rcu_str_deref(dev->name));
2226         } else {
2227                 scrub_write_block_to_dev_replace(sblock);
2228         }
2229
2230         scrub_block_put(sblock);
2231
2232         if (sctx->is_dev_replace &&
2233             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2234                 mutex_lock(&sctx->wr_ctx.wr_lock);
2235                 scrub_wr_submit(sctx);
2236                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2237         }
2238
2239         scrub_pending_bio_dec(sctx);
2240 }
2241
2242 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2243 {
2244         struct scrub_ctx *sctx = sblock->sctx;
2245         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2246         u64 length = sblock->page_count * PAGE_SIZE;
2247         u64 logical = sblock->pagev[0]->logical;
2248         struct btrfs_bio *bbio;
2249         struct bio *bio;
2250         struct btrfs_raid_bio *rbio;
2251         int ret;
2252         int i;
2253
2254         ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
2255                                &bbio, 0, 1);
2256         if (ret || !bbio || !bbio->raid_map)
2257                 goto bbio_out;
2258
2259         if (WARN_ON(!sctx->is_dev_replace ||
2260                     !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2261                 /*
2262                  * We shouldn't be scrubbing a missing device. Even for dev
2263                  * replace, we should only get here for RAID 5/6. We either
2264                  * managed to mount something with no mirrors remaining or
2265                  * there's a bug in scrub_remap_extent()/btrfs_map_block().
2266                  */
2267                 goto bbio_out;
2268         }
2269
2270         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2271         if (!bio)
2272                 goto bbio_out;
2273
2274         bio->bi_iter.bi_sector = logical >> 9;
2275         bio->bi_private = sblock;
2276         bio->bi_end_io = scrub_missing_raid56_end_io;
2277
2278         rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
2279         if (!rbio)
2280                 goto rbio_out;
2281
2282         for (i = 0; i < sblock->page_count; i++) {
2283                 struct scrub_page *spage = sblock->pagev[i];
2284
2285                 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2286         }
2287
2288         btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2289                         scrub_missing_raid56_worker, NULL, NULL);
2290         scrub_block_get(sblock);
2291         scrub_pending_bio_inc(sctx);
2292         raid56_submit_missing_rbio(rbio);
2293         return;
2294
2295 rbio_out:
2296         bio_put(bio);
2297 bbio_out:
2298         btrfs_put_bbio(bbio);
2299         spin_lock(&sctx->stat_lock);
2300         sctx->stat.malloc_errors++;
2301         spin_unlock(&sctx->stat_lock);
2302 }
2303
2304 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2305                        u64 physical, struct btrfs_device *dev, u64 flags,
2306                        u64 gen, int mirror_num, u8 *csum, int force,
2307                        u64 physical_for_dev_replace)
2308 {
2309         struct scrub_block *sblock;
2310         int index;
2311
2312         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2313         if (!sblock) {
2314                 spin_lock(&sctx->stat_lock);
2315                 sctx->stat.malloc_errors++;
2316                 spin_unlock(&sctx->stat_lock);
2317                 return -ENOMEM;
2318         }
2319
2320         /* one ref inside this function, plus one for each page added to
2321          * a bio later on */
2322         atomic_set(&sblock->refs, 1);
2323         sblock->sctx = sctx;
2324         sblock->no_io_error_seen = 1;
2325
2326         for (index = 0; len > 0; index++) {
2327                 struct scrub_page *spage;
2328                 u64 l = min_t(u64, len, PAGE_SIZE);
2329
2330                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2331                 if (!spage) {
2332 leave_nomem:
2333                         spin_lock(&sctx->stat_lock);
2334                         sctx->stat.malloc_errors++;
2335                         spin_unlock(&sctx->stat_lock);
2336                         scrub_block_put(sblock);
2337                         return -ENOMEM;
2338                 }
2339                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2340                 scrub_page_get(spage);
2341                 sblock->pagev[index] = spage;
2342                 spage->sblock = sblock;
2343                 spage->dev = dev;
2344                 spage->flags = flags;
2345                 spage->generation = gen;
2346                 spage->logical = logical;
2347                 spage->physical = physical;
2348                 spage->physical_for_dev_replace = physical_for_dev_replace;
2349                 spage->mirror_num = mirror_num;
2350                 if (csum) {
2351                         spage->have_csum = 1;
2352                         memcpy(spage->csum, csum, sctx->csum_size);
2353                 } else {
2354                         spage->have_csum = 0;
2355                 }
2356                 sblock->page_count++;
2357                 spage->page = alloc_page(GFP_NOFS);
2358                 if (!spage->page)
2359                         goto leave_nomem;
2360                 len -= l;
2361                 logical += l;
2362                 physical += l;
2363                 physical_for_dev_replace += l;
2364         }
2365
2366         WARN_ON(sblock->page_count == 0);
2367         if (dev->missing) {
2368                 /*
2369                  * This case should only be hit for RAID 5/6 device replace. See
2370                  * the comment in scrub_missing_raid56_pages() for details.
2371                  */
2372                 scrub_missing_raid56_pages(sblock);
2373         } else {
2374                 for (index = 0; index < sblock->page_count; index++) {
2375                         struct scrub_page *spage = sblock->pagev[index];
2376                         int ret;
2377
2378                         ret = scrub_add_page_to_rd_bio(sctx, spage);
2379                         if (ret) {
2380                                 scrub_block_put(sblock);
2381                                 return ret;
2382                         }
2383                 }
2384
2385                 if (force)
2386                         scrub_submit(sctx);
2387         }
2388
2389         /* last one frees, either here or in bio completion for last page */
2390         scrub_block_put(sblock);
2391         return 0;
2392 }
2393
2394 static void scrub_bio_end_io(struct bio *bio)
2395 {
2396         struct scrub_bio *sbio = bio->bi_private;
2397         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2398
2399         sbio->err = bio->bi_error;
2400         sbio->bio = bio;
2401
2402         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2403 }
2404
2405 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2406 {
2407         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2408         struct scrub_ctx *sctx = sbio->sctx;
2409         int i;
2410
2411         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2412         if (sbio->err) {
2413                 for (i = 0; i < sbio->page_count; i++) {
2414                         struct scrub_page *spage = sbio->pagev[i];
2415
2416                         spage->io_error = 1;
2417                         spage->sblock->no_io_error_seen = 0;
2418                 }
2419         }
2420
2421         /* now complete the scrub_block items that have all pages completed */
2422         for (i = 0; i < sbio->page_count; i++) {
2423                 struct scrub_page *spage = sbio->pagev[i];
2424                 struct scrub_block *sblock = spage->sblock;
2425
2426                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2427                         scrub_block_complete(sblock);
2428                 scrub_block_put(sblock);
2429         }
2430
2431         bio_put(sbio->bio);
2432         sbio->bio = NULL;
2433         spin_lock(&sctx->list_lock);
2434         sbio->next_free = sctx->first_free;
2435         sctx->first_free = sbio->index;
2436         spin_unlock(&sctx->list_lock);
2437
2438         if (sctx->is_dev_replace &&
2439             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2440                 mutex_lock(&sctx->wr_ctx.wr_lock);
2441                 scrub_wr_submit(sctx);
2442                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2443         }
2444
2445         scrub_pending_bio_dec(sctx);
2446 }
2447
2448 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2449                                        unsigned long *bitmap,
2450                                        u64 start, u64 len)
2451 {
2452         u32 offset;
2453         int nsectors;
2454         int sectorsize = sparity->sctx->dev_root->sectorsize;
2455
2456         if (len >= sparity->stripe_len) {
2457                 bitmap_set(bitmap, 0, sparity->nsectors);
2458                 return;
2459         }
2460
2461         start -= sparity->logic_start;
2462         start = div_u64_rem(start, sparity->stripe_len, &offset);
2463         offset /= sectorsize;
2464         nsectors = (int)len / sectorsize;
2465
2466         if (offset + nsectors <= sparity->nsectors) {
2467                 bitmap_set(bitmap, offset, nsectors);
2468                 return;
2469         }
2470
2471         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2472         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2473 }
2474
2475 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2476                                                    u64 start, u64 len)
2477 {
2478         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2479 }
2480
2481 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2482                                                   u64 start, u64 len)
2483 {
2484         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2485 }
2486
2487 static void scrub_block_complete(struct scrub_block *sblock)
2488 {
2489         int corrupted = 0;
2490
2491         if (!sblock->no_io_error_seen) {
2492                 corrupted = 1;
2493                 scrub_handle_errored_block(sblock);
2494         } else {
2495                 /*
2496                  * if has checksum error, write via repair mechanism in
2497                  * dev replace case, otherwise write here in dev replace
2498                  * case.
2499                  */
2500                 corrupted = scrub_checksum(sblock);
2501                 if (!corrupted && sblock->sctx->is_dev_replace)
2502                         scrub_write_block_to_dev_replace(sblock);
2503         }
2504
2505         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2506                 u64 start = sblock->pagev[0]->logical;
2507                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2508                           PAGE_SIZE;
2509
2510                 scrub_parity_mark_sectors_error(sblock->sparity,
2511                                                 start, end - start);
2512         }
2513 }
2514
2515 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2516                            u8 *csum)
2517 {
2518         struct btrfs_ordered_sum *sum = NULL;
2519         unsigned long index;
2520         unsigned long num_sectors;
2521
2522         while (!list_empty(&sctx->csum_list)) {
2523                 sum = list_first_entry(&sctx->csum_list,
2524                                        struct btrfs_ordered_sum, list);
2525                 if (sum->bytenr > logical)
2526                         return 0;
2527                 if (sum->bytenr + sum->len > logical)
2528                         break;
2529
2530                 ++sctx->stat.csum_discards;
2531                 list_del(&sum->list);
2532                 kfree(sum);
2533                 sum = NULL;
2534         }
2535         if (!sum)
2536                 return 0;
2537
2538         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2539         num_sectors = sum->len / sctx->sectorsize;
2540         memcpy(csum, sum->sums + index, sctx->csum_size);
2541         if (index == num_sectors - 1) {
2542                 list_del(&sum->list);
2543                 kfree(sum);
2544         }
2545         return 1;
2546 }
2547
2548 /* scrub extent tries to collect up to 64 kB for each bio */
2549 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2550                         u64 physical, struct btrfs_device *dev, u64 flags,
2551                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2552 {
2553         int ret;
2554         u8 csum[BTRFS_CSUM_SIZE];
2555         u32 blocksize;
2556
2557         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2558                 blocksize = sctx->sectorsize;
2559                 spin_lock(&sctx->stat_lock);
2560                 sctx->stat.data_extents_scrubbed++;
2561                 sctx->stat.data_bytes_scrubbed += len;
2562                 spin_unlock(&sctx->stat_lock);
2563         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2564                 blocksize = sctx->nodesize;
2565                 spin_lock(&sctx->stat_lock);
2566                 sctx->stat.tree_extents_scrubbed++;
2567                 sctx->stat.tree_bytes_scrubbed += len;
2568                 spin_unlock(&sctx->stat_lock);
2569         } else {
2570                 blocksize = sctx->sectorsize;
2571                 WARN_ON(1);
2572         }
2573
2574         while (len) {
2575                 u64 l = min_t(u64, len, blocksize);
2576                 int have_csum = 0;
2577
2578                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2579                         /* push csums to sbio */
2580                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2581                         if (have_csum == 0)
2582                                 ++sctx->stat.no_csum;
2583                         if (sctx->is_dev_replace && !have_csum) {
2584                                 ret = copy_nocow_pages(sctx, logical, l,
2585                                                        mirror_num,
2586                                                       physical_for_dev_replace);
2587                                 goto behind_scrub_pages;
2588                         }
2589                 }
2590                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2591                                   mirror_num, have_csum ? csum : NULL, 0,
2592                                   physical_for_dev_replace);
2593 behind_scrub_pages:
2594                 if (ret)
2595                         return ret;
2596                 len -= l;
2597                 logical += l;
2598                 physical += l;
2599                 physical_for_dev_replace += l;
2600         }
2601         return 0;
2602 }
2603
2604 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2605                                   u64 logical, u64 len,
2606                                   u64 physical, struct btrfs_device *dev,
2607                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2608 {
2609         struct scrub_ctx *sctx = sparity->sctx;
2610         struct scrub_block *sblock;
2611         int index;
2612
2613         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2614         if (!sblock) {
2615                 spin_lock(&sctx->stat_lock);
2616                 sctx->stat.malloc_errors++;
2617                 spin_unlock(&sctx->stat_lock);
2618                 return -ENOMEM;
2619         }
2620
2621         /* one ref inside this function, plus one for each page added to
2622          * a bio later on */
2623         atomic_set(&sblock->refs, 1);
2624         sblock->sctx = sctx;
2625         sblock->no_io_error_seen = 1;
2626         sblock->sparity = sparity;
2627         scrub_parity_get(sparity);
2628
2629         for (index = 0; len > 0; index++) {
2630                 struct scrub_page *spage;
2631                 u64 l = min_t(u64, len, PAGE_SIZE);
2632
2633                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2634                 if (!spage) {
2635 leave_nomem:
2636                         spin_lock(&sctx->stat_lock);
2637                         sctx->stat.malloc_errors++;
2638                         spin_unlock(&sctx->stat_lock);
2639                         scrub_block_put(sblock);
2640                         return -ENOMEM;
2641                 }
2642                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2643                 /* For scrub block */
2644                 scrub_page_get(spage);
2645                 sblock->pagev[index] = spage;
2646                 /* For scrub parity */
2647                 scrub_page_get(spage);
2648                 list_add_tail(&spage->list, &sparity->spages);
2649                 spage->sblock = sblock;
2650                 spage->dev = dev;
2651                 spage->flags = flags;
2652                 spage->generation = gen;
2653                 spage->logical = logical;
2654                 spage->physical = physical;
2655                 spage->mirror_num = mirror_num;
2656                 if (csum) {
2657                         spage->have_csum = 1;
2658                         memcpy(spage->csum, csum, sctx->csum_size);
2659                 } else {
2660                         spage->have_csum = 0;
2661                 }
2662                 sblock->page_count++;
2663                 spage->page = alloc_page(GFP_NOFS);
2664                 if (!spage->page)
2665                         goto leave_nomem;
2666                 len -= l;
2667                 logical += l;
2668                 physical += l;
2669         }
2670
2671         WARN_ON(sblock->page_count == 0);
2672         for (index = 0; index < sblock->page_count; index++) {
2673                 struct scrub_page *spage = sblock->pagev[index];
2674                 int ret;
2675
2676                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2677                 if (ret) {
2678                         scrub_block_put(sblock);
2679                         return ret;
2680                 }
2681         }
2682
2683         /* last one frees, either here or in bio completion for last page */
2684         scrub_block_put(sblock);
2685         return 0;
2686 }
2687
2688 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2689                                    u64 logical, u64 len,
2690                                    u64 physical, struct btrfs_device *dev,
2691                                    u64 flags, u64 gen, int mirror_num)
2692 {
2693         struct scrub_ctx *sctx = sparity->sctx;
2694         int ret;
2695         u8 csum[BTRFS_CSUM_SIZE];
2696         u32 blocksize;
2697
2698         if (dev->missing) {
2699                 scrub_parity_mark_sectors_error(sparity, logical, len);
2700                 return 0;
2701         }
2702
2703         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2704                 blocksize = sctx->sectorsize;
2705         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2706                 blocksize = sctx->nodesize;
2707         } else {
2708                 blocksize = sctx->sectorsize;
2709                 WARN_ON(1);
2710         }
2711
2712         while (len) {
2713                 u64 l = min_t(u64, len, blocksize);
2714                 int have_csum = 0;
2715
2716                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2717                         /* push csums to sbio */
2718                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2719                         if (have_csum == 0)
2720                                 goto skip;
2721                 }
2722                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2723                                              flags, gen, mirror_num,
2724                                              have_csum ? csum : NULL);
2725                 if (ret)
2726                         return ret;
2727 skip:
2728                 len -= l;
2729                 logical += l;
2730                 physical += l;
2731         }
2732         return 0;
2733 }
2734
2735 /*
2736  * Given a physical address, this will calculate it's
2737  * logical offset. if this is a parity stripe, it will return
2738  * the most left data stripe's logical offset.
2739  *
2740  * return 0 if it is a data stripe, 1 means parity stripe.
2741  */
2742 static int get_raid56_logic_offset(u64 physical, int num,
2743                                    struct map_lookup *map, u64 *offset,
2744                                    u64 *stripe_start)
2745 {
2746         int i;
2747         int j = 0;
2748         u64 stripe_nr;
2749         u64 last_offset;
2750         u32 stripe_index;
2751         u32 rot;
2752
2753         last_offset = (physical - map->stripes[num].physical) *
2754                       nr_data_stripes(map);
2755         if (stripe_start)
2756                 *stripe_start = last_offset;
2757
2758         *offset = last_offset;
2759         for (i = 0; i < nr_data_stripes(map); i++) {
2760                 *offset = last_offset + i * map->stripe_len;
2761
2762                 stripe_nr = div_u64(*offset, map->stripe_len);
2763                 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2764
2765                 /* Work out the disk rotation on this stripe-set */
2766                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2767                 /* calculate which stripe this data locates */
2768                 rot += i;
2769                 stripe_index = rot % map->num_stripes;
2770                 if (stripe_index == num)
2771                         return 0;
2772                 if (stripe_index < num)
2773                         j++;
2774         }
2775         *offset = last_offset + j * map->stripe_len;
2776         return 1;
2777 }
2778
2779 static void scrub_free_parity(struct scrub_parity *sparity)
2780 {
2781         struct scrub_ctx *sctx = sparity->sctx;
2782         struct scrub_page *curr, *next;
2783         int nbits;
2784
2785         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2786         if (nbits) {
2787                 spin_lock(&sctx->stat_lock);
2788                 sctx->stat.read_errors += nbits;
2789                 sctx->stat.uncorrectable_errors += nbits;
2790                 spin_unlock(&sctx->stat_lock);
2791         }
2792
2793         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2794                 list_del_init(&curr->list);
2795                 scrub_page_put(curr);
2796         }
2797
2798         kfree(sparity);
2799 }
2800
2801 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2802 {
2803         struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2804                                                     work);
2805         struct scrub_ctx *sctx = sparity->sctx;
2806
2807         scrub_free_parity(sparity);
2808         scrub_pending_bio_dec(sctx);
2809 }
2810
2811 static void scrub_parity_bio_endio(struct bio *bio)
2812 {
2813         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2814
2815         if (bio->bi_error)
2816                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2817                           sparity->nsectors);
2818
2819         bio_put(bio);
2820
2821         btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2822                         scrub_parity_bio_endio_worker, NULL, NULL);
2823         btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
2824                          &sparity->work);
2825 }
2826
2827 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2828 {
2829         struct scrub_ctx *sctx = sparity->sctx;
2830         struct bio *bio;
2831         struct btrfs_raid_bio *rbio;
2832         struct scrub_page *spage;
2833         struct btrfs_bio *bbio = NULL;
2834         u64 length;
2835         int ret;
2836
2837         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2838                            sparity->nsectors))
2839                 goto out;
2840
2841         length = sparity->logic_end - sparity->logic_start;
2842         ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2843                                sparity->logic_start,
2844                                &length, &bbio, 0, 1);
2845         if (ret || !bbio || !bbio->raid_map)
2846                 goto bbio_out;
2847
2848         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2849         if (!bio)
2850                 goto bbio_out;
2851
2852         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2853         bio->bi_private = sparity;
2854         bio->bi_end_io = scrub_parity_bio_endio;
2855
2856         rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2857                                               length, sparity->scrub_dev,
2858                                               sparity->dbitmap,
2859                                               sparity->nsectors);
2860         if (!rbio)
2861                 goto rbio_out;
2862
2863         list_for_each_entry(spage, &sparity->spages, list)
2864                 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2865
2866         scrub_pending_bio_inc(sctx);
2867         raid56_parity_submit_scrub_rbio(rbio);
2868         return;
2869
2870 rbio_out:
2871         bio_put(bio);
2872 bbio_out:
2873         btrfs_put_bbio(bbio);
2874         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2875                   sparity->nsectors);
2876         spin_lock(&sctx->stat_lock);
2877         sctx->stat.malloc_errors++;
2878         spin_unlock(&sctx->stat_lock);
2879 out:
2880         scrub_free_parity(sparity);
2881 }
2882
2883 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2884 {
2885         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2886 }
2887
2888 static void scrub_parity_get(struct scrub_parity *sparity)
2889 {
2890         atomic_inc(&sparity->refs);
2891 }
2892
2893 static void scrub_parity_put(struct scrub_parity *sparity)
2894 {
2895         if (!atomic_dec_and_test(&sparity->refs))
2896                 return;
2897
2898         scrub_parity_check_and_repair(sparity);
2899 }
2900
2901 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2902                                                   struct map_lookup *map,
2903                                                   struct btrfs_device *sdev,
2904                                                   struct btrfs_path *path,
2905                                                   u64 logic_start,
2906                                                   u64 logic_end)
2907 {
2908         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2909         struct btrfs_root *root = fs_info->extent_root;
2910         struct btrfs_root *csum_root = fs_info->csum_root;
2911         struct btrfs_extent_item *extent;
2912         struct btrfs_bio *bbio = NULL;
2913         u64 flags;
2914         int ret;
2915         int slot;
2916         struct extent_buffer *l;
2917         struct btrfs_key key;
2918         u64 generation;
2919         u64 extent_logical;
2920         u64 extent_physical;
2921         u64 extent_len;
2922         u64 mapped_length;
2923         struct btrfs_device *extent_dev;
2924         struct scrub_parity *sparity;
2925         int nsectors;
2926         int bitmap_len;
2927         int extent_mirror_num;
2928         int stop_loop = 0;
2929
2930         nsectors = map->stripe_len / root->sectorsize;
2931         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2932         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2933                           GFP_NOFS);
2934         if (!sparity) {
2935                 spin_lock(&sctx->stat_lock);
2936                 sctx->stat.malloc_errors++;
2937                 spin_unlock(&sctx->stat_lock);
2938                 return -ENOMEM;
2939         }
2940
2941         sparity->stripe_len = map->stripe_len;
2942         sparity->nsectors = nsectors;
2943         sparity->sctx = sctx;
2944         sparity->scrub_dev = sdev;
2945         sparity->logic_start = logic_start;
2946         sparity->logic_end = logic_end;
2947         atomic_set(&sparity->refs, 1);
2948         INIT_LIST_HEAD(&sparity->spages);
2949         sparity->dbitmap = sparity->bitmap;
2950         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2951
2952         ret = 0;
2953         while (logic_start < logic_end) {
2954                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2955                         key.type = BTRFS_METADATA_ITEM_KEY;
2956                 else
2957                         key.type = BTRFS_EXTENT_ITEM_KEY;
2958                 key.objectid = logic_start;
2959                 key.offset = (u64)-1;
2960
2961                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2962                 if (ret < 0)
2963                         goto out;
2964
2965                 if (ret > 0) {
2966                         ret = btrfs_previous_extent_item(root, path, 0);
2967                         if (ret < 0)
2968                                 goto out;
2969                         if (ret > 0) {
2970                                 btrfs_release_path(path);
2971                                 ret = btrfs_search_slot(NULL, root, &key,
2972                                                         path, 0, 0);
2973                                 if (ret < 0)
2974                                         goto out;
2975                         }
2976                 }
2977
2978                 stop_loop = 0;
2979                 while (1) {
2980                         u64 bytes;
2981
2982                         l = path->nodes[0];
2983                         slot = path->slots[0];
2984                         if (slot >= btrfs_header_nritems(l)) {
2985                                 ret = btrfs_next_leaf(root, path);
2986                                 if (ret == 0)
2987                                         continue;
2988                                 if (ret < 0)
2989                                         goto out;
2990
2991                                 stop_loop = 1;
2992                                 break;
2993                         }
2994                         btrfs_item_key_to_cpu(l, &key, slot);
2995
2996                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2997                             key.type != BTRFS_METADATA_ITEM_KEY)
2998                                 goto next;
2999
3000                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3001                                 bytes = root->nodesize;
3002                         else
3003                                 bytes = key.offset;
3004
3005                         if (key.objectid + bytes <= logic_start)
3006                                 goto next;
3007
3008                         if (key.objectid >= logic_end) {
3009                                 stop_loop = 1;
3010                                 break;
3011                         }
3012
3013                         while (key.objectid >= logic_start + map->stripe_len)
3014                                 logic_start += map->stripe_len;
3015
3016                         extent = btrfs_item_ptr(l, slot,
3017                                                 struct btrfs_extent_item);
3018                         flags = btrfs_extent_flags(l, extent);
3019                         generation = btrfs_extent_generation(l, extent);
3020
3021                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3022                             (key.objectid < logic_start ||
3023                              key.objectid + bytes >
3024                              logic_start + map->stripe_len)) {
3025                                 btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3026                                           key.objectid, logic_start);
3027                                 spin_lock(&sctx->stat_lock);
3028                                 sctx->stat.uncorrectable_errors++;
3029                                 spin_unlock(&sctx->stat_lock);
3030                                 goto next;
3031                         }
3032 again:
3033                         extent_logical = key.objectid;
3034                         extent_len = bytes;
3035
3036                         if (extent_logical < logic_start) {
3037                                 extent_len -= logic_start - extent_logical;
3038                                 extent_logical = logic_start;
3039                         }
3040
3041                         if (extent_logical + extent_len >
3042                             logic_start + map->stripe_len)
3043                                 extent_len = logic_start + map->stripe_len -
3044                                              extent_logical;
3045
3046                         scrub_parity_mark_sectors_data(sparity, extent_logical,
3047                                                        extent_len);
3048
3049                         mapped_length = extent_len;
3050                         ret = btrfs_map_block(fs_info, READ, extent_logical,
3051                                               &mapped_length, &bbio, 0);
3052                         if (!ret) {
3053                                 if (!bbio || mapped_length < extent_len)
3054                                         ret = -EIO;
3055                         }
3056                         if (ret) {
3057                                 btrfs_put_bbio(bbio);
3058                                 goto out;
3059                         }
3060                         extent_physical = bbio->stripes[0].physical;
3061                         extent_mirror_num = bbio->mirror_num;
3062                         extent_dev = bbio->stripes[0].dev;
3063                         btrfs_put_bbio(bbio);
3064
3065                         ret = btrfs_lookup_csums_range(csum_root,
3066                                                 extent_logical,
3067                                                 extent_logical + extent_len - 1,
3068                                                 &sctx->csum_list, 1);
3069                         if (ret)
3070                                 goto out;
3071
3072                         ret = scrub_extent_for_parity(sparity, extent_logical,
3073                                                       extent_len,
3074                                                       extent_physical,
3075                                                       extent_dev, flags,
3076                                                       generation,
3077                                                       extent_mirror_num);
3078
3079                         scrub_free_csums(sctx);
3080
3081                         if (ret)
3082                                 goto out;
3083
3084                         if (extent_logical + extent_len <
3085                             key.objectid + bytes) {
3086                                 logic_start += map->stripe_len;
3087
3088                                 if (logic_start >= logic_end) {
3089                                         stop_loop = 1;
3090                                         break;
3091                                 }
3092
3093                                 if (logic_start < key.objectid + bytes) {
3094                                         cond_resched();
3095                                         goto again;
3096                                 }
3097                         }
3098 next:
3099                         path->slots[0]++;
3100                 }
3101
3102                 btrfs_release_path(path);
3103
3104                 if (stop_loop)
3105                         break;
3106
3107                 logic_start += map->stripe_len;
3108         }
3109 out:
3110         if (ret < 0)
3111                 scrub_parity_mark_sectors_error(sparity, logic_start,
3112                                                 logic_end - logic_start);
3113         scrub_parity_put(sparity);
3114         scrub_submit(sctx);
3115         mutex_lock(&sctx->wr_ctx.wr_lock);
3116         scrub_wr_submit(sctx);
3117         mutex_unlock(&sctx->wr_ctx.wr_lock);
3118
3119         btrfs_release_path(path);
3120         return ret < 0 ? ret : 0;
3121 }
3122
3123 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3124                                            struct map_lookup *map,
3125                                            struct btrfs_device *scrub_dev,
3126                                            int num, u64 base, u64 length,
3127                                            int is_dev_replace)
3128 {
3129         struct btrfs_path *path, *ppath;
3130         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3131         struct btrfs_root *root = fs_info->extent_root;
3132         struct btrfs_root *csum_root = fs_info->csum_root;
3133         struct btrfs_extent_item *extent;
3134         struct blk_plug plug;
3135         u64 flags;
3136         int ret;
3137         int slot;
3138         u64 nstripes;
3139         struct extent_buffer *l;
3140         struct btrfs_key key;
3141         u64 physical;
3142         u64 logical;
3143         u64 logic_end;
3144         u64 physical_end;
3145         u64 generation;
3146         int mirror_num;
3147         struct reada_control *reada1;
3148         struct reada_control *reada2;
3149         struct btrfs_key key_start;
3150         struct btrfs_key key_end;
3151         u64 increment = map->stripe_len;
3152         u64 offset;
3153         u64 extent_logical;
3154         u64 extent_physical;
3155         u64 extent_len;
3156         u64 stripe_logical;
3157         u64 stripe_end;
3158         struct btrfs_device *extent_dev;
3159         int extent_mirror_num;
3160         int stop_loop = 0;
3161
3162         physical = map->stripes[num].physical;
3163         offset = 0;
3164         nstripes = div_u64(length, map->stripe_len);
3165         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3166                 offset = map->stripe_len * num;
3167                 increment = map->stripe_len * map->num_stripes;
3168                 mirror_num = 1;
3169         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3170                 int factor = map->num_stripes / map->sub_stripes;
3171                 offset = map->stripe_len * (num / map->sub_stripes);
3172                 increment = map->stripe_len * factor;
3173                 mirror_num = num % map->sub_stripes + 1;
3174         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3175                 increment = map->stripe_len;
3176                 mirror_num = num % map->num_stripes + 1;
3177         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3178                 increment = map->stripe_len;
3179                 mirror_num = num % map->num_stripes + 1;
3180         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3181                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3182                 increment = map->stripe_len * nr_data_stripes(map);
3183                 mirror_num = 1;
3184         } else {
3185                 increment = map->stripe_len;
3186                 mirror_num = 1;
3187         }
3188
3189         path = btrfs_alloc_path();
3190         if (!path)
3191                 return -ENOMEM;
3192
3193         ppath = btrfs_alloc_path();
3194         if (!ppath) {
3195                 btrfs_free_path(path);
3196                 return -ENOMEM;
3197         }
3198
3199         /*
3200          * work on commit root. The related disk blocks are static as
3201          * long as COW is applied. This means, it is save to rewrite
3202          * them to repair disk errors without any race conditions
3203          */
3204         path->search_commit_root = 1;
3205         path->skip_locking = 1;
3206
3207         ppath->search_commit_root = 1;
3208         ppath->skip_locking = 1;
3209         /*
3210          * trigger the readahead for extent tree csum tree and wait for
3211          * completion. During readahead, the scrub is officially paused
3212          * to not hold off transaction commits
3213          */
3214         logical = base + offset;
3215         physical_end = physical + nstripes * map->stripe_len;
3216         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3217                 get_raid56_logic_offset(physical_end, num,
3218                                         map, &logic_end, NULL);
3219                 logic_end += base;
3220         } else {
3221                 logic_end = logical + increment * nstripes;
3222         }
3223         wait_event(sctx->list_wait,
3224                    atomic_read(&sctx->bios_in_flight) == 0);
3225         scrub_blocked_if_needed(fs_info);
3226
3227         /* FIXME it might be better to start readahead at commit root */
3228         key_start.objectid = logical;
3229         key_start.type = BTRFS_EXTENT_ITEM_KEY;
3230         key_start.offset = (u64)0;
3231         key_end.objectid = logic_end;
3232         key_end.type = BTRFS_METADATA_ITEM_KEY;
3233         key_end.offset = (u64)-1;
3234         reada1 = btrfs_reada_add(root, &key_start, &key_end);
3235
3236         key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3237         key_start.type = BTRFS_EXTENT_CSUM_KEY;
3238         key_start.offset = logical;
3239         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3240         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3241         key_end.offset = logic_end;
3242         reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
3243
3244         if (!IS_ERR(reada1))
3245                 btrfs_reada_wait(reada1);
3246         if (!IS_ERR(reada2))
3247                 btrfs_reada_wait(reada2);
3248
3249
3250         /*
3251          * collect all data csums for the stripe to avoid seeking during
3252          * the scrub. This might currently (crc32) end up to be about 1MB
3253          */
3254         blk_start_plug(&plug);
3255
3256         /*
3257          * now find all extents for each stripe and scrub them
3258          */
3259         ret = 0;
3260         while (physical < physical_end) {
3261                 /*
3262                  * canceled?
3263                  */
3264                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3265                     atomic_read(&sctx->cancel_req)) {
3266                         ret = -ECANCELED;
3267                         goto out;
3268                 }
3269                 /*
3270                  * check to see if we have to pause
3271                  */
3272                 if (atomic_read(&fs_info->scrub_pause_req)) {
3273                         /* push queued extents */
3274                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3275                         scrub_submit(sctx);
3276                         mutex_lock(&sctx->wr_ctx.wr_lock);
3277                         scrub_wr_submit(sctx);
3278                         mutex_unlock(&sctx->wr_ctx.wr_lock);
3279                         wait_event(sctx->list_wait,
3280                                    atomic_read(&sctx->bios_in_flight) == 0);
3281                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3282                         scrub_blocked_if_needed(fs_info);
3283                 }
3284
3285                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3286                         ret = get_raid56_logic_offset(physical, num, map,
3287                                                       &logical,
3288                                                       &stripe_logical);
3289                         logical += base;
3290                         if (ret) {
3291                                 /* it is parity strip */
3292                                 stripe_logical += base;
3293                                 stripe_end = stripe_logical + increment;
3294                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3295                                                           ppath, stripe_logical,
3296                                                           stripe_end);
3297                                 if (ret)
3298                                         goto out;
3299                                 goto skip;
3300                         }
3301                 }
3302
3303                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3304                         key.type = BTRFS_METADATA_ITEM_KEY;
3305                 else
3306                         key.type = BTRFS_EXTENT_ITEM_KEY;
3307                 key.objectid = logical;
3308                 key.offset = (u64)-1;
3309
3310                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3311                 if (ret < 0)
3312                         goto out;
3313
3314                 if (ret > 0) {
3315                         ret = btrfs_previous_extent_item(root, path, 0);
3316                         if (ret < 0)
3317                                 goto out;
3318                         if (ret > 0) {
3319                                 /* there's no smaller item, so stick with the
3320                                  * larger one */
3321                                 btrfs_release_path(path);
3322                                 ret = btrfs_search_slot(NULL, root, &key,
3323                                                         path, 0, 0);
3324                                 if (ret < 0)
3325                                         goto out;
3326                         }
3327                 }
3328
3329                 stop_loop = 0;
3330                 while (1) {
3331                         u64 bytes;
3332
3333                         l = path->nodes[0];
3334                         slot = path->slots[0];
3335                         if (slot >= btrfs_header_nritems(l)) {
3336                                 ret = btrfs_next_leaf(root, path);
3337                                 if (ret == 0)
3338                                         continue;
3339                                 if (ret < 0)
3340                                         goto out;
3341
3342                                 stop_loop = 1;
3343                                 break;
3344                         }
3345                         btrfs_item_key_to_cpu(l, &key, slot);
3346
3347                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3348                             key.type != BTRFS_METADATA_ITEM_KEY)
3349                                 goto next;
3350
3351                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3352                                 bytes = root->nodesize;
3353                         else
3354                                 bytes = key.offset;
3355
3356                         if (key.objectid + bytes <= logical)
3357                                 goto next;
3358
3359                         if (key.objectid >= logical + map->stripe_len) {
3360                                 /* out of this device extent */
3361                                 if (key.objectid >= logic_end)
3362                                         stop_loop = 1;
3363                                 break;
3364                         }
3365
3366                         extent = btrfs_item_ptr(l, slot,
3367                                                 struct btrfs_extent_item);
3368                         flags = btrfs_extent_flags(l, extent);
3369                         generation = btrfs_extent_generation(l, extent);
3370
3371                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3372                             (key.objectid < logical ||
3373                              key.objectid + bytes >
3374                              logical + map->stripe_len)) {
3375                                 btrfs_err(fs_info,
3376                                            "scrub: tree block %llu spanning "
3377                                            "stripes, ignored. logical=%llu",
3378                                        key.objectid, logical);
3379                                 spin_lock(&sctx->stat_lock);
3380                                 sctx->stat.uncorrectable_errors++;
3381                                 spin_unlock(&sctx->stat_lock);
3382                                 goto next;
3383                         }
3384
3385 again:
3386                         extent_logical = key.objectid;
3387                         extent_len = bytes;
3388
3389                         /*
3390                          * trim extent to this stripe
3391                          */
3392                         if (extent_logical < logical) {
3393                                 extent_len -= logical - extent_logical;
3394                                 extent_logical = logical;
3395                         }
3396                         if (extent_logical + extent_len >
3397                             logical + map->stripe_len) {
3398                                 extent_len = logical + map->stripe_len -
3399                                              extent_logical;
3400                         }
3401
3402                         extent_physical = extent_logical - logical + physical;
3403                         extent_dev = scrub_dev;
3404                         extent_mirror_num = mirror_num;
3405                         if (is_dev_replace)
3406                                 scrub_remap_extent(fs_info, extent_logical,
3407                                                    extent_len, &extent_physical,
3408                                                    &extent_dev,
3409                                                    &extent_mirror_num);
3410
3411                         ret = btrfs_lookup_csums_range(csum_root,
3412                                                        extent_logical,
3413                                                        extent_logical +
3414                                                        extent_len - 1,
3415                                                        &sctx->csum_list, 1);
3416                         if (ret)
3417                                 goto out;
3418
3419                         ret = scrub_extent(sctx, extent_logical, extent_len,
3420                                            extent_physical, extent_dev, flags,
3421                                            generation, extent_mirror_num,
3422                                            extent_logical - logical + physical);
3423
3424                         scrub_free_csums(sctx);
3425
3426                         if (ret)
3427                                 goto out;
3428
3429                         if (extent_logical + extent_len <
3430                             key.objectid + bytes) {
3431                                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3432                                         /*
3433                                          * loop until we find next data stripe
3434                                          * or we have finished all stripes.
3435                                          */
3436 loop:
3437                                         physical += map->stripe_len;
3438                                         ret = get_raid56_logic_offset(physical,
3439                                                         num, map, &logical,
3440                                                         &stripe_logical);
3441                                         logical += base;
3442
3443                                         if (ret && physical < physical_end) {
3444                                                 stripe_logical += base;
3445                                                 stripe_end = stripe_logical +
3446                                                                 increment;
3447                                                 ret = scrub_raid56_parity(sctx,
3448                                                         map, scrub_dev, ppath,
3449                                                         stripe_logical,
3450                                                         stripe_end);
3451                                                 if (ret)
3452                                                         goto out;
3453                                                 goto loop;
3454                                         }
3455                                 } else {
3456                                         physical += map->stripe_len;
3457                                         logical += increment;
3458                                 }
3459                                 if (logical < key.objectid + bytes) {
3460                                         cond_resched();
3461                                         goto again;
3462                                 }
3463
3464                                 if (physical >= physical_end) {
3465                                         stop_loop = 1;
3466                                         break;
3467                                 }
3468                         }
3469 next:
3470                         path->slots[0]++;
3471                 }
3472                 btrfs_release_path(path);
3473 skip:
3474                 logical += increment;
3475                 physical += map->stripe_len;
3476                 spin_lock(&sctx->stat_lock);
3477                 if (stop_loop)
3478                         sctx->stat.last_physical = map->stripes[num].physical +
3479                                                    length;
3480                 else
3481                         sctx->stat.last_physical = physical;
3482                 spin_unlock(&sctx->stat_lock);
3483                 if (stop_loop)
3484                         break;
3485         }
3486 out:
3487         /* push queued extents */
3488         scrub_submit(sctx);
3489         mutex_lock(&sctx->wr_ctx.wr_lock);
3490         scrub_wr_submit(sctx);
3491         mutex_unlock(&sctx->wr_ctx.wr_lock);
3492
3493         blk_finish_plug(&plug);
3494         btrfs_free_path(path);
3495         btrfs_free_path(ppath);
3496         return ret < 0 ? ret : 0;
3497 }
3498
3499 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3500                                           struct btrfs_device *scrub_dev,
3501                                           u64 chunk_offset, u64 length,
3502                                           u64 dev_offset, int is_dev_replace)
3503 {
3504         struct btrfs_mapping_tree *map_tree =
3505                 &sctx->dev_root->fs_info->mapping_tree;
3506         struct map_lookup *map;
3507         struct extent_map *em;
3508         int i;
3509         int ret = 0;
3510
3511         read_lock(&map_tree->map_tree.lock);
3512         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3513         read_unlock(&map_tree->map_tree.lock);
3514
3515         if (!em)
3516                 return -EINVAL;
3517
3518         map = (struct map_lookup *)em->bdev;
3519         if (em->start != chunk_offset)
3520                 goto out;
3521
3522         if (em->len < length)
3523                 goto out;
3524
3525         for (i = 0; i < map->num_stripes; ++i) {
3526                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3527                     map->stripes[i].physical == dev_offset) {
3528                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3529                                            chunk_offset, length,
3530                                            is_dev_replace);
3531                         if (ret)
3532                                 goto out;
3533                 }
3534         }
3535 out:
3536         free_extent_map(em);
3537
3538         return ret;
3539 }
3540
3541 static noinline_for_stack
3542 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3543                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3544                            int is_dev_replace)
3545 {
3546         struct btrfs_dev_extent *dev_extent = NULL;
3547         struct btrfs_path *path;
3548         struct btrfs_root *root = sctx->dev_root;
3549         struct btrfs_fs_info *fs_info = root->fs_info;
3550         u64 length;
3551         u64 chunk_offset;
3552         int ret = 0;
3553         int slot;
3554         struct extent_buffer *l;
3555         struct btrfs_key key;
3556         struct btrfs_key found_key;
3557         struct btrfs_block_group_cache *cache;
3558         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3559
3560         path = btrfs_alloc_path();
3561         if (!path)
3562                 return -ENOMEM;
3563
3564         path->reada = 2;
3565         path->search_commit_root = 1;
3566         path->skip_locking = 1;
3567
3568         key.objectid = scrub_dev->devid;
3569         key.offset = 0ull;
3570         key.type = BTRFS_DEV_EXTENT_KEY;
3571
3572         while (1) {
3573                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3574                 if (ret < 0)
3575                         break;
3576                 if (ret > 0) {
3577                         if (path->slots[0] >=
3578                             btrfs_header_nritems(path->nodes[0])) {
3579                                 ret = btrfs_next_leaf(root, path);
3580                                 if (ret < 0)
3581                                         break;
3582                                 if (ret > 0) {
3583                                         ret = 0;
3584                                         break;
3585                                 }
3586                         } else {
3587                                 ret = 0;
3588                         }
3589                 }
3590
3591                 l = path->nodes[0];
3592                 slot = path->slots[0];
3593
3594                 btrfs_item_key_to_cpu(l, &found_key, slot);
3595
3596                 if (found_key.objectid != scrub_dev->devid)
3597                         break;
3598
3599                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3600                         break;
3601
3602                 if (found_key.offset >= end)
3603                         break;
3604
3605                 if (found_key.offset < key.offset)
3606                         break;
3607
3608                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3609                 length = btrfs_dev_extent_length(l, dev_extent);
3610
3611                 if (found_key.offset + length <= start)
3612                         goto skip;
3613
3614                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3615
3616                 /*
3617                  * get a reference on the corresponding block group to prevent
3618                  * the chunk from going away while we scrub it
3619                  */
3620                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3621
3622                 /* some chunks are removed but not committed to disk yet,
3623                  * continue scrubbing */
3624                 if (!cache)
3625                         goto skip;
3626
3627                 /*
3628                  * we need call btrfs_inc_block_group_ro() with scrubs_paused,
3629                  * to avoid deadlock caused by:
3630                  * btrfs_inc_block_group_ro()
3631                  * -> btrfs_wait_for_commit()
3632                  * -> btrfs_commit_transaction()
3633                  * -> btrfs_scrub_pause()
3634                  */
3635                 scrub_pause_on(fs_info);
3636                 ret = btrfs_inc_block_group_ro(root, cache);
3637                 scrub_pause_off(fs_info);
3638                 if (ret) {
3639                         btrfs_put_block_group(cache);
3640                         break;
3641                 }
3642
3643                 dev_replace->cursor_right = found_key.offset + length;
3644                 dev_replace->cursor_left = found_key.offset;
3645                 dev_replace->item_needs_writeback = 1;
3646                 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3647                                   found_key.offset, is_dev_replace);
3648
3649                 /*
3650                  * flush, submit all pending read and write bios, afterwards
3651                  * wait for them.
3652                  * Note that in the dev replace case, a read request causes
3653                  * write requests that are submitted in the read completion
3654                  * worker. Therefore in the current situation, it is required
3655                  * that all write requests are flushed, so that all read and
3656                  * write requests are really completed when bios_in_flight
3657                  * changes to 0.
3658                  */
3659                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3660                 scrub_submit(sctx);
3661                 mutex_lock(&sctx->wr_ctx.wr_lock);
3662                 scrub_wr_submit(sctx);
3663                 mutex_unlock(&sctx->wr_ctx.wr_lock);
3664
3665                 wait_event(sctx->list_wait,
3666                            atomic_read(&sctx->bios_in_flight) == 0);
3667
3668                 scrub_pause_on(fs_info);
3669
3670                 /*
3671                  * must be called before we decrease @scrub_paused.
3672                  * make sure we don't block transaction commit while
3673                  * we are waiting pending workers finished.
3674                  */
3675                 wait_event(sctx->list_wait,
3676                            atomic_read(&sctx->workers_pending) == 0);
3677                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3678
3679                 scrub_pause_off(fs_info);
3680
3681                 btrfs_dec_block_group_ro(root, cache);
3682
3683                 btrfs_put_block_group(cache);
3684                 if (ret)
3685                         break;
3686                 if (is_dev_replace &&
3687                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3688                         ret = -EIO;
3689                         break;
3690                 }
3691                 if (sctx->stat.malloc_errors > 0) {
3692                         ret = -ENOMEM;
3693                         break;
3694                 }
3695
3696                 dev_replace->cursor_left = dev_replace->cursor_right;
3697                 dev_replace->item_needs_writeback = 1;
3698 skip:
3699                 key.offset = found_key.offset + length;
3700                 btrfs_release_path(path);
3701         }
3702
3703         btrfs_free_path(path);
3704
3705         return ret;
3706 }
3707
3708 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3709                                            struct btrfs_device *scrub_dev)
3710 {
3711         int     i;
3712         u64     bytenr;
3713         u64     gen;
3714         int     ret;
3715         struct btrfs_root *root = sctx->dev_root;
3716
3717         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
3718                 return -EIO;
3719
3720         /* Seed devices of a new filesystem has their own generation. */
3721         if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3722                 gen = scrub_dev->generation;
3723         else
3724                 gen = root->fs_info->last_trans_committed;
3725
3726         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3727                 bytenr = btrfs_sb_offset(i);
3728                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3729                     scrub_dev->commit_total_bytes)
3730                         break;
3731
3732                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3733                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3734                                   NULL, 1, bytenr);
3735                 if (ret)
3736                         return ret;
3737         }
3738         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3739
3740         return 0;
3741 }
3742
3743 /*
3744  * get a reference count on fs_info->scrub_workers. start worker if necessary
3745  */
3746 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3747                                                 int is_dev_replace)
3748 {
3749         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3750         int max_active = fs_info->thread_pool_size;
3751
3752         if (fs_info->scrub_workers_refcnt == 0) {
3753                 if (is_dev_replace)
3754                         fs_info->scrub_workers =
3755                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3756                                                       1, 4);
3757                 else
3758                         fs_info->scrub_workers =
3759                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3760                                                       max_active, 4);
3761                 if (!fs_info->scrub_workers)
3762                         goto fail_scrub_workers;
3763
3764                 fs_info->scrub_wr_completion_workers =
3765                         btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3766                                               max_active, 2);
3767                 if (!fs_info->scrub_wr_completion_workers)
3768                         goto fail_scrub_wr_completion_workers;
3769
3770                 fs_info->scrub_nocow_workers =
3771                         btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3772                 if (!fs_info->scrub_nocow_workers)
3773                         goto fail_scrub_nocow_workers;
3774                 fs_info->scrub_parity_workers =
3775                         btrfs_alloc_workqueue("btrfs-scrubparity", flags,
3776                                               max_active, 2);
3777                 if (!fs_info->scrub_parity_workers)
3778                         goto fail_scrub_parity_workers;
3779         }
3780         ++fs_info->scrub_workers_refcnt;
3781         return 0;
3782
3783 fail_scrub_parity_workers:
3784         btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3785 fail_scrub_nocow_workers:
3786         btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3787 fail_scrub_wr_completion_workers:
3788         btrfs_destroy_workqueue(fs_info->scrub_workers);
3789 fail_scrub_workers:
3790         return -ENOMEM;
3791 }
3792
3793 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3794 {
3795         if (--fs_info->scrub_workers_refcnt == 0) {
3796                 btrfs_destroy_workqueue(fs_info->scrub_workers);
3797                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3798                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3799                 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
3800         }
3801         WARN_ON(fs_info->scrub_workers_refcnt < 0);
3802 }
3803
3804 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3805                     u64 end, struct btrfs_scrub_progress *progress,
3806                     int readonly, int is_dev_replace)
3807 {
3808         struct scrub_ctx *sctx;
3809         int ret;
3810         struct btrfs_device *dev;
3811         struct rcu_string *name;
3812
3813         if (btrfs_fs_closing(fs_info))
3814                 return -EINVAL;
3815
3816         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
3817                 /*
3818                  * in this case scrub is unable to calculate the checksum
3819                  * the way scrub is implemented. Do not handle this
3820                  * situation at all because it won't ever happen.
3821                  */
3822                 btrfs_err(fs_info,
3823                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3824                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
3825                 return -EINVAL;
3826         }
3827
3828         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
3829                 /* not supported for data w/o checksums */
3830                 btrfs_err(fs_info,
3831                            "scrub: size assumption sectorsize != PAGE_SIZE "
3832                            "(%d != %lu) fails",
3833                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
3834                 return -EINVAL;
3835         }
3836
3837         if (fs_info->chunk_root->nodesize >
3838             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3839             fs_info->chunk_root->sectorsize >
3840             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3841                 /*
3842                  * would exhaust the array bounds of pagev member in
3843                  * struct scrub_block
3844                  */
3845                 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3846                            "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3847                        fs_info->chunk_root->nodesize,
3848                        SCRUB_MAX_PAGES_PER_BLOCK,
3849                        fs_info->chunk_root->sectorsize,
3850                        SCRUB_MAX_PAGES_PER_BLOCK);
3851                 return -EINVAL;
3852         }
3853
3854
3855         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3856         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3857         if (!dev || (dev->missing && !is_dev_replace)) {
3858                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3859                 return -ENODEV;
3860         }
3861
3862         if (!is_dev_replace && !readonly && !dev->writeable) {
3863                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3864                 rcu_read_lock();
3865                 name = rcu_dereference(dev->name);
3866                 btrfs_err(fs_info, "scrub: device %s is not writable",
3867                           name->str);
3868                 rcu_read_unlock();
3869                 return -EROFS;
3870         }
3871
3872         mutex_lock(&fs_info->scrub_lock);
3873         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3874                 mutex_unlock(&fs_info->scrub_lock);
3875                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3876                 return -EIO;
3877         }
3878
3879         btrfs_dev_replace_lock(&fs_info->dev_replace);
3880         if (dev->scrub_device ||
3881             (!is_dev_replace &&
3882              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3883                 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3884                 mutex_unlock(&fs_info->scrub_lock);
3885                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3886                 return -EINPROGRESS;
3887         }
3888         btrfs_dev_replace_unlock(&fs_info->dev_replace);
3889
3890         ret = scrub_workers_get(fs_info, is_dev_replace);
3891         if (ret) {
3892                 mutex_unlock(&fs_info->scrub_lock);
3893                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3894                 return ret;
3895         }
3896
3897         sctx = scrub_setup_ctx(dev, is_dev_replace);
3898         if (IS_ERR(sctx)) {
3899                 mutex_unlock(&fs_info->scrub_lock);
3900                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3901                 scrub_workers_put(fs_info);
3902                 return PTR_ERR(sctx);
3903         }
3904         sctx->readonly = readonly;
3905         dev->scrub_device = sctx;
3906         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3907
3908         /*
3909          * checking @scrub_pause_req here, we can avoid
3910          * race between committing transaction and scrubbing.
3911          */
3912         __scrub_blocked_if_needed(fs_info);
3913         atomic_inc(&fs_info->scrubs_running);
3914         mutex_unlock(&fs_info->scrub_lock);
3915
3916         if (!is_dev_replace) {
3917                 /*
3918                  * by holding device list mutex, we can
3919                  * kick off writing super in log tree sync.
3920                  */
3921                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3922                 ret = scrub_supers(sctx, dev);
3923                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3924         }
3925
3926         if (!ret)
3927                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3928                                              is_dev_replace);
3929
3930         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3931         atomic_dec(&fs_info->scrubs_running);
3932         wake_up(&fs_info->scrub_pause_wait);
3933
3934         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3935
3936         if (progress)
3937                 memcpy(progress, &sctx->stat, sizeof(*progress));
3938
3939         mutex_lock(&fs_info->scrub_lock);
3940         dev->scrub_device = NULL;
3941         scrub_workers_put(fs_info);
3942         mutex_unlock(&fs_info->scrub_lock);
3943
3944         scrub_put_ctx(sctx);
3945
3946         return ret;
3947 }
3948
3949 void btrfs_scrub_pause(struct btrfs_root *root)
3950 {
3951         struct btrfs_fs_info *fs_info = root->fs_info;
3952
3953         mutex_lock(&fs_info->scrub_lock);
3954         atomic_inc(&fs_info->scrub_pause_req);
3955         while (atomic_read(&fs_info->scrubs_paused) !=
3956                atomic_read(&fs_info->scrubs_running)) {
3957                 mutex_unlock(&fs_info->scrub_lock);
3958                 wait_event(fs_info->scrub_pause_wait,
3959                            atomic_read(&fs_info->scrubs_paused) ==
3960                            atomic_read(&fs_info->scrubs_running));
3961                 mutex_lock(&fs_info->scrub_lock);
3962         }
3963         mutex_unlock(&fs_info->scrub_lock);
3964 }
3965
3966 void btrfs_scrub_continue(struct btrfs_root *root)
3967 {
3968         struct btrfs_fs_info *fs_info = root->fs_info;
3969
3970         atomic_dec(&fs_info->scrub_pause_req);
3971         wake_up(&fs_info->scrub_pause_wait);
3972 }
3973
3974 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3975 {
3976         mutex_lock(&fs_info->scrub_lock);
3977         if (!atomic_read(&fs_info->scrubs_running)) {
3978                 mutex_unlock(&fs_info->scrub_lock);
3979                 return -ENOTCONN;
3980         }
3981
3982         atomic_inc(&fs_info->scrub_cancel_req);
3983         while (atomic_read(&fs_info->scrubs_running)) {
3984                 mutex_unlock(&fs_info->scrub_lock);
3985                 wait_event(fs_info->scrub_pause_wait,
3986                            atomic_read(&fs_info->scrubs_running) == 0);
3987                 mutex_lock(&fs_info->scrub_lock);
3988         }
3989         atomic_dec(&fs_info->scrub_cancel_req);
3990         mutex_unlock(&fs_info->scrub_lock);
3991
3992         return 0;
3993 }
3994
3995 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3996                            struct btrfs_device *dev)
3997 {
3998         struct scrub_ctx *sctx;
3999
4000         mutex_lock(&fs_info->scrub_lock);
4001         sctx = dev->scrub_device;
4002         if (!sctx) {
4003                 mutex_unlock(&fs_info->scrub_lock);
4004                 return -ENOTCONN;
4005         }
4006         atomic_inc(&sctx->cancel_req);
4007         while (dev->scrub_device) {
4008                 mutex_unlock(&fs_info->scrub_lock);
4009                 wait_event(fs_info->scrub_pause_wait,
4010                            dev->scrub_device == NULL);
4011                 mutex_lock(&fs_info->scrub_lock);
4012         }
4013         mutex_unlock(&fs_info->scrub_lock);
4014
4015         return 0;
4016 }
4017
4018 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
4019                          struct btrfs_scrub_progress *progress)
4020 {
4021         struct btrfs_device *dev;
4022         struct scrub_ctx *sctx = NULL;
4023
4024         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
4025         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
4026         if (dev)
4027                 sctx = dev->scrub_device;
4028         if (sctx)
4029                 memcpy(progress, &sctx->stat, sizeof(*progress));
4030         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
4031
4032         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4033 }
4034
4035 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4036                                u64 extent_logical, u64 extent_len,
4037                                u64 *extent_physical,
4038                                struct btrfs_device **extent_dev,
4039                                int *extent_mirror_num)
4040 {
4041         u64 mapped_length;
4042         struct btrfs_bio *bbio = NULL;
4043         int ret;
4044
4045         mapped_length = extent_len;
4046         ret = btrfs_map_block(fs_info, READ, extent_logical,
4047                               &mapped_length, &bbio, 0);
4048         if (ret || !bbio || mapped_length < extent_len ||
4049             !bbio->stripes[0].dev->bdev) {
4050                 btrfs_put_bbio(bbio);
4051                 return;
4052         }
4053
4054         *extent_physical = bbio->stripes[0].physical;
4055         *extent_mirror_num = bbio->mirror_num;
4056         *extent_dev = bbio->stripes[0].dev;
4057         btrfs_put_bbio(bbio);
4058 }
4059
4060 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
4061                               struct scrub_wr_ctx *wr_ctx,
4062                               struct btrfs_fs_info *fs_info,
4063                               struct btrfs_device *dev,
4064                               int is_dev_replace)
4065 {
4066         WARN_ON(wr_ctx->wr_curr_bio != NULL);
4067
4068         mutex_init(&wr_ctx->wr_lock);
4069         wr_ctx->wr_curr_bio = NULL;
4070         if (!is_dev_replace)
4071                 return 0;
4072
4073         WARN_ON(!dev->bdev);
4074         wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
4075         wr_ctx->tgtdev = dev;
4076         atomic_set(&wr_ctx->flush_all_writes, 0);
4077         return 0;
4078 }
4079
4080 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
4081 {
4082         mutex_lock(&wr_ctx->wr_lock);
4083         kfree(wr_ctx->wr_curr_bio);
4084         wr_ctx->wr_curr_bio = NULL;
4085         mutex_unlock(&wr_ctx->wr_lock);
4086 }
4087
4088 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4089                             int mirror_num, u64 physical_for_dev_replace)
4090 {
4091         struct scrub_copy_nocow_ctx *nocow_ctx;
4092         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
4093
4094         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4095         if (!nocow_ctx) {
4096                 spin_lock(&sctx->stat_lock);
4097                 sctx->stat.malloc_errors++;
4098                 spin_unlock(&sctx->stat_lock);
4099                 return -ENOMEM;
4100         }
4101
4102         scrub_pending_trans_workers_inc(sctx);
4103
4104         nocow_ctx->sctx = sctx;
4105         nocow_ctx->logical = logical;
4106         nocow_ctx->len = len;
4107         nocow_ctx->mirror_num = mirror_num;
4108         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
4109         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4110                         copy_nocow_pages_worker, NULL, NULL);
4111         INIT_LIST_HEAD(&nocow_ctx->inodes);
4112         btrfs_queue_work(fs_info->scrub_nocow_workers,
4113                          &nocow_ctx->work);
4114
4115         return 0;
4116 }
4117
4118 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4119 {
4120         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4121         struct scrub_nocow_inode *nocow_inode;
4122
4123         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4124         if (!nocow_inode)
4125                 return -ENOMEM;
4126         nocow_inode->inum = inum;
4127         nocow_inode->offset = offset;
4128         nocow_inode->root = root;
4129         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4130         return 0;
4131 }
4132
4133 #define COPY_COMPLETE 1
4134
4135 static void copy_nocow_pages_worker(struct btrfs_work *work)
4136 {
4137         struct scrub_copy_nocow_ctx *nocow_ctx =
4138                 container_of(work, struct scrub_copy_nocow_ctx, work);
4139         struct scrub_ctx *sctx = nocow_ctx->sctx;
4140         u64 logical = nocow_ctx->logical;
4141         u64 len = nocow_ctx->len;
4142         int mirror_num = nocow_ctx->mirror_num;
4143         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4144         int ret;
4145         struct btrfs_trans_handle *trans = NULL;
4146         struct btrfs_fs_info *fs_info;
4147         struct btrfs_path *path;
4148         struct btrfs_root *root;
4149         int not_written = 0;
4150
4151         fs_info = sctx->dev_root->fs_info;
4152         root = fs_info->extent_root;
4153
4154         path = btrfs_alloc_path();
4155         if (!path) {
4156                 spin_lock(&sctx->stat_lock);
4157                 sctx->stat.malloc_errors++;
4158                 spin_unlock(&sctx->stat_lock);
4159                 not_written = 1;
4160                 goto out;
4161         }
4162
4163         trans = btrfs_join_transaction(root);
4164         if (IS_ERR(trans)) {
4165                 not_written = 1;
4166                 goto out;
4167         }
4168
4169         ret = iterate_inodes_from_logical(logical, fs_info, path,
4170                                           record_inode_for_nocow, nocow_ctx);
4171         if (ret != 0 && ret != -ENOENT) {
4172                 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
4173                         "phys %llu, len %llu, mir %u, ret %d",
4174                         logical, physical_for_dev_replace, len, mirror_num,
4175                         ret);
4176                 not_written = 1;
4177                 goto out;
4178         }
4179
4180         btrfs_end_transaction(trans, root);
4181         trans = NULL;
4182         while (!list_empty(&nocow_ctx->inodes)) {
4183                 struct scrub_nocow_inode *entry;
4184                 entry = list_first_entry(&nocow_ctx->inodes,
4185                                          struct scrub_nocow_inode,
4186                                          list);
4187                 list_del_init(&entry->list);
4188                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4189                                                  entry->root, nocow_ctx);
4190                 kfree(entry);
4191                 if (ret == COPY_COMPLETE) {
4192                         ret = 0;
4193                         break;
4194                 } else if (ret) {
4195                         break;
4196                 }
4197         }
4198 out:
4199         while (!list_empty(&nocow_ctx->inodes)) {
4200                 struct scrub_nocow_inode *entry;
4201                 entry = list_first_entry(&nocow_ctx->inodes,
4202                                          struct scrub_nocow_inode,
4203                                          list);
4204                 list_del_init(&entry->list);
4205                 kfree(entry);
4206         }
4207         if (trans && !IS_ERR(trans))
4208                 btrfs_end_transaction(trans, root);
4209         if (not_written)
4210                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4211                                             num_uncorrectable_read_errors);
4212
4213         btrfs_free_path(path);
4214         kfree(nocow_ctx);
4215
4216         scrub_pending_trans_workers_dec(sctx);
4217 }
4218
4219 static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4220                                  u64 logical)
4221 {
4222         struct extent_state *cached_state = NULL;
4223         struct btrfs_ordered_extent *ordered;
4224         struct extent_io_tree *io_tree;
4225         struct extent_map *em;
4226         u64 lockstart = start, lockend = start + len - 1;
4227         int ret = 0;
4228
4229         io_tree = &BTRFS_I(inode)->io_tree;
4230
4231         lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
4232         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4233         if (ordered) {
4234                 btrfs_put_ordered_extent(ordered);
4235                 ret = 1;
4236                 goto out_unlock;
4237         }
4238
4239         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4240         if (IS_ERR(em)) {
4241                 ret = PTR_ERR(em);
4242                 goto out_unlock;
4243         }
4244
4245         /*
4246          * This extent does not actually cover the logical extent anymore,
4247          * move on to the next inode.
4248          */
4249         if (em->block_start > logical ||
4250             em->block_start + em->block_len < logical + len) {
4251                 free_extent_map(em);
4252                 ret = 1;
4253                 goto out_unlock;
4254         }
4255         free_extent_map(em);
4256
4257 out_unlock:
4258         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4259                              GFP_NOFS);
4260         return ret;
4261 }
4262
4263 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4264                                       struct scrub_copy_nocow_ctx *nocow_ctx)
4265 {
4266         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
4267         struct btrfs_key key;
4268         struct inode *inode;
4269         struct page *page;
4270         struct btrfs_root *local_root;
4271         struct extent_io_tree *io_tree;
4272         u64 physical_for_dev_replace;
4273         u64 nocow_ctx_logical;
4274         u64 len = nocow_ctx->len;
4275         unsigned long index;
4276         int srcu_index;
4277         int ret = 0;
4278         int err = 0;
4279
4280         key.objectid = root;
4281         key.type = BTRFS_ROOT_ITEM_KEY;
4282         key.offset = (u64)-1;
4283
4284         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4285
4286         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4287         if (IS_ERR(local_root)) {
4288                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4289                 return PTR_ERR(local_root);
4290         }
4291
4292         key.type = BTRFS_INODE_ITEM_KEY;
4293         key.objectid = inum;
4294         key.offset = 0;
4295         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4296         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4297         if (IS_ERR(inode))
4298                 return PTR_ERR(inode);
4299
4300         /* Avoid truncate/dio/punch hole.. */
4301         mutex_lock(&inode->i_mutex);
4302         inode_dio_wait(inode);
4303
4304         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4305         io_tree = &BTRFS_I(inode)->io_tree;
4306         nocow_ctx_logical = nocow_ctx->logical;
4307
4308         ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
4309         if (ret) {
4310                 ret = ret > 0 ? 0 : ret;
4311                 goto out;
4312         }
4313
4314         while (len >= PAGE_CACHE_SIZE) {
4315                 index = offset >> PAGE_CACHE_SHIFT;
4316 again:
4317                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4318                 if (!page) {
4319                         btrfs_err(fs_info, "find_or_create_page() failed");
4320                         ret = -ENOMEM;
4321                         goto out;
4322                 }
4323
4324                 if (PageUptodate(page)) {
4325                         if (PageDirty(page))
4326                                 goto next_page;
4327                 } else {
4328                         ClearPageError(page);
4329                         err = extent_read_full_page(io_tree, page,
4330                                                            btrfs_get_extent,
4331                                                            nocow_ctx->mirror_num);
4332                         if (err) {
4333                                 ret = err;
4334                                 goto next_page;
4335                         }
4336
4337                         lock_page(page);
4338                         /*
4339                          * If the page has been remove from the page cache,
4340                          * the data on it is meaningless, because it may be
4341                          * old one, the new data may be written into the new
4342                          * page in the page cache.
4343                          */
4344                         if (page->mapping != inode->i_mapping) {
4345                                 unlock_page(page);
4346                                 page_cache_release(page);
4347                                 goto again;
4348                         }
4349                         if (!PageUptodate(page)) {
4350                                 ret = -EIO;
4351                                 goto next_page;
4352                         }
4353                 }
4354
4355                 ret = check_extent_to_block(inode, offset, len,
4356                                             nocow_ctx_logical);
4357                 if (ret) {
4358                         ret = ret > 0 ? 0 : ret;
4359                         goto next_page;
4360                 }
4361
4362                 err = write_page_nocow(nocow_ctx->sctx,
4363                                        physical_for_dev_replace, page);
4364                 if (err)
4365                         ret = err;
4366 next_page:
4367                 unlock_page(page);
4368                 page_cache_release(page);
4369
4370                 if (ret)
4371                         break;
4372
4373                 offset += PAGE_CACHE_SIZE;
4374                 physical_for_dev_replace += PAGE_CACHE_SIZE;
4375                 nocow_ctx_logical += PAGE_CACHE_SIZE;
4376                 len -= PAGE_CACHE_SIZE;
4377         }
4378         ret = COPY_COMPLETE;
4379 out:
4380         mutex_unlock(&inode->i_mutex);
4381         iput(inode);
4382         return ret;
4383 }
4384
4385 static int write_page_nocow(struct scrub_ctx *sctx,
4386                             u64 physical_for_dev_replace, struct page *page)
4387 {
4388         struct bio *bio;
4389         struct btrfs_device *dev;
4390         int ret;
4391
4392         dev = sctx->wr_ctx.tgtdev;
4393         if (!dev)
4394                 return -EIO;
4395         if (!dev->bdev) {
4396                 btrfs_warn_rl(dev->dev_root->fs_info,
4397                         "scrub write_page_nocow(bdev == NULL) is unexpected");
4398                 return -EIO;
4399         }
4400         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4401         if (!bio) {
4402                 spin_lock(&sctx->stat_lock);
4403                 sctx->stat.malloc_errors++;
4404                 spin_unlock(&sctx->stat_lock);
4405                 return -ENOMEM;
4406         }
4407         bio->bi_iter.bi_size = 0;
4408         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4409         bio->bi_bdev = dev->bdev;
4410         ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
4411         if (ret != PAGE_CACHE_SIZE) {
4412 leave_with_eio:
4413                 bio_put(bio);
4414                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4415                 return -EIO;
4416         }
4417
4418         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
4419                 goto leave_with_eio;
4420
4421         bio_put(bio);
4422         return 0;
4423 }