fs/btrfs/scrub.c (karo-tx-linux.git)
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
31 #include "raid56.h"
32
33 /*
34  * This is only the first step towards a full-featured scrub. It reads all
35  * extents and super blocks and verifies the checksums. In case a bad checksum
36  * is found or the extent cannot be read, good data will be written back if
37  * any can be found.
38  *
39  * Future enhancements:
40  *  - In case an unrepairable extent is encountered, track which files are
41  *    affected and report them
42  *  - track and record media errors, throw out bad devices
43  *  - add a mode to also read unallocated space
44  */
45
46 struct scrub_block;
47 struct scrub_ctx;
48
49 /*
50  * the following three values only influence the performance.
51  * The last one configures the number of parallel and outstanding I/O
52  * operations. The first two values configure an upper limit for the number
53  * of (dynamically allocated) pages that are added to a bio.
54  */
55 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
58
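/*
 * Sizing sketch for the constants above, assuming the common 4K PAGE_SIZE:
 * 32 pages * 4K = 128K per read or write bio, and 64 bios per scrub context
 * gives 64 * 128K = 8M that may be in flight per device, matching the
 * per-define comments.
 */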
59 /*
60  * the following value times PAGE_SIZE needs to be large enough to match the
61  * largest node/leaf/sector size that shall be supported.
62  * Values larger than BTRFS_STRIPE_LEN are not supported.
63  */
64 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
65
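/*
 * Again assuming 4K pages, SCRUB_MAX_PAGES_PER_BLOCK covers 16 * 4K = 64K,
 * i.e. the largest metadata nodesize btrfs supports, while staying within
 * the BTRFS_STRIPE_LEN limit mentioned above.
 */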
66 struct scrub_recover {
67         atomic_t                refs;
68         struct btrfs_bio        *bbio;
69         u64                     map_length;
70 };
71
72 struct scrub_page {
73         struct scrub_block      *sblock;
74         struct page             *page;
75         struct btrfs_device     *dev;
76         struct list_head        list;
77         u64                     flags;  /* extent flags */
78         u64                     generation;
79         u64                     logical;
80         u64                     physical;
81         u64                     physical_for_dev_replace;
82         atomic_t                refs;
83         struct {
84                 unsigned int    mirror_num:8;
85                 unsigned int    have_csum:1;
86                 unsigned int    io_error:1;
87         };
88         u8                      csum[BTRFS_CSUM_SIZE];
89
90         struct scrub_recover    *recover;
91 };
92
93 struct scrub_bio {
94         int                     index;
95         struct scrub_ctx        *sctx;
96         struct btrfs_device     *dev;
97         struct bio              *bio;
98         int                     err;
99         u64                     logical;
100         u64                     physical;
101 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
102         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
103 #else
104         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
105 #endif
106         int                     page_count;
107         int                     next_free;
108         struct btrfs_work       work;
109 };
110
111 struct scrub_block {
112         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
113         int                     page_count;
114         atomic_t                outstanding_pages;
115         atomic_t                refs; /* free mem on transition to zero */
116         struct scrub_ctx        *sctx;
117         struct scrub_parity     *sparity;
118         struct {
119                 unsigned int    header_error:1;
120                 unsigned int    checksum_error:1;
121                 unsigned int    no_io_error_seen:1;
122                 unsigned int    generation_error:1; /* also sets header_error */
123
124                 /* The following flag is for the data used to check parity */
125                 /* and only applies to data that has a checksum */
126                 unsigned int    data_corrected:1;
127         };
128 };
129
130 /* Used for the chunks with parity stripes, such as RAID5/6 */
131 struct scrub_parity {
132         struct scrub_ctx        *sctx;
133
134         struct btrfs_device     *scrub_dev;
135
136         u64                     logic_start;
137
138         u64                     logic_end;
139
140         int                     nsectors;
141
142         int                     stripe_len;
143
144         atomic_t                refs;
145
146         struct list_head        spages;
147
148         /* Work of parity check and repair */
149         struct btrfs_work       work;
150
151         /* Mark the parity blocks which have data */
152         unsigned long           *dbitmap;
153
154         /*
155          * Mark the parity blocks which have data, but for which errors
156          * happened while reading or checking that data
157          */
158         unsigned long           *ebitmap;
159
160         unsigned long           bitmap[0];
161 };
162
163 struct scrub_wr_ctx {
164         struct scrub_bio *wr_curr_bio;
165         struct btrfs_device *tgtdev;
166         int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
167         atomic_t flush_all_writes;
168         struct mutex wr_lock;
169 };
170
171 struct scrub_ctx {
172         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
173         struct btrfs_root       *dev_root;
174         int                     first_free;
175         int                     curr;
176         atomic_t                bios_in_flight;
177         atomic_t                workers_pending;
178         spinlock_t              list_lock;
179         wait_queue_head_t       list_wait;
180         u16                     csum_size;
181         struct list_head        csum_list;
182         atomic_t                cancel_req;
183         int                     readonly;
184         int                     pages_per_rd_bio;
185         u32                     sectorsize;
186         u32                     nodesize;
187
188         int                     is_dev_replace;
189         struct scrub_wr_ctx     wr_ctx;
190
191         /*
192          * statistics
193          */
194         struct btrfs_scrub_progress stat;
195         spinlock_t              stat_lock;
196
197         /*
198          * Use a ref counter to avoid use-after-free issues. Scrub workers
199          * decrement bios_in_flight and workers_pending and then do a wakeup
200          * on the list_wait wait queue. We must ensure the main scrub task
201          * doesn't free the scrub context before or while the workers are
202          * doing the wakeup() call.
203          */
204         atomic_t                refs;
205 };
206
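/*
 * A short orientation sketch, based on scrub_setup_ctx() and scrub_free_ctx()
 * below: the statically sized bios[] array is chained into a free list
 * through scrub_bio::next_free, with first_free pointing at the head; curr
 * is the index of the bio currently being filled, or -1 when none is (the
 * initial state, and what scrub_free_ctx() checks to decide whether a
 * partially filled bio still has to be torn down).
 */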
207 struct scrub_fixup_nodatasum {
208         struct scrub_ctx        *sctx;
209         struct btrfs_device     *dev;
210         u64                     logical;
211         struct btrfs_root       *root;
212         struct btrfs_work       work;
213         int                     mirror_num;
214 };
215
216 struct scrub_nocow_inode {
217         u64                     inum;
218         u64                     offset;
219         u64                     root;
220         struct list_head        list;
221 };
222
223 struct scrub_copy_nocow_ctx {
224         struct scrub_ctx        *sctx;
225         u64                     logical;
226         u64                     len;
227         int                     mirror_num;
228         u64                     physical_for_dev_replace;
229         struct list_head        inodes;
230         struct btrfs_work       work;
231 };
232
233 struct scrub_warning {
234         struct btrfs_path       *path;
235         u64                     extent_item_size;
236         const char              *errstr;
237         sector_t                sector;
238         u64                     logical;
239         struct btrfs_device     *dev;
240 };
241
242 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
243 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
244 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
245 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
246 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
247 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
248                                      struct scrub_block *sblocks_for_recheck);
249 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
250                                 struct scrub_block *sblock, int is_metadata,
251                                 int have_csum, u8 *csum, u64 generation,
252                                 u16 csum_size, int retry_failed_mirror);
253 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
254                                          struct scrub_block *sblock,
255                                          int is_metadata, int have_csum,
256                                          const u8 *csum, u64 generation,
257                                          u16 csum_size);
258 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
259                                              struct scrub_block *sblock_good);
260 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
261                                             struct scrub_block *sblock_good,
262                                             int page_num, int force_write);
263 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
264 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
265                                            int page_num);
266 static int scrub_checksum_data(struct scrub_block *sblock);
267 static int scrub_checksum_tree_block(struct scrub_block *sblock);
268 static int scrub_checksum_super(struct scrub_block *sblock);
269 static void scrub_block_get(struct scrub_block *sblock);
270 static void scrub_block_put(struct scrub_block *sblock);
271 static void scrub_page_get(struct scrub_page *spage);
272 static void scrub_page_put(struct scrub_page *spage);
273 static void scrub_parity_get(struct scrub_parity *sparity);
274 static void scrub_parity_put(struct scrub_parity *sparity);
275 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
276                                     struct scrub_page *spage);
277 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
278                        u64 physical, struct btrfs_device *dev, u64 flags,
279                        u64 gen, int mirror_num, u8 *csum, int force,
280                        u64 physical_for_dev_replace);
281 static void scrub_bio_end_io(struct bio *bio, int err);
282 static void scrub_bio_end_io_worker(struct btrfs_work *work);
283 static void scrub_block_complete(struct scrub_block *sblock);
284 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
285                                u64 extent_logical, u64 extent_len,
286                                u64 *extent_physical,
287                                struct btrfs_device **extent_dev,
288                                int *extent_mirror_num);
289 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
290                               struct scrub_wr_ctx *wr_ctx,
291                               struct btrfs_fs_info *fs_info,
292                               struct btrfs_device *dev,
293                               int is_dev_replace);
294 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
295 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
296                                     struct scrub_page *spage);
297 static void scrub_wr_submit(struct scrub_ctx *sctx);
298 static void scrub_wr_bio_end_io(struct bio *bio, int err);
299 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
300 static int write_page_nocow(struct scrub_ctx *sctx,
301                             u64 physical_for_dev_replace, struct page *page);
302 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
303                                       struct scrub_copy_nocow_ctx *ctx);
304 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
305                             int mirror_num, u64 physical_for_dev_replace);
306 static void copy_nocow_pages_worker(struct btrfs_work *work);
307 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
308 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
309 static void scrub_put_ctx(struct scrub_ctx *sctx);
310
311
312 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
313 {
314         atomic_inc(&sctx->refs);
315         atomic_inc(&sctx->bios_in_flight);
316 }
317
318 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
319 {
320         atomic_dec(&sctx->bios_in_flight);
321         wake_up(&sctx->list_wait);
322         scrub_put_ctx(sctx);
323 }
324
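/*
 * scrub_pending_bio_inc()/_dec() bracket every bio the scrub has in flight.
 * The extra reference taken on sctx->refs keeps the context alive until
 * after the wake_up() on list_wait, which is exactly the use-after-free
 * scenario described at the refs member of struct scrub_ctx above.
 */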
325 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
326 {
327         while (atomic_read(&fs_info->scrub_pause_req)) {
328                 mutex_unlock(&fs_info->scrub_lock);
329                 wait_event(fs_info->scrub_pause_wait,
330                    atomic_read(&fs_info->scrub_pause_req) == 0);
331                 mutex_lock(&fs_info->scrub_lock);
332         }
333 }
334
335 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
336 {
337         atomic_inc(&fs_info->scrubs_paused);
338         wake_up(&fs_info->scrub_pause_wait);
339
340         mutex_lock(&fs_info->scrub_lock);
341         __scrub_blocked_if_needed(fs_info);
342         atomic_dec(&fs_info->scrubs_paused);
343         mutex_unlock(&fs_info->scrub_lock);
344
345         wake_up(&fs_info->scrub_pause_wait);
346 }
347
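/*
 * The two helpers above implement the scrub side of the pause handshake:
 * scrub_blocked_if_needed() advertises this scrub as paused (scrubs_paused),
 * wakes any waiter on scrub_pause_wait, and then blocks in
 * __scrub_blocked_if_needed() until scrub_pause_req drops back to zero.
 * The wait loop drops scrub_lock while sleeping so the pause requester can
 * make progress, and re-takes it before re-checking the condition.
 */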
348 /*
349  * used for workers that require transaction commits (i.e., for the
350  * NOCOW case)
351  */
352 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
353 {
354         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
355
356         atomic_inc(&sctx->refs);
357         /*
358          * increment scrubs_running to prevent cancel requests from
359          * completing as long as a worker is running. we must also
360          * increment scrubs_paused to prevent deadlocking on pause
361          * requests used for transaction commits (as the worker uses a
362          * transaction context). it is safe to regard the worker
363          * as paused for all practical matters. effectively, we only
364          * avoid cancellation requests from completing.
365          */
366         mutex_lock(&fs_info->scrub_lock);
367         atomic_inc(&fs_info->scrubs_running);
368         atomic_inc(&fs_info->scrubs_paused);
369         mutex_unlock(&fs_info->scrub_lock);
370
371         /*
372          * checking the @scrubs_running == @scrubs_paused condition
373          * inside wait_event() is not an atomic operation,
374          * which means we may inc/dec @scrubs_running/paused
375          * at any time. Wake up @scrub_pause_wait as often as we
376          * can, so that a blocked transaction commit waits less.
377          */
378         wake_up(&fs_info->scrub_pause_wait);
379
380         atomic_inc(&sctx->workers_pending);
381 }
382
383 /* used for workers that require transaction commits */
384 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
385 {
386         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
387
388         /*
389          * see scrub_pending_trans_workers_inc() for why we're pretending
390          * to be paused in the scrub counters
391          */
392         mutex_lock(&fs_info->scrub_lock);
393         atomic_dec(&fs_info->scrubs_running);
394         atomic_dec(&fs_info->scrubs_paused);
395         mutex_unlock(&fs_info->scrub_lock);
396         atomic_dec(&sctx->workers_pending);
397         wake_up(&fs_info->scrub_pause_wait);
398         wake_up(&sctx->list_wait);
399         scrub_put_ctx(sctx);
400 }
401
402 static void scrub_free_csums(struct scrub_ctx *sctx)
403 {
404         while (!list_empty(&sctx->csum_list)) {
405                 struct btrfs_ordered_sum *sum;
406                 sum = list_first_entry(&sctx->csum_list,
407                                        struct btrfs_ordered_sum, list);
408                 list_del(&sum->list);
409                 kfree(sum);
410         }
411 }
412
413 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
414 {
415         int i;
416
417         if (!sctx)
418                 return;
419
420         scrub_free_wr_ctx(&sctx->wr_ctx);
421
422         /* this can happen when scrub is cancelled */
423         if (sctx->curr != -1) {
424                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
425
426                 for (i = 0; i < sbio->page_count; i++) {
427                         WARN_ON(!sbio->pagev[i]->page);
428                         scrub_block_put(sbio->pagev[i]->sblock);
429                 }
430                 bio_put(sbio->bio);
431         }
432
433         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
434                 struct scrub_bio *sbio = sctx->bios[i];
435
436                 if (!sbio)
437                         break;
438                 kfree(sbio);
439         }
440
441         scrub_free_csums(sctx);
442         kfree(sctx);
443 }
444
445 static void scrub_put_ctx(struct scrub_ctx *sctx)
446 {
447         if (atomic_dec_and_test(&sctx->refs))
448                 scrub_free_ctx(sctx);
449 }
450
451 static noinline_for_stack
452 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
453 {
454         struct scrub_ctx *sctx;
455         int             i;
456         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
457         int pages_per_rd_bio;
458         int ret;
459
460         /*
461          * the setting of pages_per_rd_bio is correct for scrub but might
462          * be wrong for the dev_replace code where we might read from
463          * different devices in the initial huge bios. However, that
464          * code is able to correctly handle the case when adding a page
465          * to a bio fails.
466          */
467         if (dev->bdev)
468                 pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
469                                          bio_get_nr_vecs(dev->bdev));
470         else
471                 pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
472         sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
473         if (!sctx)
474                 goto nomem;
475         atomic_set(&sctx->refs, 1);
476         sctx->is_dev_replace = is_dev_replace;
477         sctx->pages_per_rd_bio = pages_per_rd_bio;
478         sctx->curr = -1;
479         sctx->dev_root = dev->dev_root;
480         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
481                 struct scrub_bio *sbio;
482
483                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
484                 if (!sbio)
485                         goto nomem;
486                 sctx->bios[i] = sbio;
487
488                 sbio->index = i;
489                 sbio->sctx = sctx;
490                 sbio->page_count = 0;
491                 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
492                                 scrub_bio_end_io_worker, NULL, NULL);
493
494                 if (i != SCRUB_BIOS_PER_SCTX - 1)
495                         sctx->bios[i]->next_free = i + 1;
496                 else
497                         sctx->bios[i]->next_free = -1;
498         }
499         sctx->first_free = 0;
500         sctx->nodesize = dev->dev_root->nodesize;
501         sctx->sectorsize = dev->dev_root->sectorsize;
502         atomic_set(&sctx->bios_in_flight, 0);
503         atomic_set(&sctx->workers_pending, 0);
504         atomic_set(&sctx->cancel_req, 0);
505         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
506         INIT_LIST_HEAD(&sctx->csum_list);
507
508         spin_lock_init(&sctx->list_lock);
509         spin_lock_init(&sctx->stat_lock);
510         init_waitqueue_head(&sctx->list_wait);
511
512         ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
513                                  fs_info->dev_replace.tgtdev, is_dev_replace);
514         if (ret) {
515                 scrub_free_ctx(sctx);
516                 return ERR_PTR(ret);
517         }
518         return sctx;
519
520 nomem:
521         scrub_free_ctx(sctx);
522         return ERR_PTR(-ENOMEM);
523 }
524
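/*
 * Error handling note for scrub_setup_ctx(): both the nomem label and the
 * scrub_setup_wr_ctx() failure path hand the partially initialized context
 * to scrub_free_ctx(), which copes with that (including a NULL sctx) -- the
 * bios[] loop stops at the first NULL slot and curr == -1 means there is no
 * in-flight bio to tear down.
 */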
525 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
526                                      void *warn_ctx)
527 {
528         u64 isize;
529         u32 nlink;
530         int ret;
531         int i;
532         struct extent_buffer *eb;
533         struct btrfs_inode_item *inode_item;
534         struct scrub_warning *swarn = warn_ctx;
535         struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
536         struct inode_fs_paths *ipath = NULL;
537         struct btrfs_root *local_root;
538         struct btrfs_key root_key;
539         struct btrfs_key key;
540
541         root_key.objectid = root;
542         root_key.type = BTRFS_ROOT_ITEM_KEY;
543         root_key.offset = (u64)-1;
544         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
545         if (IS_ERR(local_root)) {
546                 ret = PTR_ERR(local_root);
547                 goto err;
548         }
549
550         /*
551          * this makes the path point to (inum INODE_ITEM ioff)
552          */
553         key.objectid = inum;
554         key.type = BTRFS_INODE_ITEM_KEY;
555         key.offset = 0;
556
557         ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
558         if (ret) {
559                 btrfs_release_path(swarn->path);
560                 goto err;
561         }
562
563         eb = swarn->path->nodes[0];
564         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
565                                         struct btrfs_inode_item);
566         isize = btrfs_inode_size(eb, inode_item);
567         nlink = btrfs_inode_nlink(eb, inode_item);
568         btrfs_release_path(swarn->path);
569
570         ipath = init_ipath(4096, local_root, swarn->path);
571         if (IS_ERR(ipath)) {
572                 ret = PTR_ERR(ipath);
573                 ipath = NULL;
574                 goto err;
575         }
576         ret = paths_from_inode(inum, ipath);
577
578         if (ret < 0)
579                 goto err;
580
581         /*
582          * we deliberately ignore the fact that ipath might have been too
583          * small to hold all of the paths here
584          */
585         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
586                 printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
587                         "%s, sector %llu, root %llu, inode %llu, offset %llu, "
588                         "length %llu, links %u (path: %s)\n", swarn->errstr,
589                         swarn->logical, rcu_str_deref(swarn->dev->name),
590                         (unsigned long long)swarn->sector, root, inum, offset,
591                         min(isize - offset, (u64)PAGE_SIZE), nlink,
592                         (char *)(unsigned long)ipath->fspath->val[i]);
593
594         free_ipath(ipath);
595         return 0;
596
597 err:
598         printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
599                 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
600                 "resolving failed with ret=%d\n", swarn->errstr,
601                 swarn->logical, rcu_str_deref(swarn->dev->name),
602                 (unsigned long long)swarn->sector, root, inum, offset, ret);
603
604         free_ipath(ipath);
605         return 0;
606 }
607
608 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
609 {
610         struct btrfs_device *dev;
611         struct btrfs_fs_info *fs_info;
612         struct btrfs_path *path;
613         struct btrfs_key found_key;
614         struct extent_buffer *eb;
615         struct btrfs_extent_item *ei;
616         struct scrub_warning swarn;
617         unsigned long ptr = 0;
618         u64 extent_item_pos;
619         u64 flags = 0;
620         u64 ref_root;
621         u32 item_size;
622         u8 ref_level;
623         int ret;
624
625         WARN_ON(sblock->page_count < 1);
626         dev = sblock->pagev[0]->dev;
627         fs_info = sblock->sctx->dev_root->fs_info;
628
629         path = btrfs_alloc_path();
630         if (!path)
631                 return;
632
633         swarn.sector = (sblock->pagev[0]->physical) >> 9;
634         swarn.logical = sblock->pagev[0]->logical;
635         swarn.errstr = errstr;
636         swarn.dev = NULL;
637
638         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
639                                   &flags);
640         if (ret < 0)
641                 goto out;
642
643         extent_item_pos = swarn.logical - found_key.objectid;
644         swarn.extent_item_size = found_key.offset;
645
646         eb = path->nodes[0];
647         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
648         item_size = btrfs_item_size_nr(eb, path->slots[0]);
649
650         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
651                 do {
652                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
653                                                       item_size, &ref_root,
654                                                       &ref_level);
655                         printk_in_rcu(KERN_WARNING
656                                 "BTRFS: %s at logical %llu on dev %s, "
657                                 "sector %llu: metadata %s (level %d) in tree "
658                                 "%llu\n", errstr, swarn.logical,
659                                 rcu_str_deref(dev->name),
660                                 (unsigned long long)swarn.sector,
661                                 ref_level ? "node" : "leaf",
662                                 ret < 0 ? -1 : ref_level,
663                                 ret < 0 ? -1 : ref_root);
664                 } while (ret != 1);
665                 btrfs_release_path(path);
666         } else {
667                 btrfs_release_path(path);
668                 swarn.path = path;
669                 swarn.dev = dev;
670                 iterate_extent_inodes(fs_info, found_key.objectid,
671                                         extent_item_pos, 1,
672                                         scrub_print_warning_inode, &swarn);
673         }
674
675 out:
676         btrfs_free_path(path);
677 }
678
679 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
680 {
681         struct page *page = NULL;
682         unsigned long index;
683         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
684         int ret;
685         int corrected = 0;
686         struct btrfs_key key;
687         struct inode *inode = NULL;
688         struct btrfs_fs_info *fs_info;
689         u64 end = offset + PAGE_SIZE - 1;
690         struct btrfs_root *local_root;
691         int srcu_index;
692
693         key.objectid = root;
694         key.type = BTRFS_ROOT_ITEM_KEY;
695         key.offset = (u64)-1;
696
697         fs_info = fixup->root->fs_info;
698         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
699
700         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
701         if (IS_ERR(local_root)) {
702                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
703                 return PTR_ERR(local_root);
704         }
705
706         key.type = BTRFS_INODE_ITEM_KEY;
707         key.objectid = inum;
708         key.offset = 0;
709         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
710         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
711         if (IS_ERR(inode))
712                 return PTR_ERR(inode);
713
714         index = offset >> PAGE_CACHE_SHIFT;
715
716         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
717         if (!page) {
718                 ret = -ENOMEM;
719                 goto out;
720         }
721
722         if (PageUptodate(page)) {
723                 if (PageDirty(page)) {
724                         /*
725                          * we need to write the data to the defective sector. the
726                          * data that was in that sector is not in memory,
727                          * because the page was modified. we must not write the
728                          * modified page to that sector.
729                          *
730                          * TODO: what could be done here: wait for the delalloc
731                          *       runner to write out that page (might involve
732                          *       COW) and see whether the sector is still
733                          *       referenced afterwards.
734                          *
735                          * For the time being, we'll treat this error as
736                          * uncorrectable, although there is a chance that a
737                          * later scrub will find the bad sector again when
738                          * there is no dirty page in memory.
739                          */
740                         ret = -EIO;
741                         goto out;
742                 }
743                 ret = repair_io_failure(inode, offset, PAGE_SIZE,
744                                         fixup->logical, page,
745                                         offset - page_offset(page),
746                                         fixup->mirror_num);
747                 unlock_page(page);
748                 corrected = !ret;
749         } else {
750                 /*
751                  * we need to get good data first. the general readpage path
752                  * will call repair_io_failure for us, we just have to make
753                  * sure we read the bad mirror.
754                  */
755                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
756                                         EXTENT_DAMAGED, GFP_NOFS);
757                 if (ret) {
758                         /* set_extent_bits should give proper error */
759                         WARN_ON(ret > 0);
760                         if (ret > 0)
761                                 ret = -EFAULT;
762                         goto out;
763                 }
764
765                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
766                                                 btrfs_get_extent,
767                                                 fixup->mirror_num);
768                 wait_on_page_locked(page);
769
770                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
771                                                 end, EXTENT_DAMAGED, 0, NULL);
772                 if (!corrected)
773                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
774                                                 EXTENT_DAMAGED, GFP_NOFS);
775         }
776
777 out:
778         if (page)
779                 put_page(page);
780
781         iput(inode);
782
783         if (ret < 0)
784                 return ret;
785
786         if (ret == 0 && corrected) {
787                 /*
788                  * we only need to call readpage for one of the inodes belonging
789                  * to this extent. so make iterate_extent_inodes stop
790                  */
791                 return 1;
792         }
793
794         return -EIO;
795 }
796
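/*
 * Return convention of scrub_fixup_readpage(), as used by the
 * iterate_inodes_from_logical() walk in scrub_fixup_nodatasum() below:
 * 1 means the sector was corrected and the iteration can stop (only one
 * inode needs the readpage), a negative value is a hard error, and -EIO
 * covers the "read completed but still damaged" case.
 */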
797 static void scrub_fixup_nodatasum(struct btrfs_work *work)
798 {
799         int ret;
800         struct scrub_fixup_nodatasum *fixup;
801         struct scrub_ctx *sctx;
802         struct btrfs_trans_handle *trans = NULL;
803         struct btrfs_path *path;
804         int uncorrectable = 0;
805
806         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
807         sctx = fixup->sctx;
808
809         path = btrfs_alloc_path();
810         if (!path) {
811                 spin_lock(&sctx->stat_lock);
812                 ++sctx->stat.malloc_errors;
813                 spin_unlock(&sctx->stat_lock);
814                 uncorrectable = 1;
815                 goto out;
816         }
817
818         trans = btrfs_join_transaction(fixup->root);
819         if (IS_ERR(trans)) {
820                 uncorrectable = 1;
821                 goto out;
822         }
823
824         /*
825          * the idea is to trigger a regular read through the standard path. we
826          * read a page from the (failed) logical address by specifying the
827          * corresponding copynum of the failed sector. thus, that readpage is
828          * expected to fail.
829          * that is the point where on-the-fly error correction will kick in
830          * (once it's finished) and rewrite the failed sector if a good copy
831          * can be found.
832          */
833         ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
834                                                 path, scrub_fixup_readpage,
835                                                 fixup);
836         if (ret < 0) {
837                 uncorrectable = 1;
838                 goto out;
839         }
840         WARN_ON(ret != 1);
841
842         spin_lock(&sctx->stat_lock);
843         ++sctx->stat.corrected_errors;
844         spin_unlock(&sctx->stat_lock);
845
846 out:
847         if (trans && !IS_ERR(trans))
848                 btrfs_end_transaction(trans, fixup->root);
849         if (uncorrectable) {
850                 spin_lock(&sctx->stat_lock);
851                 ++sctx->stat.uncorrectable_errors;
852                 spin_unlock(&sctx->stat_lock);
853                 btrfs_dev_replace_stats_inc(
854                         &sctx->dev_root->fs_info->dev_replace.
855                         num_uncorrectable_read_errors);
856                 printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
857                     "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
858                         fixup->logical, rcu_str_deref(fixup->dev->name));
859         }
860
861         btrfs_free_path(path);
862         kfree(fixup);
863
864         scrub_pending_trans_workers_dec(sctx);
865 }
866
867 static inline void scrub_get_recover(struct scrub_recover *recover)
868 {
869         atomic_inc(&recover->refs);
870 }
871
872 static inline void scrub_put_recover(struct scrub_recover *recover)
873 {
874         if (atomic_dec_and_test(&recover->refs)) {
875                 btrfs_put_bbio(recover->bbio);
876                 kfree(recover);
877         }
878 }
879
880 /*
881  * scrub_handle_errored_block gets called when either verification of the
882  * pages failed or the bio failed to read, e.g. with EIO. In the latter
883  * case, this function handles all pages in the bio, even though only one
884  * may be bad.
885  * The goal of this function is to repair the errored block by using the
886  * contents of one of the mirrors.
887  */
888 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
889 {
890         struct scrub_ctx *sctx = sblock_to_check->sctx;
891         struct btrfs_device *dev;
892         struct btrfs_fs_info *fs_info;
893         u64 length;
894         u64 logical;
895         u64 generation;
896         unsigned int failed_mirror_index;
897         unsigned int is_metadata;
898         unsigned int have_csum;
899         u8 *csum;
900         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
901         struct scrub_block *sblock_bad;
902         int ret;
903         int mirror_index;
904         int page_num;
905         int success;
906         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
907                                       DEFAULT_RATELIMIT_BURST);
908
909         BUG_ON(sblock_to_check->page_count < 1);
910         fs_info = sctx->dev_root->fs_info;
911         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
912                 /*
913                  * if we find an error in a super block, we just report it.
914                  * Super blocks get rewritten with the next transaction commit
915                  * anyway
916                  */
917                 spin_lock(&sctx->stat_lock);
918                 ++sctx->stat.super_errors;
919                 spin_unlock(&sctx->stat_lock);
920                 return 0;
921         }
922         length = sblock_to_check->page_count * PAGE_SIZE;
923         logical = sblock_to_check->pagev[0]->logical;
924         generation = sblock_to_check->pagev[0]->generation;
925         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
926         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
927         is_metadata = !(sblock_to_check->pagev[0]->flags &
928                         BTRFS_EXTENT_FLAG_DATA);
929         have_csum = sblock_to_check->pagev[0]->have_csum;
930         csum = sblock_to_check->pagev[0]->csum;
931         dev = sblock_to_check->pagev[0]->dev;
932
933         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
934                 sblocks_for_recheck = NULL;
935                 goto nodatasum_case;
936         }
937
938         /*
939          * read all mirrors one after the other. This includes
940          * re-reading the extent or metadata block that failed (which
941          * was the reason this fixup code was called), this time page
942          * by page, in order to know which pages caused I/O errors and
943          * which ones are good (for all mirrors).
944          * The goal is to handle the situation when more than one
945          * mirror contains I/O errors, but the errors do not
946          * overlap, i.e. the data can be repaired by selecting the
947          * pages from those mirrors without I/O error on the
948          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
949          * would be that mirror #1 has an I/O error on the first page,
950          * the second page is good, and mirror #2 has an I/O error on
951          * the second page, but the first page is good.
952          * Then the first page of the first mirror can be repaired by
953          * taking the first page of the second mirror, and the
954          * second page of the second mirror can be repaired by
955          * copying the contents of the 2nd page of the 1st mirror.
956          * One more note: if the pages of one mirror contain I/O
957          * errors, the checksum cannot be verified. In order to get
958          * the best data for repairing, the first attempt is to find
959          * a mirror without I/O errors and with a validated checksum.
960          * Only if this is not possible, the pages are picked from
961          * mirrors with I/O errors without considering the checksum.
962          * If the latter is the case, at the end, the checksum of the
963          * repaired area is verified in order to correctly maintain
964          * the statistics.
965          */
966
967         sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
968                                       sizeof(*sblocks_for_recheck), GFP_NOFS);
969         if (!sblocks_for_recheck) {
970                 spin_lock(&sctx->stat_lock);
971                 sctx->stat.malloc_errors++;
972                 sctx->stat.read_errors++;
973                 sctx->stat.uncorrectable_errors++;
974                 spin_unlock(&sctx->stat_lock);
975                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
976                 goto out;
977         }
978
979         /* setup the context, map the logical blocks and alloc the pages */
980         ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
981         if (ret) {
982                 spin_lock(&sctx->stat_lock);
983                 sctx->stat.read_errors++;
984                 sctx->stat.uncorrectable_errors++;
985                 spin_unlock(&sctx->stat_lock);
986                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
987                 goto out;
988         }
989         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
990         sblock_bad = sblocks_for_recheck + failed_mirror_index;
991
992         /* build and submit the bios for the failed mirror, check checksums */
993         scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
994                             csum, generation, sctx->csum_size, 1);
995
996         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
997             sblock_bad->no_io_error_seen) {
998                 /*
999                  * the error disappeared after reading page by page, or
1000                  * the area was part of a huge bio and other parts of the
1001                  * bio caused I/O errors, or the block layer merged several
1002                  * read requests into one and the error is caused by a
1003                  * different bio (usually one of the two latter cases is
1004                  * the cause)
1005                  */
1006                 spin_lock(&sctx->stat_lock);
1007                 sctx->stat.unverified_errors++;
1008                 sblock_to_check->data_corrected = 1;
1009                 spin_unlock(&sctx->stat_lock);
1010
1011                 if (sctx->is_dev_replace)
1012                         scrub_write_block_to_dev_replace(sblock_bad);
1013                 goto out;
1014         }
1015
1016         if (!sblock_bad->no_io_error_seen) {
1017                 spin_lock(&sctx->stat_lock);
1018                 sctx->stat.read_errors++;
1019                 spin_unlock(&sctx->stat_lock);
1020                 if (__ratelimit(&_rs))
1021                         scrub_print_warning("i/o error", sblock_to_check);
1022                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1023         } else if (sblock_bad->checksum_error) {
1024                 spin_lock(&sctx->stat_lock);
1025                 sctx->stat.csum_errors++;
1026                 spin_unlock(&sctx->stat_lock);
1027                 if (__ratelimit(&_rs))
1028                         scrub_print_warning("checksum error", sblock_to_check);
1029                 btrfs_dev_stat_inc_and_print(dev,
1030                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
1031         } else if (sblock_bad->header_error) {
1032                 spin_lock(&sctx->stat_lock);
1033                 sctx->stat.verify_errors++;
1034                 spin_unlock(&sctx->stat_lock);
1035                 if (__ratelimit(&_rs))
1036                         scrub_print_warning("checksum/header error",
1037                                             sblock_to_check);
1038                 if (sblock_bad->generation_error)
1039                         btrfs_dev_stat_inc_and_print(dev,
1040                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1041                 else
1042                         btrfs_dev_stat_inc_and_print(dev,
1043                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1044         }
1045
1046         if (sctx->readonly) {
1047                 ASSERT(!sctx->is_dev_replace);
1048                 goto out;
1049         }
1050
1051         if (!is_metadata && !have_csum) {
1052                 struct scrub_fixup_nodatasum *fixup_nodatasum;
1053
1054                 WARN_ON(sctx->is_dev_replace);
1055
1056 nodatasum_case:
1057
1058                 /*
1059                  * !is_metadata and !have_csum, this means that the data
1060                  * might not be COW'ed, that it might be modified
1061                  * might not be COW'ed and might be modified
1062                  * concurrently. The general strategy of working on the
1063                  * used.
1064                  */
1065                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1066                 if (!fixup_nodatasum)
1067                         goto did_not_correct_error;
1068                 fixup_nodatasum->sctx = sctx;
1069                 fixup_nodatasum->dev = dev;
1070                 fixup_nodatasum->logical = logical;
1071                 fixup_nodatasum->root = fs_info->extent_root;
1072                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1073                 scrub_pending_trans_workers_inc(sctx);
1074                 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1075                                 scrub_fixup_nodatasum, NULL, NULL);
1076                 btrfs_queue_work(fs_info->scrub_workers,
1077                                  &fixup_nodatasum->work);
1078                 goto out;
1079         }
1080
1081         /*
1082          * now build and submit the bios for the other mirrors, check
1083          * checksums.
1084          * First try to pick the mirror which is completely without I/O
1085          * errors and also does not have a checksum error.
1086          * If one is found, and if a checksum is present, the full block
1087          * that is known to contain an error is rewritten. Afterwards
1088          * the block is known to be corrected.
1089          * If a mirror is found which is completely correct, and no
1090          * checksum is present, only those pages are rewritten that had
1091          * an I/O error in the block to be repaired, since it cannot be
1092          * determined which copy of the other pages is better (and it
1093          * could happen otherwise that a correct page would be
1094          * overwritten by a bad one).
1095          */
1096         for (mirror_index = 0;
1097              mirror_index < BTRFS_MAX_MIRRORS &&
1098              sblocks_for_recheck[mirror_index].page_count > 0;
1099              mirror_index++) {
1100                 struct scrub_block *sblock_other;
1101
1102                 if (mirror_index == failed_mirror_index)
1103                         continue;
1104                 sblock_other = sblocks_for_recheck + mirror_index;
1105
1106                 /* build and submit the bios, check checksums */
1107                 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1108                                     have_csum, csum, generation,
1109                                     sctx->csum_size, 0);
1110
1111                 if (!sblock_other->header_error &&
1112                     !sblock_other->checksum_error &&
1113                     sblock_other->no_io_error_seen) {
1114                         if (sctx->is_dev_replace) {
1115                                 scrub_write_block_to_dev_replace(sblock_other);
1116                                 goto corrected_error;
1117                         } else {
1118                                 ret = scrub_repair_block_from_good_copy(
1119                                                 sblock_bad, sblock_other);
1120                                 if (!ret)
1121                                         goto corrected_error;
1122                         }
1123                 }
1124         }
1125
1126         if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1127                 goto did_not_correct_error;
1128
1129         /*
1130          * In case of I/O errors in the area that is supposed to be
1131          * repaired, continue by picking good copies of those pages.
1132          * Select the good pages from mirrors to rewrite bad pages from
1133          * the area to fix. Afterwards verify the checksum of the block
1134          * that is supposed to be repaired. This verification step is
1135          * only done for the purpose of statistics counting and for the
1136          * final scrub report on whether errors remain.
1137          * A perfect algorithm could make use of the checksum and try
1138          * all possible combinations of pages from the different mirrors
1139          * until the checksum verification succeeds. For example, when
1140          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1141          * of mirror #2 is readable but the final checksum test fails,
1142          * then the 2nd page of mirror #3 could be tried to see whether
1143          * the final checksum now succeeds. But this would be a rare
1144          * exception and is therefore not implemented. At least we avoid
1145          * overwriting the good copy.
1146          * A more useful improvement would be to pick the sectors
1147          * without I/O error based on sector sizes (512 bytes on legacy
1148          * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1149          * mirror could be repaired by taking 512 bytes of a different
1150          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1151          * area are unreadable.
1152          */
1153         success = 1;
1154         for (page_num = 0; page_num < sblock_bad->page_count;
1155              page_num++) {
1156                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1157                 struct scrub_block *sblock_other = NULL;
1158
1159                 /* skip no-io-error page in scrub */
1160                 if (!page_bad->io_error && !sctx->is_dev_replace)
1161                         continue;
1162
1163                 /* try to find no-io-error page in mirrors */
1164                 if (page_bad->io_error) {
1165                         for (mirror_index = 0;
1166                              mirror_index < BTRFS_MAX_MIRRORS &&
1167                              sblocks_for_recheck[mirror_index].page_count > 0;
1168                              mirror_index++) {
1169                                 if (!sblocks_for_recheck[mirror_index].
1170                                     pagev[page_num]->io_error) {
1171                                         sblock_other = sblocks_for_recheck +
1172                                                        mirror_index;
1173                                         break;
1174                                 }
1175                         }
1176                         if (!sblock_other)
1177                                 success = 0;
1178                 }
1179
1180                 if (sctx->is_dev_replace) {
1181                         /*
1182                          * did not find a mirror to fetch the page
1183                          * from. scrub_write_page_to_dev_replace()
1184                          * handles this case (page->io_error), by
1185                          * filling the block with zeros before
1186                          * submitting the write request
1187                          */
1188                         if (!sblock_other)
1189                                 sblock_other = sblock_bad;
1190
1191                         if (scrub_write_page_to_dev_replace(sblock_other,
1192                                                             page_num) != 0) {
1193                                 btrfs_dev_replace_stats_inc(
1194                                         &sctx->dev_root->
1195                                         fs_info->dev_replace.
1196                                         num_write_errors);
1197                                 success = 0;
1198                         }
1199                 } else if (sblock_other) {
1200                         ret = scrub_repair_page_from_good_copy(sblock_bad,
1201                                                                sblock_other,
1202                                                                page_num, 0);
1203                         if (0 == ret)
1204                                 page_bad->io_error = 0;
1205                         else
1206                                 success = 0;
1207                 }
1208         }
1209
1210         if (success && !sctx->is_dev_replace) {
1211                 if (is_metadata || have_csum) {
1212                         /*
1213                          * need to verify the checksum now that all
1214                          * sectors on disk are repaired (the write
1215                          * request for data to be repaired is on its way).
1216                          * Just be lazy and use scrub_recheck_block()
1217                          * which re-reads the data before the checksum
1218                          * is verified, but most likely the data comes out
1219                          * of the page cache.
1220                          */
1221                         scrub_recheck_block(fs_info, sblock_bad,
1222                                             is_metadata, have_csum, csum,
1223                                             generation, sctx->csum_size, 1);
1224                         if (!sblock_bad->header_error &&
1225                             !sblock_bad->checksum_error &&
1226                             sblock_bad->no_io_error_seen)
1227                                 goto corrected_error;
1228                         else
1229                                 goto did_not_correct_error;
1230                 } else {
1231 corrected_error:
1232                         spin_lock(&sctx->stat_lock);
1233                         sctx->stat.corrected_errors++;
1234                         sblock_to_check->data_corrected = 1;
1235                         spin_unlock(&sctx->stat_lock);
1236                         printk_ratelimited_in_rcu(KERN_ERR
1237                                 "BTRFS: fixed up error at logical %llu on dev %s\n",
1238                                 logical, rcu_str_deref(dev->name));
1239                 }
1240         } else {
1241 did_not_correct_error:
1242                 spin_lock(&sctx->stat_lock);
1243                 sctx->stat.uncorrectable_errors++;
1244                 spin_unlock(&sctx->stat_lock);
1245                 printk_ratelimited_in_rcu(KERN_ERR
1246                         "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1247                         logical, rcu_str_deref(dev->name));
1248         }
1249
1250 out:
1251         if (sblocks_for_recheck) {
1252                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1253                      mirror_index++) {
1254                         struct scrub_block *sblock = sblocks_for_recheck +
1255                                                      mirror_index;
1256                         struct scrub_recover *recover;
1257                         int page_index;
1258
1259                         for (page_index = 0; page_index < sblock->page_count;
1260                              page_index++) {
1261                                 sblock->pagev[page_index]->sblock = NULL;
1262                                 recover = sblock->pagev[page_index]->recover;
1263                                 if (recover) {
1264                                         scrub_put_recover(recover);
1265                                         sblock->pagev[page_index]->recover =
1266                                                                         NULL;
1267                                 }
1268                                 scrub_page_put(sblock->pagev[page_index]);
1269                         }
1270                 }
1271                 kfree(sblocks_for_recheck);
1272         }
1273
1274         return 0;
1275 }
1276
1277 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1278 {
1279         if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1280                 return 2;
1281         else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1282                 return 3;
1283         else
1284                 return (int)bbio->num_stripes;
1285 }
1286
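     /*
      * Map a logical address and mirror number to the stripe that holds
      * it and the byte offset into that stripe. For RAID5/6 the raid_map
      * is searched for the data stripe that covers the logical address;
      * for the other profiles the mirror number selects the stripe
      * directly.
      */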
1287 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1288                                                  u64 *raid_map,
1289                                                  u64 mapped_length,
1290                                                  int nstripes, int mirror,
1291                                                  int *stripe_index,
1292                                                  u64 *stripe_offset)
1293 {
1294         int i;
1295
1296         if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1297                 /* RAID5/6 */
1298                 for (i = 0; i < nstripes; i++) {
1299                         if (raid_map[i] == RAID6_Q_STRIPE ||
1300                             raid_map[i] == RAID5_P_STRIPE)
1301                                 continue;
1302
1303                         if (logical >= raid_map[i] &&
1304                             logical < raid_map[i] + mapped_length)
1305                                 break;
1306                 }
1307
1308                 *stripe_index = i;
1309                 *stripe_offset = logical - raid_map[i];
1310         } else {
1311                 /* The other RAID type */
1312                 *stripe_index = mirror;
1313                 *stripe_offset = 0;
1314         }
1315 }
1316
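     /*
      * Build one scrub_block per mirror for the block that failed: every
      * page of the original block is mapped to all of its mirrors and a
      * freshly allocated page, carrying the physical address on that
      * mirror, is attached to the corresponding recheck block.
      */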
1317 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1318                                      struct scrub_block *sblocks_for_recheck)
1319 {
1320         struct scrub_ctx *sctx = original_sblock->sctx;
1321         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1322         u64 length = original_sblock->page_count * PAGE_SIZE;
1323         u64 logical = original_sblock->pagev[0]->logical;
1324         struct scrub_recover *recover;
1325         struct btrfs_bio *bbio;
1326         u64 sublen;
1327         u64 mapped_length;
1328         u64 stripe_offset;
1329         int stripe_index;
1330         int page_index = 0;
1331         int mirror_index;
1332         int nmirrors;
1333         int ret;
1334
1335         /*
1336          * note: the two members refs and outstanding_pages
1337          * are not used (and not set) in the blocks that are used for
1338          * the recheck procedure
1339          */
1340
1341         while (length > 0) {
1342                 sublen = min_t(u64, length, PAGE_SIZE);
1343                 mapped_length = sublen;
1344                 bbio = NULL;
1345
1346                 /*
1347                  * with a length of PAGE_SIZE, each returned stripe
1348                  * represents one mirror
1349                  */
1350                 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1351                                        &mapped_length, &bbio, 0, 1);
1352                 if (ret || !bbio || mapped_length < sublen) {
1353                         btrfs_put_bbio(bbio);
1354                         return -EIO;
1355                 }
1356
1357                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1358                 if (!recover) {
1359                         btrfs_put_bbio(bbio);
1360                         return -ENOMEM;
1361                 }
1362
1363                 atomic_set(&recover->refs, 1);
1364                 recover->bbio = bbio;
1365                 recover->map_length = mapped_length;
1366
1367                 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1368
1369                 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1370
1371                 for (mirror_index = 0; mirror_index < nmirrors;
1372                      mirror_index++) {
1373                         struct scrub_block *sblock;
1374                         struct scrub_page *page;
1375
1376                         sblock = sblocks_for_recheck + mirror_index;
1377                         sblock->sctx = sctx;
1378                         page = kzalloc(sizeof(*page), GFP_NOFS);
1379                         if (!page) {
1380 leave_nomem:
1381                                 spin_lock(&sctx->stat_lock);
1382                                 sctx->stat.malloc_errors++;
1383                                 spin_unlock(&sctx->stat_lock);
1384                                 scrub_put_recover(recover);
1385                                 return -ENOMEM;
1386                         }
1387                         scrub_page_get(page);
1388                         sblock->pagev[page_index] = page;
1389                         page->logical = logical;
1390
1391                         scrub_stripe_index_and_offset(logical,
1392                                                       bbio->map_type,
1393                                                       bbio->raid_map,
1394                                                       mapped_length,
1395                                                       bbio->num_stripes -
1396                                                       bbio->num_tgtdevs,
1397                                                       mirror_index,
1398                                                       &stripe_index,
1399                                                       &stripe_offset);
1400                         page->physical = bbio->stripes[stripe_index].physical +
1401                                          stripe_offset;
1402                         page->dev = bbio->stripes[stripe_index].dev;
1403
1404                         BUG_ON(page_index >= original_sblock->page_count);
1405                         page->physical_for_dev_replace =
1406                                 original_sblock->pagev[page_index]->
1407                                 physical_for_dev_replace;
1408                         /* for missing devices, dev->bdev is NULL */
1409                         page->mirror_num = mirror_index + 1;
1410                         sblock->page_count++;
1411                         page->page = alloc_page(GFP_NOFS);
1412                         if (!page->page)
1413                                 goto leave_nomem;
1414
1415                         scrub_get_recover(recover);
1416                         page->recover = recover;
1417                 }
1418                 scrub_put_recover(recover);
1419                 length -= sublen;
1420                 logical += sublen;
1421                 page_index++;
1422         }
1423
1424         return 0;
1425 }
1426
1427 struct scrub_bio_ret {
1428         struct completion event;
1429         int error;
1430 };
1431
1432 static void scrub_bio_wait_endio(struct bio *bio, int error)
1433 {
1434         struct scrub_bio_ret *ret = bio->bi_private;
1435
1436         ret->error = error;
1437         complete(&ret->event);
1438 }
1439
1440 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1441 {
1442         return page->recover &&
1443                (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1444 }
1445
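     /*
      * Synchronously read one page through the RAID5/6 recovery path and
      * wait for the rebuilt data; returns nonzero if the reconstruction
      * fails.
      */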
1446 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1447                                         struct bio *bio,
1448                                         struct scrub_page *page)
1449 {
1450         struct scrub_bio_ret done;
1451         int ret;
1452
1453         init_completion(&done.event);
1454         done.error = 0;
1455         bio->bi_iter.bi_sector = page->logical >> 9;
1456         bio->bi_private = &done;
1457         bio->bi_end_io = scrub_bio_wait_endio;
1458
1459         ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1460                                     page->recover->map_length,
1461                                     page->mirror_num, 0);
1462         if (ret)
1463                 return ret;
1464
1465         wait_for_completion(&done.event);
1466         if (done.error)
1467                 return -EIO;
1468
1469         return 0;
1470 }
1471
1472 /*
1473  * This function checks the on-disk data for checksum errors, header
1474  * errors and read I/O errors. If any I/O error happens, the exact pages
1475  * that failed are marked as bad. The goal is to enable scrub to take
1476  * the pages that are not errored from all the mirrors, so that the
1477  * pages that failed in the mirror just handled can be repaired.
1478  */
1479 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1480                                 struct scrub_block *sblock, int is_metadata,
1481                                 int have_csum, u8 *csum, u64 generation,
1482                                 u16 csum_size, int retry_failed_mirror)
1483 {
1484         int page_num;
1485
1486         sblock->no_io_error_seen = 1;
1487         sblock->header_error = 0;
1488         sblock->checksum_error = 0;
1489
1490         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1491                 struct bio *bio;
1492                 struct scrub_page *page = sblock->pagev[page_num];
1493
1494                 if (page->dev->bdev == NULL) {
1495                         page->io_error = 1;
1496                         sblock->no_io_error_seen = 0;
1497                         continue;
1498                 }
1499
1500                 WARN_ON(!page->page);
1501                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1502                 if (!bio) {
1503                         page->io_error = 1;
1504                         sblock->no_io_error_seen = 0;
1505                         continue;
1506                 }
1507                 bio->bi_bdev = page->dev->bdev;
1508
1509                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1510                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1511                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1512                                 sblock->no_io_error_seen = 0;
1513                 } else {
1514                         bio->bi_iter.bi_sector = page->physical >> 9;
1515
1516                         if (btrfsic_submit_bio_wait(READ, bio))
1517                                 sblock->no_io_error_seen = 0;
1518                 }
1519
1520                 bio_put(bio);
1521         }
1522
1523         if (sblock->no_io_error_seen)
1524                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1525                                              have_csum, csum, generation,
1526                                              csum_size);
1527
1528         return;
1529 }
1530
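     /* Return 1 if @fsid matches the fsid of the filesystem the page belongs to */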
1531 static inline int scrub_check_fsid(u8 fsid[],
1532                                    struct scrub_page *spage)
1533 {
1534         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1535         int ret;
1536
1537         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1538         return !ret;
1539 }
1540
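     /*
      * Recompute the checksum of a block that was re-read without I/O
      * errors. For metadata the header fields (bytenr, fsid, chunk tree
      * uuid and generation) are validated as well; the results are
      * recorded in the block's header_error and checksum_error flags.
      */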
1541 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1542                                          struct scrub_block *sblock,
1543                                          int is_metadata, int have_csum,
1544                                          const u8 *csum, u64 generation,
1545                                          u16 csum_size)
1546 {
1547         int page_num;
1548         u8 calculated_csum[BTRFS_CSUM_SIZE];
1549         u32 crc = ~(u32)0;
1550         void *mapped_buffer;
1551
1552         WARN_ON(!sblock->pagev[0]->page);
1553         if (is_metadata) {
1554                 struct btrfs_header *h;
1555
1556                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1557                 h = (struct btrfs_header *)mapped_buffer;
1558
1559                 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
1560                     !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
1561                     memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1562                            BTRFS_UUID_SIZE)) {
1563                         sblock->header_error = 1;
1564                 } else if (generation != btrfs_stack_header_generation(h)) {
1565                         sblock->header_error = 1;
1566                         sblock->generation_error = 1;
1567                 }
1568                 csum = h->csum;
1569         } else {
1570                 if (!have_csum)
1571                         return;
1572
1573                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1574         }
1575
1576         for (page_num = 0;;) {
1577                 if (page_num == 0 && is_metadata)
1578                         crc = btrfs_csum_data(
1579                                 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1580                                 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1581                 else
1582                         crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1583
1584                 kunmap_atomic(mapped_buffer);
1585                 page_num++;
1586                 if (page_num >= sblock->page_count)
1587                         break;
1588                 WARN_ON(!sblock->pagev[page_num]->page);
1589
1590                 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1591         }
1592
1593         btrfs_csum_final(crc, calculated_csum);
1594         if (memcmp(calculated_csum, csum, csum_size))
1595                 sblock->checksum_error = 1;
1596 }
1597
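     /*
      * Overwrite every page of the bad block with the corresponding page
      * of the good mirror; if any page fails, the last error is returned.
      */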
1598 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1599                                              struct scrub_block *sblock_good)
1600 {
1601         int page_num;
1602         int ret = 0;
1603
1604         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1605                 int ret_sub;
1606
1607                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1608                                                            sblock_good,
1609                                                            page_num, 1);
1610                 if (ret_sub)
1611                         ret = ret_sub;
1612         }
1613
1614         return ret;
1615 }
1616
1617 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1618                                             struct scrub_block *sblock_good,
1619                                             int page_num, int force_write)
1620 {
1621         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1622         struct scrub_page *page_good = sblock_good->pagev[page_num];
1623
1624         BUG_ON(page_bad->page == NULL);
1625         BUG_ON(page_good->page == NULL);
1626         if (force_write || sblock_bad->header_error ||
1627             sblock_bad->checksum_error || page_bad->io_error) {
1628                 struct bio *bio;
1629                 int ret;
1630
1631                 if (!page_bad->dev->bdev) {
1632                         printk_ratelimited(KERN_WARNING "BTRFS: "
1633                                 "scrub_repair_page_from_good_copy(bdev == NULL) "
1634                                 "is unexpected!\n");
1635                         return -EIO;
1636                 }
1637
1638                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1639                 if (!bio)
1640                         return -EIO;
1641                 bio->bi_bdev = page_bad->dev->bdev;
1642                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1643
1644                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1645                 if (PAGE_SIZE != ret) {
1646                         bio_put(bio);
1647                         return -EIO;
1648                 }
1649
1650                 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1651                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1652                                 BTRFS_DEV_STAT_WRITE_ERRS);
1653                         btrfs_dev_replace_stats_inc(
1654                                 &sblock_bad->sctx->dev_root->fs_info->
1655                                 dev_replace.num_write_errors);
1656                         bio_put(bio);
1657                         return -EIO;
1658                 }
1659                 bio_put(bio);
1660         }
1661
1662         return 0;
1663 }
1664
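     /*
      * Queue every page of a block for writing to the dev-replace target
      * device; write errors only bump the dev-replace error counter.
      */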
1665 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1666 {
1667         int page_num;
1668
1669         /*
1670          * This block is used to check the parity on the source device,
1671          * so the data need not be written to the destination device.
1672          */
1673         if (sblock->sparity)
1674                 return;
1675
1676         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1677                 int ret;
1678
1679                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1680                 if (ret)
1681                         btrfs_dev_replace_stats_inc(
1682                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1683                                 num_write_errors);
1684         }
1685 }
1686
1687 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1688                                            int page_num)
1689 {
1690         struct scrub_page *spage = sblock->pagev[page_num];
1691
1692         BUG_ON(spage->page == NULL);
1693         if (spage->io_error) {
1694                 void *mapped_buffer = kmap_atomic(spage->page);
1695
1696                 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1697                 flush_dcache_page(spage->page);
1698                 kunmap_atomic(mapped_buffer);
1699         }
1700         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1701 }
1702
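     /*
      * Queue a page for writing to the dev-replace target: physically and
      * logically contiguous pages are packed into the current write bio,
      * which is submitted as soon as it is full or a non-contiguous page
      * arrives.
      */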
1703 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1704                                     struct scrub_page *spage)
1705 {
1706         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1707         struct scrub_bio *sbio;
1708         int ret;
1709
1710         mutex_lock(&wr_ctx->wr_lock);
1711 again:
1712         if (!wr_ctx->wr_curr_bio) {
1713                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1714                                               GFP_NOFS);
1715                 if (!wr_ctx->wr_curr_bio) {
1716                         mutex_unlock(&wr_ctx->wr_lock);
1717                         return -ENOMEM;
1718                 }
1719                 wr_ctx->wr_curr_bio->sctx = sctx;
1720                 wr_ctx->wr_curr_bio->page_count = 0;
1721         }
1722         sbio = wr_ctx->wr_curr_bio;
1723         if (sbio->page_count == 0) {
1724                 struct bio *bio;
1725
1726                 sbio->physical = spage->physical_for_dev_replace;
1727                 sbio->logical = spage->logical;
1728                 sbio->dev = wr_ctx->tgtdev;
1729                 bio = sbio->bio;
1730                 if (!bio) {
1731                         bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1732                         if (!bio) {
1733                                 mutex_unlock(&wr_ctx->wr_lock);
1734                                 return -ENOMEM;
1735                         }
1736                         sbio->bio = bio;
1737                 }
1738
1739                 bio->bi_private = sbio;
1740                 bio->bi_end_io = scrub_wr_bio_end_io;
1741                 bio->bi_bdev = sbio->dev->bdev;
1742                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1743                 sbio->err = 0;
1744         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1745                    spage->physical_for_dev_replace ||
1746                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1747                    spage->logical) {
1748                 scrub_wr_submit(sctx);
1749                 goto again;
1750         }
1751
1752         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1753         if (ret != PAGE_SIZE) {
1754                 if (sbio->page_count < 1) {
1755                         bio_put(sbio->bio);
1756                         sbio->bio = NULL;
1757                         mutex_unlock(&wr_ctx->wr_lock);
1758                         return -EIO;
1759                 }
1760                 scrub_wr_submit(sctx);
1761                 goto again;
1762         }
1763
1764         sbio->pagev[sbio->page_count] = spage;
1765         scrub_page_get(spage);
1766         sbio->page_count++;
1767         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1768                 scrub_wr_submit(sctx);
1769         mutex_unlock(&wr_ctx->wr_lock);
1770
1771         return 0;
1772 }
1773
1774 static void scrub_wr_submit(struct scrub_ctx *sctx)
1775 {
1776         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1777         struct scrub_bio *sbio;
1778
1779         if (!wr_ctx->wr_curr_bio)
1780                 return;
1781
1782         sbio = wr_ctx->wr_curr_bio;
1783         wr_ctx->wr_curr_bio = NULL;
1784         WARN_ON(!sbio->bio->bi_bdev);
1785         scrub_pending_bio_inc(sctx);
1786         /* Process all writes in a single worker thread. The block layer
1787          * then orders the requests before sending them to the driver, which
1788          * doubled the write performance on spinning disks when measured
1789          * with Linux 3.5. */
1790         btrfsic_submit_bio(WRITE, sbio->bio);
1791 }
1792
1793 static void scrub_wr_bio_end_io(struct bio *bio, int err)
1794 {
1795         struct scrub_bio *sbio = bio->bi_private;
1796         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1797
1798         sbio->err = err;
1799         sbio->bio = bio;
1800
1801         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1802                          scrub_wr_bio_end_io_worker, NULL, NULL);
1803         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1804 }
1805
1806 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1807 {
1808         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1809         struct scrub_ctx *sctx = sbio->sctx;
1810         int i;
1811
1812         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1813         if (sbio->err) {
1814                 struct btrfs_dev_replace *dev_replace =
1815                         &sbio->sctx->dev_root->fs_info->dev_replace;
1816
1817                 for (i = 0; i < sbio->page_count; i++) {
1818                         struct scrub_page *spage = sbio->pagev[i];
1819
1820                         spage->io_error = 1;
1821                         btrfs_dev_replace_stats_inc(&dev_replace->
1822                                                     num_write_errors);
1823                 }
1824         }
1825
1826         for (i = 0; i < sbio->page_count; i++)
1827                 scrub_page_put(sbio->pagev[i]);
1828
1829         bio_put(sbio->bio);
1830         kfree(sbio);
1831         scrub_pending_bio_dec(sctx);
1832 }
1833
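     /*
      * Verify a block according to its extent flags: data extents are
      * checked against their csum, tree blocks against header fields and
      * csum, and errors in super blocks are only counted, never repaired.
      * A failed check hands the block to scrub_handle_errored_block().
      */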
1834 static int scrub_checksum(struct scrub_block *sblock)
1835 {
1836         u64 flags;
1837         int ret;
1838
1839         WARN_ON(sblock->page_count < 1);
1840         flags = sblock->pagev[0]->flags;
1841         ret = 0;
1842         if (flags & BTRFS_EXTENT_FLAG_DATA)
1843                 ret = scrub_checksum_data(sblock);
1844         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1845                 ret = scrub_checksum_tree_block(sblock);
1846         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1847                 (void)scrub_checksum_super(sblock);
1848         else
1849                 WARN_ON(1);
1850         if (ret)
1851                 scrub_handle_errored_block(sblock);
1852
1853         return ret;
1854 }
1855
1856 static int scrub_checksum_data(struct scrub_block *sblock)
1857 {
1858         struct scrub_ctx *sctx = sblock->sctx;
1859         u8 csum[BTRFS_CSUM_SIZE];
1860         u8 *on_disk_csum;
1861         struct page *page;
1862         void *buffer;
1863         u32 crc = ~(u32)0;
1864         int fail = 0;
1865         u64 len;
1866         int index;
1867
1868         BUG_ON(sblock->page_count < 1);
1869         if (!sblock->pagev[0]->have_csum)
1870                 return 0;
1871
1872         on_disk_csum = sblock->pagev[0]->csum;
1873         page = sblock->pagev[0]->page;
1874         buffer = kmap_atomic(page);
1875
1876         len = sctx->sectorsize;
1877         index = 0;
1878         for (;;) {
1879                 u64 l = min_t(u64, len, PAGE_SIZE);
1880
1881                 crc = btrfs_csum_data(buffer, crc, l);
1882                 kunmap_atomic(buffer);
1883                 len -= l;
1884                 if (len == 0)
1885                         break;
1886                 index++;
1887                 BUG_ON(index >= sblock->page_count);
1888                 BUG_ON(!sblock->pagev[index]->page);
1889                 page = sblock->pagev[index]->page;
1890                 buffer = kmap_atomic(page);
1891         }
1892
1893         btrfs_csum_final(crc, csum);
1894         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1895                 fail = 1;
1896
1897         return fail;
1898 }
1899
1900 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1901 {
1902         struct scrub_ctx *sctx = sblock->sctx;
1903         struct btrfs_header *h;
1904         struct btrfs_root *root = sctx->dev_root;
1905         struct btrfs_fs_info *fs_info = root->fs_info;
1906         u8 calculated_csum[BTRFS_CSUM_SIZE];
1907         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1908         struct page *page;
1909         void *mapped_buffer;
1910         u64 mapped_size;
1911         void *p;
1912         u32 crc = ~(u32)0;
1913         int fail = 0;
1914         int crc_fail = 0;
1915         u64 len;
1916         int index;
1917
1918         BUG_ON(sblock->page_count < 1);
1919         page = sblock->pagev[0]->page;
1920         mapped_buffer = kmap_atomic(page);
1921         h = (struct btrfs_header *)mapped_buffer;
1922         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1923
1924         /*
1925          * we don't use the getter functions here, as we
1926          * a) don't have an extent buffer and
1927          * b) the page is already kmapped
1928          */
1929
1930         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1931                 ++fail;
1932
1933         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
1934                 ++fail;
1935
1936         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1937                 ++fail;
1938
1939         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1940                    BTRFS_UUID_SIZE))
1941                 ++fail;
1942
1943         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1944         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1945         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1946         index = 0;
1947         for (;;) {
1948                 u64 l = min_t(u64, len, mapped_size);
1949
1950                 crc = btrfs_csum_data(p, crc, l);
1951                 kunmap_atomic(mapped_buffer);
1952                 len -= l;
1953                 if (len == 0)
1954                         break;
1955                 index++;
1956                 BUG_ON(index >= sblock->page_count);
1957                 BUG_ON(!sblock->pagev[index]->page);
1958                 page = sblock->pagev[index]->page;
1959                 mapped_buffer = kmap_atomic(page);
1960                 mapped_size = PAGE_SIZE;
1961                 p = mapped_buffer;
1962         }
1963
1964         btrfs_csum_final(crc, calculated_csum);
1965         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1966                 ++crc_fail;
1967
1968         return fail || crc_fail;
1969 }
1970
1971 static int scrub_checksum_super(struct scrub_block *sblock)
1972 {
1973         struct btrfs_super_block *s;
1974         struct scrub_ctx *sctx = sblock->sctx;
1975         u8 calculated_csum[BTRFS_CSUM_SIZE];
1976         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1977         struct page *page;
1978         void *mapped_buffer;
1979         u64 mapped_size;
1980         void *p;
1981         u32 crc = ~(u32)0;
1982         int fail_gen = 0;
1983         int fail_cor = 0;
1984         u64 len;
1985         int index;
1986
1987         BUG_ON(sblock->page_count < 1);
1988         page = sblock->pagev[0]->page;
1989         mapped_buffer = kmap_atomic(page);
1990         s = (struct btrfs_super_block *)mapped_buffer;
1991         memcpy(on_disk_csum, s->csum, sctx->csum_size);
1992
1993         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1994                 ++fail_cor;
1995
1996         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1997                 ++fail_gen;
1998
1999         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
2000                 ++fail_cor;
2001
2002         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2003         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2004         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2005         index = 0;
2006         for (;;) {
2007                 u64 l = min_t(u64, len, mapped_size);
2008
2009                 crc = btrfs_csum_data(p, crc, l);
2010                 kunmap_atomic(mapped_buffer);
2011                 len -= l;
2012                 if (len == 0)
2013                         break;
2014                 index++;
2015                 BUG_ON(index >= sblock->page_count);
2016                 BUG_ON(!sblock->pagev[index]->page);
2017                 page = sblock->pagev[index]->page;
2018                 mapped_buffer = kmap_atomic(page);
2019                 mapped_size = PAGE_SIZE;
2020                 p = mapped_buffer;
2021         }
2022
2023         btrfs_csum_final(crc, calculated_csum);
2024         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2025                 ++fail_cor;
2026
2027         if (fail_cor + fail_gen) {
2028                 /*
2029                  * If we find an error in a super block, we just report it.
2030                  * Super blocks will get rewritten with the next transaction
2031                  * commit anyway.
2032                  */
2033                 spin_lock(&sctx->stat_lock);
2034                 ++sctx->stat.super_errors;
2035                 spin_unlock(&sctx->stat_lock);
2036                 if (fail_cor)
2037                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2038                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2039                 else
2040                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2041                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2042         }
2043
2044         return fail_cor + fail_gen;
2045 }
2046
2047 static void scrub_block_get(struct scrub_block *sblock)
2048 {
2049         atomic_inc(&sblock->refs);
2050 }
2051
2052 static void scrub_block_put(struct scrub_block *sblock)
2053 {
2054         if (atomic_dec_and_test(&sblock->refs)) {
2055                 int i;
2056
2057                 if (sblock->sparity)
2058                         scrub_parity_put(sblock->sparity);
2059
2060                 for (i = 0; i < sblock->page_count; i++)
2061                         scrub_page_put(sblock->pagev[i]);
2062                 kfree(sblock);
2063         }
2064 }
2065
2066 static void scrub_page_get(struct scrub_page *spage)
2067 {
2068         atomic_inc(&spage->refs);
2069 }
2070
2071 static void scrub_page_put(struct scrub_page *spage)
2072 {
2073         if (atomic_dec_and_test(&spage->refs)) {
2074                 if (spage->page)
2075                         __free_page(spage->page);
2076                 kfree(spage);
2077         }
2078 }
2079
2080 static void scrub_submit(struct scrub_ctx *sctx)
2081 {
2082         struct scrub_bio *sbio;
2083
2084         if (sctx->curr == -1)
2085                 return;
2086
2087         sbio = sctx->bios[sctx->curr];
2088         sctx->curr = -1;
2089         scrub_pending_bio_inc(sctx);
2090
2091         if (!sbio->bio->bi_bdev) {
2092                 /*
2093                  * This case should not happen. If btrfs_map_block() is
2094                  * wrong, it could happen for dev-replace operations on
2095                  * missing devices when no mirrors are available, but in
2096                  * that case the mount should already have failed.
2097                  * The error is still handled correctly here (but _very_ slowly).
2098                  */
2099                 printk_ratelimited(KERN_WARNING
2100                         "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
2101                 bio_endio(sbio->bio, -EIO);
2102         } else {
2103                 btrfsic_submit_bio(READ, sbio->bio);
2104         }
2105 }
2106
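     /*
      * Queue a page for reading: wait for a free scrub_bio if none is
      * current, pack physically and logically contiguous pages of the
      * same device into it, and submit it once it is full or a
      * non-contiguous page arrives.
      */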
2107 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2108                                     struct scrub_page *spage)
2109 {
2110         struct scrub_block *sblock = spage->sblock;
2111         struct scrub_bio *sbio;
2112         int ret;
2113
2114 again:
2115         /*
2116          * grab a fresh bio or wait for one to become available
2117          */
2118         while (sctx->curr == -1) {
2119                 spin_lock(&sctx->list_lock);
2120                 sctx->curr = sctx->first_free;
2121                 if (sctx->curr != -1) {
2122                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2123                         sctx->bios[sctx->curr]->next_free = -1;
2124                         sctx->bios[sctx->curr]->page_count = 0;
2125                         spin_unlock(&sctx->list_lock);
2126                 } else {
2127                         spin_unlock(&sctx->list_lock);
2128                         wait_event(sctx->list_wait, sctx->first_free != -1);
2129                 }
2130         }
2131         sbio = sctx->bios[sctx->curr];
2132         if (sbio->page_count == 0) {
2133                 struct bio *bio;
2134
2135                 sbio->physical = spage->physical;
2136                 sbio->logical = spage->logical;
2137                 sbio->dev = spage->dev;
2138                 bio = sbio->bio;
2139                 if (!bio) {
2140                         bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
2141                         if (!bio)
2142                                 return -ENOMEM;
2143                         sbio->bio = bio;
2144                 }
2145
2146                 bio->bi_private = sbio;
2147                 bio->bi_end_io = scrub_bio_end_io;
2148                 bio->bi_bdev = sbio->dev->bdev;
2149                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2150                 sbio->err = 0;
2151         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2152                    spage->physical ||
2153                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2154                    spage->logical ||
2155                    sbio->dev != spage->dev) {
2156                 scrub_submit(sctx);
2157                 goto again;
2158         }
2159
2160         sbio->pagev[sbio->page_count] = spage;
2161         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2162         if (ret != PAGE_SIZE) {
2163                 if (sbio->page_count < 1) {
2164                         bio_put(sbio->bio);
2165                         sbio->bio = NULL;
2166                         return -EIO;
2167                 }
2168                 scrub_submit(sctx);
2169                 goto again;
2170         }
2171
2172         scrub_block_get(sblock); /* one for the page added to the bio */
2173         atomic_inc(&sblock->outstanding_pages);
2174         sbio->page_count++;
2175         if (sbio->page_count == sctx->pages_per_rd_bio)
2176                 scrub_submit(sctx);
2177
2178         return 0;
2179 }
2180
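     /*
      * Split the range [logical, logical + len) into a scrub_block of
      * PAGE_SIZE pages, attach the csum if one is known, and queue every
      * page for reading; with @force set, the current read bio is
      * submitted immediately.
      */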
2181 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2182                        u64 physical, struct btrfs_device *dev, u64 flags,
2183                        u64 gen, int mirror_num, u8 *csum, int force,
2184                        u64 physical_for_dev_replace)
2185 {
2186         struct scrub_block *sblock;
2187         int index;
2188
2189         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2190         if (!sblock) {
2191                 spin_lock(&sctx->stat_lock);
2192                 sctx->stat.malloc_errors++;
2193                 spin_unlock(&sctx->stat_lock);
2194                 return -ENOMEM;
2195         }
2196
2197         /* one ref inside this function, plus one for each page added to
2198          * a bio later on */
2199         atomic_set(&sblock->refs, 1);
2200         sblock->sctx = sctx;
2201         sblock->no_io_error_seen = 1;
2202
2203         for (index = 0; len > 0; index++) {
2204                 struct scrub_page *spage;
2205                 u64 l = min_t(u64, len, PAGE_SIZE);
2206
2207                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2208                 if (!spage) {
2209 leave_nomem:
2210                         spin_lock(&sctx->stat_lock);
2211                         sctx->stat.malloc_errors++;
2212                         spin_unlock(&sctx->stat_lock);
2213                         scrub_block_put(sblock);
2214                         return -ENOMEM;
2215                 }
2216                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2217                 scrub_page_get(spage);
2218                 sblock->pagev[index] = spage;
2219                 spage->sblock = sblock;
2220                 spage->dev = dev;
2221                 spage->flags = flags;
2222                 spage->generation = gen;
2223                 spage->logical = logical;
2224                 spage->physical = physical;
2225                 spage->physical_for_dev_replace = physical_for_dev_replace;
2226                 spage->mirror_num = mirror_num;
2227                 if (csum) {
2228                         spage->have_csum = 1;
2229                         memcpy(spage->csum, csum, sctx->csum_size);
2230                 } else {
2231                         spage->have_csum = 0;
2232                 }
2233                 sblock->page_count++;
2234                 spage->page = alloc_page(GFP_NOFS);
2235                 if (!spage->page)
2236                         goto leave_nomem;
2237                 len -= l;
2238                 logical += l;
2239                 physical += l;
2240                 physical_for_dev_replace += l;
2241         }
2242
2243         WARN_ON(sblock->page_count == 0);
2244         for (index = 0; index < sblock->page_count; index++) {
2245                 struct scrub_page *spage = sblock->pagev[index];
2246                 int ret;
2247
2248                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2249                 if (ret) {
2250                         scrub_block_put(sblock);
2251                         return ret;
2252                 }
2253         }
2254
2255         if (force)
2256                 scrub_submit(sctx);
2257
2258         /* last one frees, either here or in bio completion for last page */
2259         scrub_block_put(sblock);
2260         return 0;
2261 }
2262
2263 static void scrub_bio_end_io(struct bio *bio, int err)
2264 {
2265         struct scrub_bio *sbio = bio->bi_private;
2266         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2267
2268         sbio->err = err;
2269         sbio->bio = bio;
2270
2271         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2272 }
2273
2274 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2275 {
2276         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2277         struct scrub_ctx *sctx = sbio->sctx;
2278         int i;
2279
2280         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2281         if (sbio->err) {
2282                 for (i = 0; i < sbio->page_count; i++) {
2283                         struct scrub_page *spage = sbio->pagev[i];
2284
2285                         spage->io_error = 1;
2286                         spage->sblock->no_io_error_seen = 0;
2287                 }
2288         }
2289
2290         /* now complete the scrub_block items that have all pages completed */
2291         for (i = 0; i < sbio->page_count; i++) {
2292                 struct scrub_page *spage = sbio->pagev[i];
2293                 struct scrub_block *sblock = spage->sblock;
2294
2295                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2296                         scrub_block_complete(sblock);
2297                 scrub_block_put(sblock);
2298         }
2299
2300         bio_put(sbio->bio);
2301         sbio->bio = NULL;
2302         spin_lock(&sctx->list_lock);
2303         sbio->next_free = sctx->first_free;
2304         sctx->first_free = sbio->index;
2305         spin_unlock(&sctx->list_lock);
2306
2307         if (sctx->is_dev_replace &&
2308             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2309                 mutex_lock(&sctx->wr_ctx.wr_lock);
2310                 scrub_wr_submit(sctx);
2311                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2312         }
2313
2314         scrub_pending_bio_dec(sctx);
2315 }
2316
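     /*
      * Mark the sectors of [start, start + len) in a per-stripe bitmap,
      * wrapping around at the stripe length; a range that covers the
      * whole stripe sets all bits.
      */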
2317 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2318                                        unsigned long *bitmap,
2319                                        u64 start, u64 len)
2320 {
2321         u32 offset;
2322         int nsectors;
2323         int sectorsize = sparity->sctx->dev_root->sectorsize;
2324
2325         if (len >= sparity->stripe_len) {
2326                 bitmap_set(bitmap, 0, sparity->nsectors);
2327                 return;
2328         }
2329
2330         start -= sparity->logic_start;
2331         start = div_u64_rem(start, sparity->stripe_len, &offset);
2332         offset /= sectorsize;
2333         nsectors = (int)len / sectorsize;
2334
2335         if (offset + nsectors <= sparity->nsectors) {
2336                 bitmap_set(bitmap, offset, nsectors);
2337                 return;
2338         }
2339
2340         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2341         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2342 }
2343
2344 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2345                                                    u64 start, u64 len)
2346 {
2347         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2348 }
2349
2350 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2351                                                   u64 start, u64 len)
2352 {
2353         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2354 }
2355
2356 static void scrub_block_complete(struct scrub_block *sblock)
2357 {
2358         int corrupted = 0;
2359
2360         if (!sblock->no_io_error_seen) {
2361                 corrupted = 1;
2362                 scrub_handle_errored_block(sblock);
2363         } else {
2364                 /*
2365                  * In the dev-replace case, a block with a checksum error
2366                  * is rewritten via the repair mechanism; otherwise it is
2367                  * copied to the replace target right here.
2368                  */
2369                 corrupted = scrub_checksum(sblock);
2370                 if (!corrupted && sblock->sctx->is_dev_replace)
2371                         scrub_write_block_to_dev_replace(sblock);
2372         }
2373
2374         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2375                 u64 start = sblock->pagev[0]->logical;
2376                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2377                           PAGE_SIZE;
2378
2379                 scrub_parity_mark_sectors_error(sblock->sparity,
2380                                                 start, end - start);
2381         }
2382 }
2383
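     /*
      * Look up the checksum for @logical in the list of csums prefetched
      * from the csum tree. Entries that end at or before @logical are
      * discarded; if an entry covers @logical, its csum is copied and 1
      * is returned, otherwise 0.
      */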
2384 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2385                            u8 *csum)
2386 {
2387         struct btrfs_ordered_sum *sum = NULL;
2388         unsigned long index;
2389         unsigned long num_sectors;
2390
2391         while (!list_empty(&sctx->csum_list)) {
2392                 sum = list_first_entry(&sctx->csum_list,
2393                                        struct btrfs_ordered_sum, list);
2394                 if (sum->bytenr > logical)
2395                         return 0;
2396                 if (sum->bytenr + sum->len > logical)
2397                         break;
2398
2399                 ++sctx->stat.csum_discards;
2400                 list_del(&sum->list);
2401                 kfree(sum);
2402                 sum = NULL;
2403         }
2404         if (!sum)
2405                 return 0;
2406
2407         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2408         num_sectors = sum->len / sctx->sectorsize;
2409         memcpy(csum, sum->sums + index, sctx->csum_size);
2410         if (index == num_sectors - 1) {
2411                 list_del(&sum->list);
2412                 kfree(sum);
2413         }
2414         return 1;
2415 }
2416
2417 /* scrub_extent() tries to collect up to 64 kB for each bio */
2418 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2419                         u64 physical, struct btrfs_device *dev, u64 flags,
2420                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2421 {
2422         int ret;
2423         u8 csum[BTRFS_CSUM_SIZE];
2424         u32 blocksize;
2425
2426         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2427                 blocksize = sctx->sectorsize;
2428                 spin_lock(&sctx->stat_lock);
2429                 sctx->stat.data_extents_scrubbed++;
2430                 sctx->stat.data_bytes_scrubbed += len;
2431                 spin_unlock(&sctx->stat_lock);
2432         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2433                 blocksize = sctx->nodesize;
2434                 spin_lock(&sctx->stat_lock);
2435                 sctx->stat.tree_extents_scrubbed++;
2436                 sctx->stat.tree_bytes_scrubbed += len;
2437                 spin_unlock(&sctx->stat_lock);
2438         } else {
2439                 blocksize = sctx->sectorsize;
2440                 WARN_ON(1);
2441         }
2442
2443         while (len) {
2444                 u64 l = min_t(u64, len, blocksize);
2445                 int have_csum = 0;
2446
2447                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2448                         /* push csums to sbio */
2449                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2450                         if (have_csum == 0)
2451                                 ++sctx->stat.no_csum;
2452                         if (sctx->is_dev_replace && !have_csum) {
2453                                 ret = copy_nocow_pages(sctx, logical, l,
2454                                                        mirror_num,
2455                                                       physical_for_dev_replace);
2456                                 goto behind_scrub_pages;
2457                         }
2458                 }
2459                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2460                                   mirror_num, have_csum ? csum : NULL, 0,
2461                                   physical_for_dev_replace);
2462 behind_scrub_pages:
2463                 if (ret)
2464                         return ret;
2465                 len -= l;
2466                 logical += l;
2467                 physical += l;
2468                 physical_for_dev_replace += l;
2469         }
2470         return 0;
2471 }
2472
2473 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2474                                   u64 logical, u64 len,
2475                                   u64 physical, struct btrfs_device *dev,
2476                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2477 {
2478         struct scrub_ctx *sctx = sparity->sctx;
2479         struct scrub_block *sblock;
2480         int index;
2481
2482         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2483         if (!sblock) {
2484                 spin_lock(&sctx->stat_lock);
2485                 sctx->stat.malloc_errors++;
2486                 spin_unlock(&sctx->stat_lock);
2487                 return -ENOMEM;
2488         }
2489
2490         /* one ref inside this function, plus one for each page added to
2491          * a bio later on */
2492         atomic_set(&sblock->refs, 1);
2493         sblock->sctx = sctx;
2494         sblock->no_io_error_seen = 1;
2495         sblock->sparity = sparity;
2496         scrub_parity_get(sparity);
2497
2498         for (index = 0; len > 0; index++) {
2499                 struct scrub_page *spage;
2500                 u64 l = min_t(u64, len, PAGE_SIZE);
2501
2502                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2503                 if (!spage) {
2504 leave_nomem:
2505                         spin_lock(&sctx->stat_lock);
2506                         sctx->stat.malloc_errors++;
2507                         spin_unlock(&sctx->stat_lock);
2508                         scrub_block_put(sblock);
2509                         return -ENOMEM;
2510                 }
2511                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2512                 /* For scrub block */
2513                 scrub_page_get(spage);
2514                 sblock->pagev[index] = spage;
2515                 /* For scrub parity */
2516                 scrub_page_get(spage);
2517                 list_add_tail(&spage->list, &sparity->spages);
2518                 spage->sblock = sblock;
2519                 spage->dev = dev;
2520                 spage->flags = flags;
2521                 spage->generation = gen;
2522                 spage->logical = logical;
2523                 spage->physical = physical;
2524                 spage->mirror_num = mirror_num;
2525                 if (csum) {
2526                         spage->have_csum = 1;
2527                         memcpy(spage->csum, csum, sctx->csum_size);
2528                 } else {
2529                         spage->have_csum = 0;
2530                 }
2531                 sblock->page_count++;
2532                 spage->page = alloc_page(GFP_NOFS);
2533                 if (!spage->page)
2534                         goto leave_nomem;
2535                 len -= l;
2536                 logical += l;
2537                 physical += l;
2538         }
2539
2540         WARN_ON(sblock->page_count == 0);
2541         for (index = 0; index < sblock->page_count; index++) {
2542                 struct scrub_page *spage = sblock->pagev[index];
2543                 int ret;
2544
2545                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2546                 if (ret) {
2547                         scrub_block_put(sblock);
2548                         return ret;
2549                 }
2550         }
2551
2552         /* last one frees, either here or in bio completion for last page */
2553         scrub_block_put(sblock);
2554         return 0;
2555 }
2556
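     /*
      * Like scrub_extent(), but for extents inside a RAID5/6 stripe that
      * is being parity checked; data blocks without a known csum are
      * simply skipped.
      */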
2557 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2558                                    u64 logical, u64 len,
2559                                    u64 physical, struct btrfs_device *dev,
2560                                    u64 flags, u64 gen, int mirror_num)
2561 {
2562         struct scrub_ctx *sctx = sparity->sctx;
2563         int ret;
2564         u8 csum[BTRFS_CSUM_SIZE];
2565         u32 blocksize;
2566
2567         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2568                 blocksize = sctx->sectorsize;
2569         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2570                 blocksize = sctx->nodesize;
2571         } else {
2572                 blocksize = sctx->sectorsize;
2573                 WARN_ON(1);
2574         }
2575
2576         while (len) {
2577                 u64 l = min_t(u64, len, blocksize);
2578                 int have_csum = 0;
2579
2580                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2581                         /* push csums to sbio */
2582                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2583                         if (have_csum == 0)
2584                                 goto skip;
2585                 }
2586                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2587                                              flags, gen, mirror_num,
2588                                              have_csum ? csum : NULL);
2589                 if (ret)
2590                         return ret;
2591 skip:
2592                 len -= l;
2593                 logical += l;
2594                 physical += l;
2595         }
2596         return 0;
2597 }
2598
2599 /*
2600  * Given a physical address, this will calculate its
2601  * logical offset. If this is a parity stripe, it will return
2602  * the leftmost data stripe's logical offset.
2603  *
2604  * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2605  */
2606 static int get_raid56_logic_offset(u64 physical, int num,
2607                                    struct map_lookup *map, u64 *offset,
2608                                    u64 *stripe_start)
2609 {
2610         int i;
2611         int j = 0;
2612         u64 stripe_nr;
2613         u64 last_offset;
2614         u32 stripe_index;
2615         u32 rot;
2616
2617         last_offset = (physical - map->stripes[num].physical) *
2618                       nr_data_stripes(map);
2619         if (stripe_start)
2620                 *stripe_start = last_offset;
2621
2622         *offset = last_offset;
2623         for (i = 0; i < nr_data_stripes(map); i++) {
2624                 *offset = last_offset + i * map->stripe_len;
2625
2626                 stripe_nr = div_u64(*offset, map->stripe_len);
2627                 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2628
2629                 /* Work out the disk rotation on this stripe-set */
2630                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2631                 /* calculate which stripe this data is located on */
2632                 rot += i;
2633                 stripe_index = rot % map->num_stripes;
2634                 if (stripe_index == num)
2635                         return 0;
2636                 if (stripe_index < num)
2637                         j++;
2638         }
2639         *offset = last_offset + j * map->stripe_len;
2640         return 1;
2641 }
2642
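     /*
      * Drop a parity descriptor: sectors still marked in the error bitmap
      * are accounted as unrepairable read errors before the pages and the
      * descriptor itself are freed.
      */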
2643 static void scrub_free_parity(struct scrub_parity *sparity)
2644 {
2645         struct scrub_ctx *sctx = sparity->sctx;
2646         struct scrub_page *curr, *next;
2647         int nbits;
2648
2649         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2650         if (nbits) {
2651                 spin_lock(&sctx->stat_lock);
2652                 sctx->stat.read_errors += nbits;
2653                 sctx->stat.uncorrectable_errors += nbits;
2654                 spin_unlock(&sctx->stat_lock);
2655         }
2656
2657         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2658                 list_del_init(&curr->list);
2659                 scrub_page_put(curr);
2660         }
2661
2662         kfree(sparity);
2663 }
2664
2665 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2666 {
2667         struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2668                                                     work);
2669         struct scrub_ctx *sctx = sparity->sctx;
2670
2671         scrub_free_parity(sparity);
2672         scrub_pending_bio_dec(sctx);
2673 }
2674
2675 static void scrub_parity_bio_endio(struct bio *bio, int error)
2676 {
2677         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2678
2679         if (error)
2680                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2681                           sparity->nsectors);
2682
2683         bio_put(bio);
2684
2685         btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2686                         scrub_parity_bio_endio_worker, NULL, NULL);
2687         btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
2688                          &sparity->work);
2689 }
2690
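     /*
      * Verify the parity of a full stripe once all of its data has been
      * scrubbed: sectors that hit errors are dropped from the pending
      * bitmap, and for the remaining sectors a scrub rbio is handed to
      * the RAID56 layer, which recomputes and checks the parity.
      */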
2691 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2692 {
2693         struct scrub_ctx *sctx = sparity->sctx;
2694         struct bio *bio;
2695         struct btrfs_raid_bio *rbio;
2696         struct scrub_page *spage;
2697         struct btrfs_bio *bbio = NULL;
2698         u64 length;
2699         int ret;
2700
2701         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2702                            sparity->nsectors))
2703                 goto out;
2704
2705         length = sparity->logic_end - sparity->logic_start + 1;
2706         ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2707                                sparity->logic_start,
2708                                &length, &bbio, 0, 1);
2709         if (ret || !bbio || !bbio->raid_map)
2710                 goto bbio_out;
2711
2712         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2713         if (!bio)
2714                 goto bbio_out;
2715
2716         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2717         bio->bi_private = sparity;
2718         bio->bi_end_io = scrub_parity_bio_endio;
2719
2720         rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2721                                               length, sparity->scrub_dev,
2722                                               sparity->dbitmap,
2723                                               sparity->nsectors);
2724         if (!rbio)
2725                 goto rbio_out;
2726
2727         list_for_each_entry(spage, &sparity->spages, list)
2728                 raid56_parity_add_scrub_pages(rbio, spage->page,
2729                                               spage->logical);
2730
2731         scrub_pending_bio_inc(sctx);
2732         raid56_parity_submit_scrub_rbio(rbio);
2733         return;
2734
2735 rbio_out:
2736         bio_put(bio);
2737 bbio_out:
2738         btrfs_put_bbio(bbio);
2739         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2740                   sparity->nsectors);
2741         spin_lock(&sctx->stat_lock);
2742         sctx->stat.malloc_errors++;
2743         spin_unlock(&sctx->stat_lock);
2744 out:
2745         scrub_free_parity(sparity);
2746 }
2747
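     /*
      * Size in bytes of a bitmap covering @nsectors bits, rounded up to a
      * whole number of longs as the bitmap helpers require.
      */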
2748 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2749 {
2750         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2751 }
2752
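     /*
      * scrub_raid56_parity() holds the initial reference on a scrub_parity
      * context; dropping the last reference through scrub_parity_put()
      * triggers the parity check and repair.
      */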
2753 static void scrub_parity_get(struct scrub_parity *sparity)
2754 {
2755         atomic_inc(&sparity->refs);
2756 }
2757
2758 static void scrub_parity_put(struct scrub_parity *sparity)
2759 {
2760         if (!atomic_dec_and_test(&sparity->refs))
2761                 return;
2762
2763         scrub_parity_check_and_repair(sparity);
2764 }
2765
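     /*
      * Scrub the data extents covered by one RAID5/6 full stripe,
      * [logic_start, logic_end]. Extent items are walked on the commit
      * root, trimmed to the current data stripe, their checksums are
      * looked up and the sectors are recorded in a scrub_parity context;
      * once all references to that context are dropped, the parity of the
      * full stripe is checked and repaired.
      */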
2766 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2767                                                   struct map_lookup *map,
2768                                                   struct btrfs_device *sdev,
2769                                                   struct btrfs_path *path,
2770                                                   u64 logic_start,
2771                                                   u64 logic_end)
2772 {
2773         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2774         struct btrfs_root *root = fs_info->extent_root;
2775         struct btrfs_root *csum_root = fs_info->csum_root;
2776         struct btrfs_extent_item *extent;
2777         u64 flags;
2778         int ret;
2779         int slot;
2780         struct extent_buffer *l;
2781         struct btrfs_key key;
2782         u64 generation;
2783         u64 extent_logical;
2784         u64 extent_physical;
2785         u64 extent_len;
2786         struct btrfs_device *extent_dev;
2787         struct scrub_parity *sparity;
2788         int nsectors;
2789         int bitmap_len;
2790         int extent_mirror_num;
2791         int stop_loop = 0;
2792
2793         nsectors = map->stripe_len / root->sectorsize;
2794         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2795         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2796                           GFP_NOFS);
2797         if (!sparity) {
2798                 spin_lock(&sctx->stat_lock);
2799                 sctx->stat.malloc_errors++;
2800                 spin_unlock(&sctx->stat_lock);
2801                 return -ENOMEM;
2802         }
2803
2804         sparity->stripe_len = map->stripe_len;
2805         sparity->nsectors = nsectors;
2806         sparity->sctx = sctx;
2807         sparity->scrub_dev = sdev;
2808         sparity->logic_start = logic_start;
2809         sparity->logic_end = logic_end;
2810         atomic_set(&sparity->refs, 1);
2811         INIT_LIST_HEAD(&sparity->spages);
2812         sparity->dbitmap = sparity->bitmap;
2813         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2814
2815         ret = 0;
2816         while (logic_start < logic_end) {
2817                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2818                         key.type = BTRFS_METADATA_ITEM_KEY;
2819                 else
2820                         key.type = BTRFS_EXTENT_ITEM_KEY;
2821                 key.objectid = logic_start;
2822                 key.offset = (u64)-1;
2823
2824                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2825                 if (ret < 0)
2826                         goto out;
2827
2828                 if (ret > 0) {
2829                         ret = btrfs_previous_extent_item(root, path, 0);
2830                         if (ret < 0)
2831                                 goto out;
2832                         if (ret > 0) {
2833                                 btrfs_release_path(path);
2834                                 ret = btrfs_search_slot(NULL, root, &key,
2835                                                         path, 0, 0);
2836                                 if (ret < 0)
2837                                         goto out;
2838                         }
2839                 }
2840
2841                 stop_loop = 0;
2842                 while (1) {
2843                         u64 bytes;
2844
2845                         l = path->nodes[0];
2846                         slot = path->slots[0];
2847                         if (slot >= btrfs_header_nritems(l)) {
2848                                 ret = btrfs_next_leaf(root, path);
2849                                 if (ret == 0)
2850                                         continue;
2851                                 if (ret < 0)
2852                                         goto out;
2853
2854                                 stop_loop = 1;
2855                                 break;
2856                         }
2857                         btrfs_item_key_to_cpu(l, &key, slot);
2858
2859                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2860                                 bytes = root->nodesize;
2861                         else
2862                                 bytes = key.offset;
2863
2864                         if (key.objectid + bytes <= logic_start)
2865                                 goto next;
2866
2867                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2868                             key.type != BTRFS_METADATA_ITEM_KEY)
2869                                 goto next;
2870
2871                         if (key.objectid > logic_end) {
2872                                 stop_loop = 1;
2873                                 break;
2874                         }
2875
2876                         while (key.objectid >= logic_start + map->stripe_len)
2877                                 logic_start += map->stripe_len;
2878
2879                         extent = btrfs_item_ptr(l, slot,
2880                                                 struct btrfs_extent_item);
2881                         flags = btrfs_extent_flags(l, extent);
2882                         generation = btrfs_extent_generation(l, extent);
2883
2884                         if (key.objectid < logic_start &&
2885                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2886                                 btrfs_err(fs_info,
2887                                           "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2888                                            key.objectid, logic_start);
2889                                 goto next;
2890                         }
2891 again:
2892                         extent_logical = key.objectid;
2893                         extent_len = bytes;
2894
2895                         if (extent_logical < logic_start) {
2896                                 extent_len -= logic_start - extent_logical;
2897                                 extent_logical = logic_start;
2898                         }
2899
2900                         if (extent_logical + extent_len >
2901                             logic_start + map->stripe_len)
2902                                 extent_len = logic_start + map->stripe_len -
2903                                              extent_logical;
2904
2905                         scrub_parity_mark_sectors_data(sparity, extent_logical,
2906                                                        extent_len);
2907
2908                         scrub_remap_extent(fs_info, extent_logical,
2909                                            extent_len, &extent_physical,
2910                                            &extent_dev,
2911                                            &extent_mirror_num);
2912
2913                         ret = btrfs_lookup_csums_range(csum_root,
2914                                                 extent_logical,
2915                                                 extent_logical + extent_len - 1,
2916                                                 &sctx->csum_list, 1);
2917                         if (ret)
2918                                 goto out;
2919
2920                         ret = scrub_extent_for_parity(sparity, extent_logical,
2921                                                       extent_len,
2922                                                       extent_physical,
2923                                                       extent_dev, flags,
2924                                                       generation,
2925                                                       extent_mirror_num);
2926                         if (ret)
2927                                 goto out;
2928
2929                         scrub_free_csums(sctx);
2930                         if (extent_logical + extent_len <
2931                             key.objectid + bytes) {
2932                                 logic_start += map->stripe_len;
2933
2934                                 if (logic_start >= logic_end) {
2935                                         stop_loop = 1;
2936                                         break;
2937                                 }
2938
2939                                 if (logic_start < key.objectid + bytes) {
2940                                         cond_resched();
2941                                         goto again;
2942                                 }
2943                         }
2944 next:
2945                         path->slots[0]++;
2946                 }
2947
2948                 btrfs_release_path(path);
2949
2950                 if (stop_loop)
2951                         break;
2952
2953                 logic_start += map->stripe_len;
2954         }
2955 out:
2956         if (ret < 0)
2957                 scrub_parity_mark_sectors_error(sparity, logic_start,
2958                                                 logic_end - logic_start + 1);
2959         scrub_parity_put(sparity);
2960         scrub_submit(sctx);
2961         mutex_lock(&sctx->wr_ctx.wr_lock);
2962         scrub_wr_submit(sctx);
2963         mutex_unlock(&sctx->wr_ctx.wr_lock);
2964
2965         btrfs_release_path(path);
2966         return ret < 0 ? ret : 0;
2967 }
2968
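     /*
      * Scrub one device stripe of a chunk. The logical offset and the
      * per-iteration increment are derived from the RAID profile, the
      * extent and csum trees are read ahead, and every extent that falls
      * into the stripe is then verified via scrub_extent(). For RAID5/6
      * the parity stripes are skipped here and handled separately by
      * scrub_raid56_parity().
      */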
2969 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2970                                            struct map_lookup *map,
2971                                            struct btrfs_device *scrub_dev,
2972                                            int num, u64 base, u64 length,
2973                                            int is_dev_replace)
2974 {
2975         struct btrfs_path *path, *ppath;
2976         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2977         struct btrfs_root *root = fs_info->extent_root;
2978         struct btrfs_root *csum_root = fs_info->csum_root;
2979         struct btrfs_extent_item *extent;
2980         struct blk_plug plug;
2981         u64 flags;
2982         int ret;
2983         int slot;
2984         u64 nstripes;
2985         struct extent_buffer *l;
2986         struct btrfs_key key;
2987         u64 physical;
2988         u64 logical;
2989         u64 logic_end;
2990         u64 physical_end;
2991         u64 generation;
2992         int mirror_num;
2993         struct reada_control *reada1;
2994         struct reada_control *reada2;
2995         struct btrfs_key key_start;
2996         struct btrfs_key key_end;
2997         u64 increment = map->stripe_len;
2998         u64 offset;
2999         u64 extent_logical;
3000         u64 extent_physical;
3001         u64 extent_len;
3002         u64 stripe_logical;
3003         u64 stripe_end;
3004         struct btrfs_device *extent_dev;
3005         int extent_mirror_num;
3006         int stop_loop = 0;
3007
3008         physical = map->stripes[num].physical;
3009         offset = 0;
3010         nstripes = div_u64(length, map->stripe_len);
3011         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3012                 offset = map->stripe_len * num;
3013                 increment = map->stripe_len * map->num_stripes;
3014                 mirror_num = 1;
3015         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3016                 int factor = map->num_stripes / map->sub_stripes;
3017                 offset = map->stripe_len * (num / map->sub_stripes);
3018                 increment = map->stripe_len * factor;
3019                 mirror_num = num % map->sub_stripes + 1;
3020         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3021                 increment = map->stripe_len;
3022                 mirror_num = num % map->num_stripes + 1;
3023         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3024                 increment = map->stripe_len;
3025                 mirror_num = num % map->num_stripes + 1;
3026         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3027                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3028                 increment = map->stripe_len * nr_data_stripes(map);
3029                 mirror_num = 1;
3030         } else {
3031                 increment = map->stripe_len;
3032                 mirror_num = 1;
3033         }
3034
3035         path = btrfs_alloc_path();
3036         if (!path)
3037                 return -ENOMEM;
3038
3039         ppath = btrfs_alloc_path();
3040         if (!ppath) {
3041                 btrfs_free_path(path);
3042                 return -ENOMEM;
3043         }
3044
3045         /*
3046          * work on commit root. The related disk blocks are static as
3047          * long as COW is applied. This means it is safe to rewrite
3048          * them to repair disk errors without any race conditions
3049          */
3050         path->search_commit_root = 1;
3051         path->skip_locking = 1;
3052
3053         ppath->search_commit_root = 1;
3054         ppath->skip_locking = 1;
3055         /*
3056          * trigger the readahead for the extent and csum trees and wait for
3057          * completion. During readahead, the scrub is officially paused
3058          * to not hold off transaction commits
3059          */
3060         logical = base + offset;
3061         physical_end = physical + nstripes * map->stripe_len;
3062         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3063                 get_raid56_logic_offset(physical_end, num,
3064                                         map, &logic_end, NULL);
3065                 logic_end += base;
3066         } else {
3067                 logic_end = logical + increment * nstripes;
3068         }
3069         wait_event(sctx->list_wait,
3070                    atomic_read(&sctx->bios_in_flight) == 0);
3071         scrub_blocked_if_needed(fs_info);
3072
3073         /* FIXME it might be better to start readahead at commit root */
3074         key_start.objectid = logical;
3075         key_start.type = BTRFS_EXTENT_ITEM_KEY;
3076         key_start.offset = (u64)0;
3077         key_end.objectid = logic_end;
3078         key_end.type = BTRFS_METADATA_ITEM_KEY;
3079         key_end.offset = (u64)-1;
3080         reada1 = btrfs_reada_add(root, &key_start, &key_end);
3081
3082         key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3083         key_start.type = BTRFS_EXTENT_CSUM_KEY;
3084         key_start.offset = logical;
3085         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3086         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3087         key_end.offset = logic_end;
3088         reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
3089
3090         if (!IS_ERR(reada1))
3091                 btrfs_reada_wait(reada1);
3092         if (!IS_ERR(reada2))
3093                 btrfs_reada_wait(reada2);
3094
3095
3096         /*
3097          * collect all data csums for the stripe to avoid seeking during
3098          * the scrub. This might currently (crc32) end up being about 1MB
3099          */
3100         blk_start_plug(&plug);
3101
3102         /*
3103          * now find all extents for each stripe and scrub them
3104          */
3105         ret = 0;
3106         while (physical < physical_end) {
3107                 /* for raid56, we skip parity stripe */
3108                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3109                         ret = get_raid56_logic_offset(physical, num,
3110                                         map, &logical, &stripe_logical);
3111                         logical += base;
3112                         if (ret) {
3113                                 stripe_logical += base;
3114                                 stripe_end = stripe_logical + increment - 1;
3115                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3116                                                 ppath, stripe_logical,
3117                                                 stripe_end);
3118                                 if (ret)
3119                                         goto out;
3120                                 goto skip;
3121                         }
3122                 }
3123                 /*
3124                  * canceled?
3125                  */
3126                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3127                     atomic_read(&sctx->cancel_req)) {
3128                         ret = -ECANCELED;
3129                         goto out;
3130                 }
3131                 /*
3132                  * check to see if we have to pause
3133                  */
3134                 if (atomic_read(&fs_info->scrub_pause_req)) {
3135                         /* push queued extents */
3136                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3137                         scrub_submit(sctx);
3138                         mutex_lock(&sctx->wr_ctx.wr_lock);
3139                         scrub_wr_submit(sctx);
3140                         mutex_unlock(&sctx->wr_ctx.wr_lock);
3141                         wait_event(sctx->list_wait,
3142                                    atomic_read(&sctx->bios_in_flight) == 0);
3143                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3144                         scrub_blocked_if_needed(fs_info);
3145                 }
3146
3147                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3148                         key.type = BTRFS_METADATA_ITEM_KEY;
3149                 else
3150                         key.type = BTRFS_EXTENT_ITEM_KEY;
3151                 key.objectid = logical;
3152                 key.offset = (u64)-1;
3153
3154                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3155                 if (ret < 0)
3156                         goto out;
3157
3158                 if (ret > 0) {
3159                         ret = btrfs_previous_extent_item(root, path, 0);
3160                         if (ret < 0)
3161                                 goto out;
3162                         if (ret > 0) {
3163                                 /* there's no smaller item, so stick with the
3164                                  * larger one */
3165                                 btrfs_release_path(path);
3166                                 ret = btrfs_search_slot(NULL, root, &key,
3167                                                         path, 0, 0);
3168                                 if (ret < 0)
3169                                         goto out;
3170                         }
3171                 }
3172
3173                 stop_loop = 0;
3174                 while (1) {
3175                         u64 bytes;
3176
3177                         l = path->nodes[0];
3178                         slot = path->slots[0];
3179                         if (slot >= btrfs_header_nritems(l)) {
3180                                 ret = btrfs_next_leaf(root, path);
3181                                 if (ret == 0)
3182                                         continue;
3183                                 if (ret < 0)
3184                                         goto out;
3185
3186                                 stop_loop = 1;
3187                                 break;
3188                         }
3189                         btrfs_item_key_to_cpu(l, &key, slot);
3190
3191                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3192                                 bytes = root->nodesize;
3193                         else
3194                                 bytes = key.offset;
3195
3196                         if (key.objectid + bytes <= logical)
3197                                 goto next;
3198
3199                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3200                             key.type != BTRFS_METADATA_ITEM_KEY)
3201                                 goto next;
3202
3203                         if (key.objectid >= logical + map->stripe_len) {
3204                                 /* out of this device extent */
3205                                 if (key.objectid >= logic_end)
3206                                         stop_loop = 1;
3207                                 break;
3208                         }
3209
3210                         extent = btrfs_item_ptr(l, slot,
3211                                                 struct btrfs_extent_item);
3212                         flags = btrfs_extent_flags(l, extent);
3213                         generation = btrfs_extent_generation(l, extent);
3214
3215                         if (key.objectid < logical &&
3216                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
3217                                 btrfs_err(fs_info,
3218                                            "scrub: tree block %llu spanning "
3219                                            "stripes, ignored. logical=%llu",
3220                                        key.objectid, logical);
3221                                 goto next;
3222                         }
3223
3224 again:
3225                         extent_logical = key.objectid;
3226                         extent_len = bytes;
3227
3228                         /*
3229                          * trim extent to this stripe
3230                          */
3231                         if (extent_logical < logical) {
3232                                 extent_len -= logical - extent_logical;
3233                                 extent_logical = logical;
3234                         }
3235                         if (extent_logical + extent_len >
3236                             logical + map->stripe_len) {
3237                                 extent_len = logical + map->stripe_len -
3238                                              extent_logical;
3239                         }
3240
3241                         extent_physical = extent_logical - logical + physical;
3242                         extent_dev = scrub_dev;
3243                         extent_mirror_num = mirror_num;
3244                         if (is_dev_replace)
3245                                 scrub_remap_extent(fs_info, extent_logical,
3246                                                    extent_len, &extent_physical,
3247                                                    &extent_dev,
3248                                                    &extent_mirror_num);
3249
3250                         ret = btrfs_lookup_csums_range(csum_root, logical,
3251                                                 logical + map->stripe_len - 1,
3252                                                 &sctx->csum_list, 1);
3253                         if (ret)
3254                                 goto out;
3255
3256                         ret = scrub_extent(sctx, extent_logical, extent_len,
3257                                            extent_physical, extent_dev, flags,
3258                                            generation, extent_mirror_num,
3259                                            extent_logical - logical + physical);
3260                         if (ret)
3261                                 goto out;
3262
3263                         scrub_free_csums(sctx);
3264                         if (extent_logical + extent_len <
3265                             key.objectid + bytes) {
3266                                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3267                                         /*
3268                                          * loop until we find next data stripe
3269                                          * or we have finished all stripes.
3270                                          */
3271 loop:
3272                                         physical += map->stripe_len;
3273                                         ret = get_raid56_logic_offset(physical,
3274                                                         num, map, &logical,
3275                                                         &stripe_logical);
3276                                         logical += base;
3277
3278                                         if (ret && physical < physical_end) {
3279                                                 stripe_logical += base;
3280                                                 stripe_end = stripe_logical +
3281                                                                 increment - 1;
3282                                                 ret = scrub_raid56_parity(sctx,
3283                                                         map, scrub_dev, ppath,
3284                                                         stripe_logical,
3285                                                         stripe_end);
3286                                                 if (ret)
3287                                                         goto out;
3288                                                 goto loop;
3289                                         }
3290                                 } else {
3291                                         physical += map->stripe_len;
3292                                         logical += increment;
3293                                 }
3294                                 if (logical < key.objectid + bytes) {
3295                                         cond_resched();
3296                                         goto again;
3297                                 }
3298
3299                                 if (physical >= physical_end) {
3300                                         stop_loop = 1;
3301                                         break;
3302                                 }
3303                         }
3304 next:
3305                         path->slots[0]++;
3306                 }
3307                 btrfs_release_path(path);
3308 skip:
3309                 logical += increment;
3310                 physical += map->stripe_len;
3311                 spin_lock(&sctx->stat_lock);
3312                 if (stop_loop)
3313                         sctx->stat.last_physical = map->stripes[num].physical +
3314                                                    length;
3315                 else
3316                         sctx->stat.last_physical = physical;
3317                 spin_unlock(&sctx->stat_lock);
3318                 if (stop_loop)
3319                         break;
3320         }
3321 out:
3322         /* push queued extents */
3323         scrub_submit(sctx);
3324         mutex_lock(&sctx->wr_ctx.wr_lock);
3325         scrub_wr_submit(sctx);
3326         mutex_unlock(&sctx->wr_ctx.wr_lock);
3327
3328         blk_finish_plug(&plug);
3329         btrfs_free_path(path);
3330         btrfs_free_path(ppath);
3331         return ret < 0 ? ret : 0;
3332 }
3333
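     /*
      * Look up the chunk mapping for @chunk_offset and scrub every stripe
      * of that chunk which lives on @scrub_dev at @dev_offset.
      */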
3334 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3335                                           struct btrfs_device *scrub_dev,
3336                                           u64 chunk_tree, u64 chunk_objectid,
3337                                           u64 chunk_offset, u64 length,
3338                                           u64 dev_offset, int is_dev_replace)
3339 {
3340         struct btrfs_mapping_tree *map_tree =
3341                 &sctx->dev_root->fs_info->mapping_tree;
3342         struct map_lookup *map;
3343         struct extent_map *em;
3344         int i;
3345         int ret = 0;
3346
3347         read_lock(&map_tree->map_tree.lock);
3348         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3349         read_unlock(&map_tree->map_tree.lock);
3350
3351         if (!em)
3352                 return -EINVAL;
3353
3354         map = (struct map_lookup *)em->bdev;
3355         if (em->start != chunk_offset)
3356                 goto out;
3357
3358         if (em->len < length)
3359                 goto out;
3360
3361         for (i = 0; i < map->num_stripes; ++i) {
3362                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3363                     map->stripes[i].physical == dev_offset) {
3364                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3365                                            chunk_offset, length,
3366                                            is_dev_replace);
3367                         if (ret)
3368                                 goto out;
3369                 }
3370         }
3371 out:
3372         free_extent_map(em);
3373
3374         return ret;
3375 }
3376
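     /*
      * Walk all DEV_EXTENT items of @scrub_dev that overlap [start, end)
      * and scrub the chunk each of them belongs to. Between chunks all
      * pending read and write bios are flushed and the scrub briefly
      * counts itself as paused so that transaction commits are not held
      * off.
      */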
3377 static noinline_for_stack
3378 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3379                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3380                            int is_dev_replace)
3381 {
3382         struct btrfs_dev_extent *dev_extent = NULL;
3383         struct btrfs_path *path;
3384         struct btrfs_root *root = sctx->dev_root;
3385         struct btrfs_fs_info *fs_info = root->fs_info;
3386         u64 length;
3387         u64 chunk_tree;
3388         u64 chunk_objectid;
3389         u64 chunk_offset;
3390         int ret;
3391         int slot;
3392         struct extent_buffer *l;
3393         struct btrfs_key key;
3394         struct btrfs_key found_key;
3395         struct btrfs_block_group_cache *cache;
3396         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3397
3398         path = btrfs_alloc_path();
3399         if (!path)
3400                 return -ENOMEM;
3401
3402         path->reada = 2;
3403         path->search_commit_root = 1;
3404         path->skip_locking = 1;
3405
3406         key.objectid = scrub_dev->devid;
3407         key.offset = 0ull;
3408         key.type = BTRFS_DEV_EXTENT_KEY;
3409
3410         while (1) {
3411                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3412                 if (ret < 0)
3413                         break;
3414                 if (ret > 0) {
3415                         if (path->slots[0] >=
3416                             btrfs_header_nritems(path->nodes[0])) {
3417                                 ret = btrfs_next_leaf(root, path);
3418                                 if (ret)
3419                                         break;
3420                         }
3421                 }
3422
3423                 l = path->nodes[0];
3424                 slot = path->slots[0];
3425
3426                 btrfs_item_key_to_cpu(l, &found_key, slot);
3427
3428                 if (found_key.objectid != scrub_dev->devid)
3429                         break;
3430
3431                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3432                         break;
3433
3434                 if (found_key.offset >= end)
3435                         break;
3436
3437                 if (found_key.offset < key.offset)
3438                         break;
3439
3440                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3441                 length = btrfs_dev_extent_length(l, dev_extent);
3442
3443                 if (found_key.offset + length <= start)
3444                         goto skip;
3445
3446                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3447                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3448                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3449
3450                 /*
3451                  * get a reference on the corresponding block group to prevent
3452                  * the chunk from going away while we scrub it
3453                  */
3454                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3455
3456                 /* some chunks are removed but not yet committed to disk;
3457                  * continue scrubbing */
3458                 if (!cache)
3459                         goto skip;
3460
3461                 dev_replace->cursor_right = found_key.offset + length;
3462                 dev_replace->cursor_left = found_key.offset;
3463                 dev_replace->item_needs_writeback = 1;
3464                 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
3465                                   chunk_offset, length, found_key.offset,
3466                                   is_dev_replace);
3467
3468                 /*
3469                  * flush, submit all pending read and write bios, afterwards
3470                  * wait for them.
3471                  * Note that in the dev replace case, a read request causes
3472                  * write requests that are submitted in the read completion
3473                  * worker. Therefore in the current situation, it is required
3474                  * that all write requests are flushed, so that all read and
3475                  * write requests are really completed when bios_in_flight
3476                  * changes to 0.
3477                  */
3478                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3479                 scrub_submit(sctx);
3480                 mutex_lock(&sctx->wr_ctx.wr_lock);
3481                 scrub_wr_submit(sctx);
3482                 mutex_unlock(&sctx->wr_ctx.wr_lock);
3483
3484                 wait_event(sctx->list_wait,
3485                            atomic_read(&sctx->bios_in_flight) == 0);
3486                 atomic_inc(&fs_info->scrubs_paused);
3487                 wake_up(&fs_info->scrub_pause_wait);
3488
3489                 /*
3490                  * must be called before we decrease @scrub_paused.
3491                  * make sure we don't block transaction commit while we
3492                  * are waiting for pending workers to finish.
3493                  */
3494                 wait_event(sctx->list_wait,
3495                            atomic_read(&sctx->workers_pending) == 0);
3496                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3497
3498                 mutex_lock(&fs_info->scrub_lock);
3499                 __scrub_blocked_if_needed(fs_info);
3500                 atomic_dec(&fs_info->scrubs_paused);
3501                 mutex_unlock(&fs_info->scrub_lock);
3502                 wake_up(&fs_info->scrub_pause_wait);
3503
3504                 btrfs_put_block_group(cache);
3505                 if (ret)
3506                         break;
3507                 if (is_dev_replace &&
3508                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3509                         ret = -EIO;
3510                         break;
3511                 }
3512                 if (sctx->stat.malloc_errors > 0) {
3513                         ret = -ENOMEM;
3514                         break;
3515                 }
3516
3517                 dev_replace->cursor_left = dev_replace->cursor_right;
3518                 dev_replace->item_needs_writeback = 1;
3519 skip:
3520                 key.offset = found_key.offset + length;
3521                 btrfs_release_path(path);
3522         }
3523
3524         btrfs_free_path(path);
3525
3526         /*
3527          * ret can still be 1 from search_slot or next_leaf,
3528          * that's not an error
3529          */
3530         return ret < 0 ? ret : 0;
3531 }
3532
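     /*
      * Scrub all superblock mirrors that fit within the committed size of
      * @scrub_dev, using the device's own generation for seed devices and
      * the last committed transaction generation otherwise.
      */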
3533 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3534                                            struct btrfs_device *scrub_dev)
3535 {
3536         int     i;
3537         u64     bytenr;
3538         u64     gen;
3539         int     ret;
3540         struct btrfs_root *root = sctx->dev_root;
3541
3542         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
3543                 return -EIO;
3544
3545         /* Seed devices of a new filesystem have their own generation. */
3546         if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3547                 gen = scrub_dev->generation;
3548         else
3549                 gen = root->fs_info->last_trans_committed;
3550
3551         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3552                 bytenr = btrfs_sb_offset(i);
3553                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3554                     scrub_dev->commit_total_bytes)
3555                         break;
3556
3557                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3558                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3559                                   NULL, 1, bytenr);
3560                 if (ret)
3561                         return ret;
3562         }
3563         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3564
3565         return 0;
3566 }
3567
3568 /*
3569  * get a reference count on fs_info->scrub_workers. start the workers if necessary
3570  */
3571 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3572                                                 int is_dev_replace)
3573 {
3574         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3575         int max_active = fs_info->thread_pool_size;
3576
3577         if (fs_info->scrub_workers_refcnt == 0) {
3578                 if (is_dev_replace)
3579                         fs_info->scrub_workers =
3580                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3581                                                       1, 4);
3582                 else
3583                         fs_info->scrub_workers =
3584                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3585                                                       max_active, 4);
3586                 if (!fs_info->scrub_workers)
3587                         goto fail_scrub_workers;
3588
3589                 fs_info->scrub_wr_completion_workers =
3590                         btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3591                                               max_active, 2);
3592                 if (!fs_info->scrub_wr_completion_workers)
3593                         goto fail_scrub_wr_completion_workers;
3594
3595                 fs_info->scrub_nocow_workers =
3596                         btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3597                 if (!fs_info->scrub_nocow_workers)
3598                         goto fail_scrub_nocow_workers;
3599                 fs_info->scrub_parity_workers =
3600                         btrfs_alloc_workqueue("btrfs-scrubparity", flags,
3601                                               max_active, 2);
3602                 if (!fs_info->scrub_parity_workers)
3603                         goto fail_scrub_parity_workers;
3604         }
3605         ++fs_info->scrub_workers_refcnt;
3606         return 0;
3607
3608 fail_scrub_parity_workers:
3609         btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3610 fail_scrub_nocow_workers:
3611         btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3612 fail_scrub_wr_completion_workers:
3613         btrfs_destroy_workqueue(fs_info->scrub_workers);
3614 fail_scrub_workers:
3615         return -ENOMEM;
3616 }
3617
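     /*
      * drop a reference on the scrub workqueues; the last reference
      * destroys them
      */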
3618 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3619 {
3620         if (--fs_info->scrub_workers_refcnt == 0) {
3621                 btrfs_destroy_workqueue(fs_info->scrub_workers);
3622                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3623                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3624                 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
3625         }
3626         WARN_ON(fs_info->scrub_workers_refcnt < 0);
3627 }
3628
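     /*
      * Entry point for both scrub and dev-replace: validate the size
      * assumptions scrub relies on, take references on the worker queues,
      * set up a scrub context for @devid, then scrub the superblocks
      * (plain scrub only) and all chunks in [start, end].
      */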
3629 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3630                     u64 end, struct btrfs_scrub_progress *progress,
3631                     int readonly, int is_dev_replace)
3632 {
3633         struct scrub_ctx *sctx;
3634         int ret;
3635         struct btrfs_device *dev;
3636         struct rcu_string *name;
3637
3638         if (btrfs_fs_closing(fs_info))
3639                 return -EINVAL;
3640
3641         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
3642                 /*
3643                  * in this case scrub is unable to calculate the checksum
3644                  * the way it is implemented. Refuse to start rather than
3645                  * handle a situation that will never happen.
3646                  */
3647                 btrfs_err(fs_info,
3648                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3649                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
3650                 return -EINVAL;
3651         }
3652
3653         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
3654                 /* not supported for data w/o checksums */
3655                 btrfs_err(fs_info,
3656                            "scrub: size assumption sectorsize != PAGE_SIZE "
3657                            "(%d != %lu) fails",
3658                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
3659                 return -EINVAL;
3660         }
3661
3662         if (fs_info->chunk_root->nodesize >
3663             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3664             fs_info->chunk_root->sectorsize >
3665             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3666                 /*
3667                  * would exhaust the array bounds of pagev member in
3668                  * struct scrub_block
3669                  */
3670                 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3671                            "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3672                        fs_info->chunk_root->nodesize,
3673                        SCRUB_MAX_PAGES_PER_BLOCK,
3674                        fs_info->chunk_root->sectorsize,
3675                        SCRUB_MAX_PAGES_PER_BLOCK);
3676                 return -EINVAL;
3677         }
3678
3679
3680         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3681         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3682         if (!dev || (dev->missing && !is_dev_replace)) {
3683                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3684                 return -ENODEV;
3685         }
3686
3687         if (!is_dev_replace && !readonly && !dev->writeable) {
3688                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3689                 rcu_read_lock();
3690                 name = rcu_dereference(dev->name);
3691                 btrfs_err(fs_info, "scrub: device %s is not writable",
3692                           name->str);
3693                 rcu_read_unlock();
3694                 return -EROFS;
3695         }
3696
3697         mutex_lock(&fs_info->scrub_lock);
3698         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3699                 mutex_unlock(&fs_info->scrub_lock);
3700                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3701                 return -EIO;
3702         }
3703
3704         btrfs_dev_replace_lock(&fs_info->dev_replace);
3705         if (dev->scrub_device ||
3706             (!is_dev_replace &&
3707              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3708                 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3709                 mutex_unlock(&fs_info->scrub_lock);
3710                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3711                 return -EINPROGRESS;
3712         }
3713         btrfs_dev_replace_unlock(&fs_info->dev_replace);
3714
3715         ret = scrub_workers_get(fs_info, is_dev_replace);
3716         if (ret) {
3717                 mutex_unlock(&fs_info->scrub_lock);
3718                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3719                 return ret;
3720         }
3721
3722         sctx = scrub_setup_ctx(dev, is_dev_replace);
3723         if (IS_ERR(sctx)) {
3724                 mutex_unlock(&fs_info->scrub_lock);
3725                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3726                 scrub_workers_put(fs_info);
3727                 return PTR_ERR(sctx);
3728         }
3729         sctx->readonly = readonly;
3730         dev->scrub_device = sctx;
3731         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3732
3733         /*
3734          * by checking @scrub_pause_req here, we can avoid a race
3735          * between transaction commit and scrubbing.
3736          */
3737         __scrub_blocked_if_needed(fs_info);
3738         atomic_inc(&fs_info->scrubs_running);
3739         mutex_unlock(&fs_info->scrub_lock);
3740
3741         if (!is_dev_replace) {
3742                 /*
3743                  * by holding the device list mutex, we can
3744                  * kick off writing the super during log tree sync.
3745                  */
3746                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3747                 ret = scrub_supers(sctx, dev);
3748                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3749         }
3750
3751         if (!ret)
3752                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3753                                              is_dev_replace);
3754
3755         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3756         atomic_dec(&fs_info->scrubs_running);
3757         wake_up(&fs_info->scrub_pause_wait);
3758
3759         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3760
3761         if (progress)
3762                 memcpy(progress, &sctx->stat, sizeof(*progress));
3763
3764         mutex_lock(&fs_info->scrub_lock);
3765         dev->scrub_device = NULL;
3766         scrub_workers_put(fs_info);
3767         mutex_unlock(&fs_info->scrub_lock);
3768
3769         scrub_put_ctx(sctx);
3770
3771         return ret;
3772 }
3773
3774 void btrfs_scrub_pause(struct btrfs_root *root)
3775 {
3776         struct btrfs_fs_info *fs_info = root->fs_info;
3777
3778         mutex_lock(&fs_info->scrub_lock);
3779         atomic_inc(&fs_info->scrub_pause_req);
3780         while (atomic_read(&fs_info->scrubs_paused) !=
3781                atomic_read(&fs_info->scrubs_running)) {
3782                 mutex_unlock(&fs_info->scrub_lock);
3783                 wait_event(fs_info->scrub_pause_wait,
3784                            atomic_read(&fs_info->scrubs_paused) ==
3785                            atomic_read(&fs_info->scrubs_running));
3786                 mutex_lock(&fs_info->scrub_lock);
3787         }
3788         mutex_unlock(&fs_info->scrub_lock);
3789 }
3790
3791 void btrfs_scrub_continue(struct btrfs_root *root)
3792 {
3793         struct btrfs_fs_info *fs_info = root->fs_info;
3794
3795         atomic_dec(&fs_info->scrub_pause_req);
3796         wake_up(&fs_info->scrub_pause_wait);
3797 }
3798
3799 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3800 {
3801         mutex_lock(&fs_info->scrub_lock);
3802         if (!atomic_read(&fs_info->scrubs_running)) {
3803                 mutex_unlock(&fs_info->scrub_lock);
3804                 return -ENOTCONN;
3805         }
3806
3807         atomic_inc(&fs_info->scrub_cancel_req);
3808         while (atomic_read(&fs_info->scrubs_running)) {
3809                 mutex_unlock(&fs_info->scrub_lock);
3810                 wait_event(fs_info->scrub_pause_wait,
3811                            atomic_read(&fs_info->scrubs_running) == 0);
3812                 mutex_lock(&fs_info->scrub_lock);
3813         }
3814         atomic_dec(&fs_info->scrub_cancel_req);
3815         mutex_unlock(&fs_info->scrub_lock);
3816
3817         return 0;
3818 }
3819
3820 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3821                            struct btrfs_device *dev)
3822 {
3823         struct scrub_ctx *sctx;
3824
3825         mutex_lock(&fs_info->scrub_lock);
3826         sctx = dev->scrub_device;
3827         if (!sctx) {
3828                 mutex_unlock(&fs_info->scrub_lock);
3829                 return -ENOTCONN;
3830         }
3831         atomic_inc(&sctx->cancel_req);
3832         while (dev->scrub_device) {
3833                 mutex_unlock(&fs_info->scrub_lock);
3834                 wait_event(fs_info->scrub_pause_wait,
3835                            dev->scrub_device == NULL);
3836                 mutex_lock(&fs_info->scrub_lock);
3837         }
3838         mutex_unlock(&fs_info->scrub_lock);
3839
3840         return 0;
3841 }
3842
3843 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3844                          struct btrfs_scrub_progress *progress)
3845 {
3846         struct btrfs_device *dev;
3847         struct scrub_ctx *sctx = NULL;
3848
3849         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3850         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
3851         if (dev)
3852                 sctx = dev->scrub_device;
3853         if (sctx)
3854                 memcpy(progress, &sctx->stat, sizeof(*progress));
3855         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3856
3857         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3858 }
3859
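     /*
      * Map a logical extent to the physical offset, device and mirror
      * number of the first stripe returned by btrfs_map_block(); callers
      * use this to override the location they derived from the chunk map.
      */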
3860 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3861                                u64 extent_logical, u64 extent_len,
3862                                u64 *extent_physical,
3863                                struct btrfs_device **extent_dev,
3864                                int *extent_mirror_num)
3865 {
3866         u64 mapped_length;
3867         struct btrfs_bio *bbio = NULL;
3868         int ret;
3869
3870         mapped_length = extent_len;
3871         ret = btrfs_map_block(fs_info, READ, extent_logical,
3872                               &mapped_length, &bbio, 0);
3873         if (ret || !bbio || mapped_length < extent_len ||
3874             !bbio->stripes[0].dev->bdev) {
3875                 btrfs_put_bbio(bbio);
3876                 return;
3877         }
3878
3879         *extent_physical = bbio->stripes[0].physical;
3880         *extent_mirror_num = bbio->mirror_num;
3881         *extent_dev = bbio->stripes[0].dev;
3882         btrfs_put_bbio(bbio);
3883 }
3884
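     /*
      * Set up the write context used for dev-replace: a plain scrub only
      * needs the lock initialized, while a replace also records the
      * target device and the page limit per write bio.
      */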
3885 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3886                               struct scrub_wr_ctx *wr_ctx,
3887                               struct btrfs_fs_info *fs_info,
3888                               struct btrfs_device *dev,
3889                               int is_dev_replace)
3890 {
3891         WARN_ON(wr_ctx->wr_curr_bio != NULL);
3892
3893         mutex_init(&wr_ctx->wr_lock);
3894         wr_ctx->wr_curr_bio = NULL;
3895         if (!is_dev_replace)
3896                 return 0;
3897
3898         WARN_ON(!dev->bdev);
3899         wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3900                                          bio_get_nr_vecs(dev->bdev));
3901         wr_ctx->tgtdev = dev;
3902         atomic_set(&wr_ctx->flush_all_writes, 0);
3903         return 0;
3904 }
3905
3906 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3907 {
3908         mutex_lock(&wr_ctx->wr_lock);
3909         kfree(wr_ctx->wr_curr_bio);
3910         wr_ctx->wr_curr_bio = NULL;
3911         mutex_unlock(&wr_ctx->wr_lock);
3912 }
3913
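     /*
      * Dev-replace path for nocow extents: queue a worker that looks up
      * every inode referencing @logical and copies the pages of this
      * range to @physical_for_dev_replace on the target device.
      */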
3914 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3915                             int mirror_num, u64 physical_for_dev_replace)
3916 {
3917         struct scrub_copy_nocow_ctx *nocow_ctx;
3918         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3919
3920         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3921         if (!nocow_ctx) {
3922                 spin_lock(&sctx->stat_lock);
3923                 sctx->stat.malloc_errors++;
3924                 spin_unlock(&sctx->stat_lock);
3925                 return -ENOMEM;
3926         }
3927
3928         scrub_pending_trans_workers_inc(sctx);
3929
3930         nocow_ctx->sctx = sctx;
3931         nocow_ctx->logical = logical;
3932         nocow_ctx->len = len;
3933         nocow_ctx->mirror_num = mirror_num;
3934         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3935         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
3936                         copy_nocow_pages_worker, NULL, NULL);
3937         INIT_LIST_HEAD(&nocow_ctx->inodes);
3938         btrfs_queue_work(fs_info->scrub_nocow_workers,
3939                          &nocow_ctx->work);
3940
3941         return 0;
3942 }
3943
3944 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3945 {
3946         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3947         struct scrub_nocow_inode *nocow_inode;
3948
3949         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3950         if (!nocow_inode)
3951                 return -ENOMEM;
3952         nocow_inode->inum = inum;
3953         nocow_inode->offset = offset;
3954         nocow_inode->root = root;
3955         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3956         return 0;
3957 }
3958
3959 #define COPY_COMPLETE 1
3960
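     /*
      * Worker for copy_nocow_pages(): collect every inode that references
      * the logical range, then copy the pages for each inode in turn until
      * one copy completes. Failures before the copy stage are counted as
      * uncorrectable read errors on the dev-replace item.
      */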
3961 static void copy_nocow_pages_worker(struct btrfs_work *work)
3962 {
3963         struct scrub_copy_nocow_ctx *nocow_ctx =
3964                 container_of(work, struct scrub_copy_nocow_ctx, work);
3965         struct scrub_ctx *sctx = nocow_ctx->sctx;
3966         u64 logical = nocow_ctx->logical;
3967         u64 len = nocow_ctx->len;
3968         int mirror_num = nocow_ctx->mirror_num;
3969         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3970         int ret;
3971         struct btrfs_trans_handle *trans = NULL;
3972         struct btrfs_fs_info *fs_info;
3973         struct btrfs_path *path;
3974         struct btrfs_root *root;
3975         int not_written = 0;
3976
3977         fs_info = sctx->dev_root->fs_info;
3978         root = fs_info->extent_root;
3979
3980         path = btrfs_alloc_path();
3981         if (!path) {
3982                 spin_lock(&sctx->stat_lock);
3983                 sctx->stat.malloc_errors++;
3984                 spin_unlock(&sctx->stat_lock);
3985                 not_written = 1;
3986                 goto out;
3987         }
3988
3989         trans = btrfs_join_transaction(root);
3990         if (IS_ERR(trans)) {
3991                 not_written = 1;
3992                 goto out;
3993         }
3994
3995         ret = iterate_inodes_from_logical(logical, fs_info, path,
3996                                           record_inode_for_nocow, nocow_ctx);
3997         if (ret != 0 && ret != -ENOENT) {
3998                 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
3999                         "phys %llu, len %llu, mir %u, ret %d",
4000                         logical, physical_for_dev_replace, len, mirror_num,
4001                         ret);
4002                 not_written = 1;
4003                 goto out;
4004         }
4005
4006         btrfs_end_transaction(trans, root);
4007         trans = NULL;
4008         while (!list_empty(&nocow_ctx->inodes)) {
4009                 struct scrub_nocow_inode *entry;
4010                 entry = list_first_entry(&nocow_ctx->inodes,
4011                                          struct scrub_nocow_inode,
4012                                          list);
4013                 list_del_init(&entry->list);
4014                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4015                                                  entry->root, nocow_ctx);
4016                 kfree(entry);
4017                 if (ret == COPY_COMPLETE) {
4018                         ret = 0;
4019                         break;
4020                 } else if (ret) {
4021                         break;
4022                 }
4023         }
4024 out:
4025         while (!list_empty(&nocow_ctx->inodes)) {
4026                 struct scrub_nocow_inode *entry;
4027                 entry = list_first_entry(&nocow_ctx->inodes,
4028                                          struct scrub_nocow_inode,
4029                                          list);
4030                 list_del_init(&entry->list);
4031                 kfree(entry);
4032         }
4033         if (trans && !IS_ERR(trans))
4034                 btrfs_end_transaction(trans, root);
4035         if (not_written)
4036                 btrfs_dev_replace_stats_inc(
4037                         &fs_info->dev_replace.num_uncorrectable_read_errors);
4038
4039         btrfs_free_path(path);
4040         kfree(nocow_ctx);
4041
4042         scrub_pending_trans_workers_dec(sctx);
4043 }
4044
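/*
 * With the file range [start, start + len) locked, check that no ordered
 * extent is pending and that the extent map still covers the on-disk range
 * [logical, logical + len).  Returns 0 if the copy may proceed, 1 if the
 * block has moved and should be skipped, or a negative errno.
 */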
4045 static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4046                                  u64 logical)
4047 {
4048         struct extent_state *cached_state = NULL;
4049         struct btrfs_ordered_extent *ordered;
4050         struct extent_io_tree *io_tree;
4051         struct extent_map *em;
4052         u64 lockstart = start, lockend = start + len - 1;
4053         int ret = 0;
4054
4055         io_tree = &BTRFS_I(inode)->io_tree;
4056
4057         lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
4058         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4059         if (ordered) {
4060                 btrfs_put_ordered_extent(ordered);
4061                 ret = 1;
4062                 goto out_unlock;
4063         }
4064
4065         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4066         if (IS_ERR(em)) {
4067                 ret = PTR_ERR(em);
4068                 goto out_unlock;
4069         }
4070
4071         /*
4072          * The extent map no longer covers the logical extent we are
4073          * copying, so move on to the next inode.
4074          */
4075         if (em->block_start > logical ||
4076             em->block_start + em->block_len < logical + len) {
4077                 free_extent_map(em);
4078                 ret = 1;
4079                 goto out_unlock;
4080         }
4081         free_extent_map(em);
4082
4083 out_unlock:
4084         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4085                              GFP_NOFS);
4086         return ret;
4087 }
4088
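/*
 * Copy the NOCOW extent through the page cache of one inode: read each page
 * that is not already cached, re-check that the extent has not been moved or
 * rewritten, and write the page to the dev-replace target with
 * write_page_nocow().  Returns COPY_COMPLETE once the whole range has been
 * processed, 0 if this inode's mapping no longer covers the range, or a
 * negative errno.
 */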
4089 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4090                                       struct scrub_copy_nocow_ctx *nocow_ctx)
4091 {
4092         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
4093         struct btrfs_key key;
4094         struct inode *inode;
4095         struct page *page;
4096         struct btrfs_root *local_root;
4097         struct extent_io_tree *io_tree;
4098         u64 physical_for_dev_replace;
4099         u64 nocow_ctx_logical;
4100         u64 len = nocow_ctx->len;
4101         unsigned long index;
4102         int srcu_index;
4103         int ret = 0;
4104         int err = 0;
4105
4106         key.objectid = root;
4107         key.type = BTRFS_ROOT_ITEM_KEY;
4108         key.offset = (u64)-1;
4109
4110         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4111
4112         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4113         if (IS_ERR(local_root)) {
4114                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4115                 return PTR_ERR(local_root);
4116         }
4117
4118         key.type = BTRFS_INODE_ITEM_KEY;
4119         key.objectid = inum;
4120         key.offset = 0;
4121         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4122         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4123         if (IS_ERR(inode))
4124                 return PTR_ERR(inode);
4125
4126         /* Serialize against truncate, direct I/O and hole punching. */
4127         mutex_lock(&inode->i_mutex);
4128         inode_dio_wait(inode);
4129
4130         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4131         io_tree = &BTRFS_I(inode)->io_tree;
4132         nocow_ctx_logical = nocow_ctx->logical;
4133
4134         ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
4135         if (ret) {
4136                 ret = ret > 0 ? 0 : ret;
4137                 goto out;
4138         }
4139
4140         while (len >= PAGE_CACHE_SIZE) {
4141                 index = offset >> PAGE_CACHE_SHIFT;
4142 again:
4143                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4144                 if (!page) {
4145                         btrfs_err(fs_info, "find_or_create_page() failed");
4146                         ret = -ENOMEM;
4147                         goto out;
4148                 }
4149
4150                 if (PageUptodate(page)) {
4151                         if (PageDirty(page))
4152                                 goto next_page;
4153                 } else {
4154                         ClearPageError(page);
4155                         err = extent_read_full_page(io_tree, page,
4156                                                     btrfs_get_extent,
4157                                                     nocow_ctx->mirror_num);
4158                         if (err) {
4159                                 ret = err;
4160                                 goto next_page;
4161                         }
4162
4163                         lock_page(page);
4164                         /*
4165                          * If the page has been removed from the page cache,
4166                          * the data in it is stale: any new data will have
4167                          * been written to a new page in the page cache, so
4168                          * retry with that page.
4169                          */
4170                         if (page->mapping != inode->i_mapping) {
4171                                 unlock_page(page);
4172                                 page_cache_release(page);
4173                                 goto again;
4174                         }
4175                         if (!PageUptodate(page)) {
4176                                 ret = -EIO;
4177                                 goto next_page;
4178                         }
4179                 }
4180
4181                 ret = check_extent_to_block(inode, offset, len,
4182                                             nocow_ctx_logical);
4183                 if (ret) {
4184                         ret = ret > 0 ? 0 : ret;
4185                         goto next_page;
4186                 }
4187
4188                 err = write_page_nocow(nocow_ctx->sctx,
4189                                        physical_for_dev_replace, page);
4190                 if (err)
4191                         ret = err;
4192 next_page:
4193                 unlock_page(page);
4194                 page_cache_release(page);
4195
4196                 if (ret)
4197                         break;
4198
4199                 offset += PAGE_CACHE_SIZE;
4200                 physical_for_dev_replace += PAGE_CACHE_SIZE;
4201                 nocow_ctx_logical += PAGE_CACHE_SIZE;
4202                 len -= PAGE_CACHE_SIZE;
4203         }
4204         ret = COPY_COMPLETE;
4205 out:
4206         mutex_unlock(&inode->i_mutex);
4207         iput(inode);
4208         return ret;
4209 }
4210
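/*
 * Synchronously write a single page to @physical_for_dev_replace on the
 * dev-replace target device.  Failures bump the device's write error
 * statistics and are reported as -EIO.
 */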
4211 static int write_page_nocow(struct scrub_ctx *sctx,
4212                             u64 physical_for_dev_replace, struct page *page)
4213 {
4214         struct bio *bio;
4215         struct btrfs_device *dev;
4216         int ret;
4217
4218         dev = sctx->wr_ctx.tgtdev;
4219         if (!dev)
4220                 return -EIO;
4221         if (!dev->bdev) {
4222                 printk_ratelimited(KERN_WARNING
4223                         "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
4224                 return -EIO;
4225         }
4226         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4227         if (!bio) {
4228                 spin_lock(&sctx->stat_lock);
4229                 sctx->stat.malloc_errors++;
4230                 spin_unlock(&sctx->stat_lock);
4231                 return -ENOMEM;
4232         }
4233         bio->bi_iter.bi_size = 0;
4234         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4235         bio->bi_bdev = dev->bdev;
4236         ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
4237         if (ret != PAGE_CACHE_SIZE) {
4238 leave_with_eio:
4239                 bio_put(bio);
4240                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4241                 return -EIO;
4242         }
4243
4244         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
4245                 goto leave_with_eio;
4246
4247         bio_put(bio);
4248         return 0;
4249 }