md/raid1: factor out flush_bio_list()
drivers/md/raid1.c
1 /*
2  * raid1.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5  *
6  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7  *
8  * RAID-1 management functions.
9  *
10  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11  *
12  * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
13  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14  *
15  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16  * bitmapped intelligence in resync:
17  *
18  *      - bitmap marked during normal i/o
19  *      - bitmap used to skip nondirty blocks during sync
20  *
21  * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22  * - persistent bitmap code
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License as published by
26  * the Free Software Foundation; either version 2, or (at your option)
27  * any later version.
28  *
29  * You should have received a copy of the GNU General Public License
30  * (for example /usr/src/linux/COPYING); if not, write to the Free
31  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32  */
33
34 #include <linux/slab.h>
35 #include <linux/delay.h>
36 #include <linux/blkdev.h>
37 #include <linux/module.h>
38 #include <linux/seq_file.h>
39 #include <linux/ratelimit.h>
40 #include <linux/sched/signal.h>
41
42 #include <trace/events/block.h>
43
44 #include "md.h"
45 #include "raid1.h"
46 #include "bitmap.h"
47
48 #define UNSUPPORTED_MDDEV_FLAGS         \
49         ((1L << MD_HAS_JOURNAL) |       \
50          (1L << MD_JOURNAL_CLEAN) |     \
51          (1L << MD_HAS_PPL))
52
53 /*
54  * Number of guaranteed r1bios in case of extreme VM load:
55  */
56 #define NR_RAID1_BIOS 256
57
58 /* When we get a read error on a read-only array, we redirect to another
59  * device without failing the first device, or trying to over-write to
60  * correct the read error.  To keep track of bad blocks on a per-bio
61  * level, we store IO_BLOCKED in the appropriate 'bios' pointer
62  */
63 #define IO_BLOCKED ((struct bio *)1)
64 /* When we successfully write to a known bad-block, we need to remove the
65  * bad-block marking which must be done from process context.  So we record
66  * the success by setting devs[n].bio to IO_MADE_GOOD
67  */
68 #define IO_MADE_GOOD ((struct bio *)2)
69
70 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
71
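/*
 * Illustration (hypothetical helper, not part of this file): because
 * IO_BLOCKED and IO_MADE_GOOD are small-integer sentinels rather than
 * real bio pointers, any slot must be screened with BIO_SPECIAL()
 * before being dereferenced, passed to bio_put(), or submitted.  Note
 * that NULL (0) also counts as "special" here.
 */
static inline bool r1_slot_has_real_bio(struct bio *bio)
{
	return !BIO_SPECIAL(bio);	/* false for NULL, IO_BLOCKED, IO_MADE_GOOD */
}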
72 /* When there are this many requests queued to be written by
73  * the raid1 thread, we become 'congested' to provide back-pressure
74  * for writeback.
75  */
76 static int max_queued_requests = 1024;
77
78 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
79 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
80
81 #define raid1_log(md, fmt, args...)                             \
82         do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
83
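/*
 * Usage example (taken from freeze_array() below):
 *
 *	raid1_log(conf->mddev, "wait freeze");
 *
 * which, when the mddev has a request queue, adds a "raid1 wait freeze"
 * message to the block trace stream for that queue.
 */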
84 /*
85  * 'struct resync_pages' stores the actual pages used for doing the resync
86  * IO; it is per-bio, so .bi_private points to it.
87  */
88 static inline struct resync_pages *get_resync_pages(struct bio *bio)
89 {
90         return bio->bi_private;
91 }
92
93 /*
94  * for resync bio, r1bio pointer can be retrieved from the per-bio
95  * 'struct resync_pages'.
96  */
97 static inline struct r1bio *get_resync_r1bio(struct bio *bio)
98 {
99         return get_resync_pages(bio)->raid_bio;
100 }
101
102 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
103 {
104         struct pool_info *pi = data;
105         int size = offsetof(struct r1bio, bios[pi->raid_disks]);
106
107         /* allocate a r1bio with room for raid_disks entries in the bios array */
108         return kzalloc(size, gfp_flags);
109 }
110
111 static void r1bio_pool_free(void *r1_bio, void *data)
112 {
113         kfree(r1_bio);
114 }
115
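/*
 * How these callbacks are wired up -- a sketch matching the mempool
 * setup done in setup_conf() later in this file (shown here only for
 * orientation):
 */
#if 0
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free, conf->poolinfo);
#endif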
116 #define RESYNC_DEPTH 32
117 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
118 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
119 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
120 #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
121 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
122
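/*
 * Worked numbers, assuming RESYNC_BLOCK_SIZE is 64KiB as defined in the
 * shared md headers:
 *	RESYNC_SECTORS        = 64KiB >> 9  = 128 sectors
 *	RESYNC_WINDOW         = 64KiB * 32  = 2MiB
 *	RESYNC_WINDOW_SECTORS = 2MiB  >> 9  = 4096 sectors
 *	CLUSTER_RESYNC_WINDOW = 16 * 2MiB   = 32MiB (65536 sectors)
 */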
123 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
124 {
125         struct pool_info *pi = data;
126         struct r1bio *r1_bio;
127         struct bio *bio;
128         int need_pages;
129         int j;
130         struct resync_pages *rps;
131
132         r1_bio = r1bio_pool_alloc(gfp_flags, pi);
133         if (!r1_bio)
134                 return NULL;
135
136         rps = kmalloc(sizeof(struct resync_pages) * pi->raid_disks,
137                       gfp_flags);
138         if (!rps)
139                 goto out_free_r1bio;
140
141         /*
142  * Allocate bios: 1 for reading, n-1 for writing
143          */
144         for (j = pi->raid_disks ; j-- ; ) {
145                 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
146                 if (!bio)
147                         goto out_free_bio;
148                 r1_bio->bios[j] = bio;
149         }
150         /*
151          * Allocate RESYNC_PAGES data pages and attach them to
152          * the first bio.
153          * If this is a user-requested check/repair, allocate
154          * RESYNC_PAGES for each bio.
155          */
156         if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
157                 need_pages = pi->raid_disks;
158         else
159                 need_pages = 1;
160         for (j = 0; j < pi->raid_disks; j++) {
161                 struct resync_pages *rp = &rps[j];
162
163                 bio = r1_bio->bios[j];
164
165                 if (j < need_pages) {
166                         if (resync_alloc_pages(rp, gfp_flags))
167                                 goto out_free_pages;
168                 } else {
169                         memcpy(rp, &rps[0], sizeof(*rp));
170                         resync_get_all_pages(rp);
171                 }
172
173                 rp->idx = 0;
174                 rp->raid_bio = r1_bio;
175                 bio->bi_private = rp;
176         }
177
178         r1_bio->master_bio = NULL;
179
180         return r1_bio;
181
182 out_free_pages:
183         while (--j >= 0)
184                 resync_free_pages(&rps[j]);
185
186 out_free_bio:
187         while (++j < pi->raid_disks)
188                 bio_put(r1_bio->bios[j]);
189         kfree(rps);
190
191 out_free_r1bio:
192         r1bio_pool_free(r1_bio, data);
193         return NULL;
194 }
195
196 static void r1buf_pool_free(void *__r1_bio, void *data)
197 {
198         struct pool_info *pi = data;
199         int i;
200         struct r1bio *r1bio = __r1_bio;
201         struct resync_pages *rp = NULL;
202
203         for (i = pi->raid_disks; i--; ) {
204                 rp = get_resync_pages(r1bio->bios[i]);
205                 resync_free_pages(rp);
206                 bio_put(r1bio->bios[i]);
207         }
208
209         /* resync pages array stored in the 1st bio's .bi_private */
210         kfree(rp);
211
212         r1bio_pool_free(r1bio, data);
213 }
214
215 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
216 {
217         int i;
218
219         for (i = 0; i < conf->raid_disks * 2; i++) {
220                 struct bio **bio = r1_bio->bios + i;
221                 if (!BIO_SPECIAL(*bio))
222                         bio_put(*bio);
223                 *bio = NULL;
224         }
225 }
226
227 static void free_r1bio(struct r1bio *r1_bio)
228 {
229         struct r1conf *conf = r1_bio->mddev->private;
230
231         put_all_bios(conf, r1_bio);
232         mempool_free(r1_bio, conf->r1bio_pool);
233 }
234
235 static void put_buf(struct r1bio *r1_bio)
236 {
237         struct r1conf *conf = r1_bio->mddev->private;
238         sector_t sect = r1_bio->sector;
239         int i;
240
241         for (i = 0; i < conf->raid_disks * 2; i++) {
242                 struct bio *bio = r1_bio->bios[i];
243                 if (bio->bi_end_io)
244                         rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
245         }
246
247         mempool_free(r1_bio, conf->r1buf_pool);
248
249         lower_barrier(conf, sect);
250 }
251
252 static void reschedule_retry(struct r1bio *r1_bio)
253 {
254         unsigned long flags;
255         struct mddev *mddev = r1_bio->mddev;
256         struct r1conf *conf = mddev->private;
257         int idx;
258
259         idx = sector_to_idx(r1_bio->sector);
260         spin_lock_irqsave(&conf->device_lock, flags);
261         list_add(&r1_bio->retry_list, &conf->retry_list);
262         atomic_inc(&conf->nr_queued[idx]);
263         spin_unlock_irqrestore(&conf->device_lock, flags);
264
265         wake_up(&conf->wait_barrier);
266         md_wakeup_thread(mddev->thread);
267 }
268
269 /*
270  * raid_end_bio_io() is called when we have finished servicing a mirrored
271  * operation and are ready to return a success/failure code to the buffer
272  * cache layer.
273  */
274 static void call_bio_endio(struct r1bio *r1_bio)
275 {
276         struct bio *bio = r1_bio->master_bio;
277         struct r1conf *conf = r1_bio->mddev->private;
278
279         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
280                 bio->bi_error = -EIO;
281
282         bio_endio(bio);
283         /*
284          * Wake up any possible resync thread that waits for the device
285          * to go idle.
286          */
287         allow_barrier(conf, r1_bio->sector);
288 }
289
290 static void raid_end_bio_io(struct r1bio *r1_bio)
291 {
292         struct bio *bio = r1_bio->master_bio;
293
294         /* if nobody has done the final endio yet, do it now */
295         if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
296                 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
297                          (bio_data_dir(bio) == WRITE) ? "write" : "read",
298                          (unsigned long long) bio->bi_iter.bi_sector,
299                          (unsigned long long) bio_end_sector(bio) - 1);
300
301                 call_bio_endio(r1_bio);
302         }
303         free_r1bio(r1_bio);
304 }
305
306 /*
307  * Update disk head position estimator based on IRQ completion info.
308  */
309 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
310 {
311         struct r1conf *conf = r1_bio->mddev->private;
312
313         conf->mirrors[disk].head_position =
314                 r1_bio->sector + (r1_bio->sectors);
315 }
316
317 /*
318  * Find the disk number which triggered given bio
319  */
320 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
321 {
322         int mirror;
323         struct r1conf *conf = r1_bio->mddev->private;
324         int raid_disks = conf->raid_disks;
325
326         for (mirror = 0; mirror < raid_disks * 2; mirror++)
327                 if (r1_bio->bios[mirror] == bio)
328                         break;
329
330         BUG_ON(mirror == raid_disks * 2);
331         update_head_pos(mirror, r1_bio);
332
333         return mirror;
334 }
335
336 static void raid1_end_read_request(struct bio *bio)
337 {
338         int uptodate = !bio->bi_error;
339         struct r1bio *r1_bio = bio->bi_private;
340         struct r1conf *conf = r1_bio->mddev->private;
341         struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
342
343         /*
344          * this branch is our 'one mirror IO has finished' event handler:
345          */
346         update_head_pos(r1_bio->read_disk, r1_bio);
347
348         if (uptodate)
349                 set_bit(R1BIO_Uptodate, &r1_bio->state);
350         else if (test_bit(FailFast, &rdev->flags) &&
351                  test_bit(R1BIO_FailFast, &r1_bio->state))
352                 /* This was a fail-fast read so we definitely
353                  * want to retry */
354                 ;
355         else {
356                 /* If all other devices have failed, we want to return
357                  * the error upwards rather than fail the last device.
358                  * Here we redefine "uptodate" to mean "Don't want to retry"
359                  */
360                 unsigned long flags;
361                 spin_lock_irqsave(&conf->device_lock, flags);
362                 if (r1_bio->mddev->degraded == conf->raid_disks ||
363                     (r1_bio->mddev->degraded == conf->raid_disks-1 &&
364                      test_bit(In_sync, &rdev->flags)))
365                         uptodate = 1;
366                 spin_unlock_irqrestore(&conf->device_lock, flags);
367         }
368
369         if (uptodate) {
370                 raid_end_bio_io(r1_bio);
371                 rdev_dec_pending(rdev, conf->mddev);
372         } else {
373                 /*
374                  * oops, read error:
375                  */
376                 char b[BDEVNAME_SIZE];
377                 pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
378                                    mdname(conf->mddev),
379                                    bdevname(rdev->bdev, b),
380                                    (unsigned long long)r1_bio->sector);
381                 set_bit(R1BIO_ReadError, &r1_bio->state);
382                 reschedule_retry(r1_bio);
383                 /* don't drop the reference on read_disk yet */
384         }
385 }
386
387 static void close_write(struct r1bio *r1_bio)
388 {
389         /* it really is the end of this request */
390         if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
391                 bio_free_pages(r1_bio->behind_master_bio);
392                 bio_put(r1_bio->behind_master_bio);
393                 r1_bio->behind_master_bio = NULL;
394         }
395         /* clear the bitmap if all writes complete successfully */
396         bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
397                         r1_bio->sectors,
398                         !test_bit(R1BIO_Degraded, &r1_bio->state),
399                         test_bit(R1BIO_BehindIO, &r1_bio->state));
400         md_write_end(r1_bio->mddev);
401 }
402
403 static void r1_bio_write_done(struct r1bio *r1_bio)
404 {
405         if (!atomic_dec_and_test(&r1_bio->remaining))
406                 return;
407
408         if (test_bit(R1BIO_WriteError, &r1_bio->state))
409                 reschedule_retry(r1_bio);
410         else {
411                 close_write(r1_bio);
412                 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
413                         reschedule_retry(r1_bio);
414                 else
415                         raid_end_bio_io(r1_bio);
416         }
417 }
418
419 static void raid1_end_write_request(struct bio *bio)
420 {
421         struct r1bio *r1_bio = bio->bi_private;
422         int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
423         struct r1conf *conf = r1_bio->mddev->private;
424         struct bio *to_put = NULL;
425         int mirror = find_bio_disk(r1_bio, bio);
426         struct md_rdev *rdev = conf->mirrors[mirror].rdev;
427         bool discard_error;
428
429         discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
430
431         /*
432          * 'one mirror IO has finished' event handler:
433          */
434         if (bio->bi_error && !discard_error) {
435                 set_bit(WriteErrorSeen, &rdev->flags);
436                 if (!test_and_set_bit(WantReplacement, &rdev->flags))
437                         set_bit(MD_RECOVERY_NEEDED, &
438                                 conf->mddev->recovery);
439
440                 if (test_bit(FailFast, &rdev->flags) &&
441                     (bio->bi_opf & MD_FAILFAST) &&
442                     /* We never try FailFast to WriteMostly devices */
443                     !test_bit(WriteMostly, &rdev->flags)) {
444                         md_error(r1_bio->mddev, rdev);
445                         if (!test_bit(Faulty, &rdev->flags))
446                                 /* This is the only remaining device,
447                                  * so we need to retry the write without
448                                  * FailFast
449                                  */
450                                 set_bit(R1BIO_WriteError, &r1_bio->state);
451                         else {
452                                 /* Finished with this branch */
453                                 r1_bio->bios[mirror] = NULL;
454                                 to_put = bio;
455                         }
456                 } else
457                         set_bit(R1BIO_WriteError, &r1_bio->state);
458         } else {
459                 /*
460                  * Set R1BIO_Uptodate in our master bio, so that we
461                  * will return a good error code to the higher
462                  * levels even if IO on some other mirrored buffer
463                  * fails.
464                  *
465                  * The 'master' represents the composite IO operation
466                  * to user-side. So if something waits for IO, then it
467                  * will wait for the 'master' bio.
468                  */
469                 sector_t first_bad;
470                 int bad_sectors;
471
472                 r1_bio->bios[mirror] = NULL;
473                 to_put = bio;
474                 /*
475                  * Do not set R1BIO_Uptodate if the current device is
476                  * rebuilding or Faulty. This is because we cannot use
477                  * such device for properly reading the data back (we could
478                  * potentially use it, if the current write fell before
479                  * rdev->recovery_offset, but for simplicity we don't
480                  * check this here).
481                  */
482                 if (test_bit(In_sync, &rdev->flags) &&
483                     !test_bit(Faulty, &rdev->flags))
484                         set_bit(R1BIO_Uptodate, &r1_bio->state);
485
486                 /* Maybe we can clear some bad blocks. */
487                 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
488                                 &first_bad, &bad_sectors) && !discard_error) {
489                         r1_bio->bios[mirror] = IO_MADE_GOOD;
490                         set_bit(R1BIO_MadeGood, &r1_bio->state);
491                 }
492         }
493
494         if (behind) {
495                 /* we release the behind master bio when all writes are done */
496                 if (r1_bio->behind_master_bio == bio)
497                         to_put = NULL;
498
499                 if (test_bit(WriteMostly, &rdev->flags))
500                         atomic_dec(&r1_bio->behind_remaining);
501
502                 /*
503                  * In behind mode, we ACK the master bio once the I/O
504                  * has safely reached all non-writemostly
505                  * disks. Setting the Returned bit ensures that this
506                  * gets done only once -- we don't ever want to return
507                  * -EIO here, instead we'll wait
508                  * -EIO here; instead we'll wait for the remaining writes.
509                 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
510                     test_bit(R1BIO_Uptodate, &r1_bio->state)) {
511                         /* Maybe we can return now */
512                         if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
513                                 struct bio *mbio = r1_bio->master_bio;
514                                 pr_debug("raid1: behind end write sectors"
515                                          " %llu-%llu\n",
516                                          (unsigned long long) mbio->bi_iter.bi_sector,
517                                          (unsigned long long) bio_end_sector(mbio) - 1);
518                                 call_bio_endio(r1_bio);
519                         }
520                 }
521         }
522         if (r1_bio->bios[mirror] == NULL)
523                 rdev_dec_pending(rdev, conf->mddev);
524
525         /*
526          * Let's see if all mirrored write operations have finished
527          * already.
528          */
529         r1_bio_write_done(r1_bio);
530
531         if (to_put)
532                 bio_put(to_put);
533 }
534
535 static sector_t align_to_barrier_unit_end(sector_t start_sector,
536                                           sector_t sectors)
537 {
538         sector_t len;
539
540         WARN_ON(sectors == 0);
541         /*
542          * len is the number of sectors from start_sector to end of the
543          * barrier unit which start_sector belongs to.
544          */
545         len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
546               start_sector;
547
548         if (len > sectors)
549                 len = sectors;
550
551         return len;
552 }
553
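/*
 * Worked example, assuming BARRIER_UNIT_SECTOR_SIZE is (1 << 17) sectors
 * (64MiB) as defined in raid1.h: for start_sector = 131000 and
 * sectors = 500, round_up(131001, 131072) = 131072, so len = 72 and the
 * request is clipped to the 72 sectors remaining in its barrier unit.
 */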
554 /*
555  * This routine returns the disk from which the requested read should
556  * be done. There is a per-array 'next expected sequential IO' sector
557  * number - if this matches on the next IO then we use the last disk.
558  * There is also a per-disk 'last known head position' sector that is
559  * maintained from IRQ contexts, both the normal and the resync IO
560  * completion handlers update this position correctly. If there is no
561  * perfect sequential match then we pick the disk whose head is closest.
562  *
563  * If there are 2 mirrors in the same 2 devices, performance degrades
564  * because the head position is tracked per mirror, not per device.
565  *
566  * The rdev for the device selected will have nr_pending incremented.
567  */
568 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
569 {
570         const sector_t this_sector = r1_bio->sector;
571         int sectors;
572         int best_good_sectors;
573         int best_disk, best_dist_disk, best_pending_disk;
574         int has_nonrot_disk;
575         int disk;
576         sector_t best_dist;
577         unsigned int min_pending;
578         struct md_rdev *rdev;
579         int choose_first;
580         int choose_next_idle;
581
582         rcu_read_lock();
583         /*
584          * Check if we can balance. We can balance on the whole
585          * device if no resync is going on, or below the resync window.
586          * We take the first readable disk when above the resync window.
587          */
588  retry:
589         sectors = r1_bio->sectors;
590         best_disk = -1;
591         best_dist_disk = -1;
592         best_dist = MaxSector;
593         best_pending_disk = -1;
594         min_pending = UINT_MAX;
595         best_good_sectors = 0;
596         has_nonrot_disk = 0;
597         choose_next_idle = 0;
598         clear_bit(R1BIO_FailFast, &r1_bio->state);
599
600         if ((conf->mddev->recovery_cp < this_sector + sectors) ||
601             (mddev_is_clustered(conf->mddev) &&
602             md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
603                     this_sector + sectors)))
604                 choose_first = 1;
605         else
606                 choose_first = 0;
607
608         for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
609                 sector_t dist;
610                 sector_t first_bad;
611                 int bad_sectors;
612                 unsigned int pending;
613                 bool nonrot;
614
615                 rdev = rcu_dereference(conf->mirrors[disk].rdev);
616                 if (r1_bio->bios[disk] == IO_BLOCKED
617                     || rdev == NULL
618                     || test_bit(Faulty, &rdev->flags))
619                         continue;
620                 if (!test_bit(In_sync, &rdev->flags) &&
621                     rdev->recovery_offset < this_sector + sectors)
622                         continue;
623                 if (test_bit(WriteMostly, &rdev->flags)) {
624                         /* Don't balance among write-mostly, just
625                          * use the first as a last resort */
626                         if (best_dist_disk < 0) {
627                                 if (is_badblock(rdev, this_sector, sectors,
628                                                 &first_bad, &bad_sectors)) {
629                                         if (first_bad <= this_sector)
630                                                 /* Cannot use this */
631                                                 continue;
632                                         best_good_sectors = first_bad - this_sector;
633                                 } else
634                                         best_good_sectors = sectors;
635                                 best_dist_disk = disk;
636                                 best_pending_disk = disk;
637                         }
638                         continue;
639                 }
640                 /* This is a reasonable device to use.  It might
641                  * even be best.
642                  */
643                 if (is_badblock(rdev, this_sector, sectors,
644                                 &first_bad, &bad_sectors)) {
645                         if (best_dist < MaxSector)
646                                 /* already have a better device */
647                                 continue;
648                         if (first_bad <= this_sector) {
649                                 /* cannot read here. If this is the 'primary'
650                                  * device, then we must not read beyond
651                                  * bad_sectors from another device.
652                                  */
653                                 bad_sectors -= (this_sector - first_bad);
654                                 if (choose_first && sectors > bad_sectors)
655                                         sectors = bad_sectors;
656                                 if (best_good_sectors > sectors)
657                                         best_good_sectors = sectors;
658
659                         } else {
660                                 sector_t good_sectors = first_bad - this_sector;
661                                 if (good_sectors > best_good_sectors) {
662                                         best_good_sectors = good_sectors;
663                                         best_disk = disk;
664                                 }
665                                 if (choose_first)
666                                         break;
667                         }
668                         continue;
669                 } else
670                         best_good_sectors = sectors;
671
672                 if (best_disk >= 0)
673                         /* At least two disks to choose from so failfast is OK */
674                         set_bit(R1BIO_FailFast, &r1_bio->state);
675
676                 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
677                 has_nonrot_disk |= nonrot;
678                 pending = atomic_read(&rdev->nr_pending);
679                 dist = abs(this_sector - conf->mirrors[disk].head_position);
680                 if (choose_first) {
681                         best_disk = disk;
682                         break;
683                 }
684                 /* Don't change to another disk for sequential reads */
685                 if (conf->mirrors[disk].next_seq_sect == this_sector
686                     || dist == 0) {
687                         int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
688                         struct raid1_info *mirror = &conf->mirrors[disk];
689
690                         best_disk = disk;
691                         /*
692                          * If buffered sequential IO size exceeds optimal
693                          * iosize, check if there is idle disk. If yes, choose
694                          * the idle disk. read_balance could already choose an
695                          * idle disk before noticing it's a sequential IO in
696                          * this disk. This doesn't matter because this disk
697                          * will idle, next time it will be utilized after the
698                  * first disk's IO size exceeds the optimal iosize. In
699                          * this way, iosize of the first disk will be optimal
700                          * iosize at least. iosize of the second disk might be
701                          * small, but not a big deal since when the second disk
702                          * starts IO, the first disk is likely still busy.
703                          */
704                         if (nonrot && opt_iosize > 0 &&
705                             mirror->seq_start != MaxSector &&
706                             mirror->next_seq_sect > opt_iosize &&
707                             mirror->next_seq_sect - opt_iosize >=
708                             mirror->seq_start) {
709                                 choose_next_idle = 1;
710                                 continue;
711                         }
712                         break;
713                 }
714
715                 if (choose_next_idle)
716                         continue;
717
718                 if (min_pending > pending) {
719                         min_pending = pending;
720                         best_pending_disk = disk;
721                 }
722
723                 if (dist < best_dist) {
724                         best_dist = dist;
725                         best_dist_disk = disk;
726                 }
727         }
728
729         /*
730          * If all disks are rotational, choose the closest disk. If any disk is
731          * non-rotational, choose the disk with the fewest pending requests even
732          * if that disk is rotational, which may or may not be optimal for arrays
733          * with mixed rotational/non-rotational disks, depending on the workload.
734          */
735         if (best_disk == -1) {
736                 if (has_nonrot_disk || min_pending == 0)
737                         best_disk = best_pending_disk;
738                 else
739                         best_disk = best_dist_disk;
740         }
741
742         if (best_disk >= 0) {
743                 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
744                 if (!rdev)
745                         goto retry;
746                 atomic_inc(&rdev->nr_pending);
747                 sectors = best_good_sectors;
748
749                 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
750                         conf->mirrors[best_disk].seq_start = this_sector;
751
752                 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
753         }
754         rcu_read_unlock();
755         *max_sectors = sectors;
756
757         return best_disk;
758 }
759
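/*
 * Typical caller pattern (compare raid1_read_request() below).  A
 * negative return means no readable mirror exists; otherwise nr_pending
 * has already been taken on the chosen rdev and must eventually be
 * dropped with rdev_dec_pending():
 *
 *	int max_sectors;
 *	int rdisk = read_balance(conf, r1_bio, &max_sectors);
 *
 *	if (rdisk < 0) {
 *		raid_end_bio_io(r1_bio);	// no readable mirror
 *		return;
 *	}
 */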
760 static int raid1_congested(struct mddev *mddev, int bits)
761 {
762         struct r1conf *conf = mddev->private;
763         int i, ret = 0;
764
765         if ((bits & (1 << WB_async_congested)) &&
766             conf->pending_count >= max_queued_requests)
767                 return 1;
768
769         rcu_read_lock();
770         for (i = 0; i < conf->raid_disks * 2; i++) {
771                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
772                 if (rdev && !test_bit(Faulty, &rdev->flags)) {
773                         struct request_queue *q = bdev_get_queue(rdev->bdev);
774
775                         BUG_ON(!q);
776
777                         /* Note the '|| 1' - when read_balance prefers
778                          * non-congested targets, it can be removed
779                          */
780                         if ((bits & (1 << WB_async_congested)) || 1)
781                                 ret |= bdi_congested(q->backing_dev_info, bits);
782                         else
783                                 ret &= bdi_congested(q->backing_dev_info, bits);
784                 }
785         }
786         rcu_read_unlock();
787         return ret;
788 }
789
790 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
791 {
792         /* flush any pending bitmap writes to disk before proceeding w/ I/O */
793         bitmap_unplug(conf->mddev->bitmap);
794         wake_up(&conf->wait_barrier);
795
796         while (bio) { /* submit pending writes */
797                 struct bio *next = bio->bi_next;
798                 struct md_rdev *rdev = (void*)bio->bi_bdev;
799                 bio->bi_next = NULL;
800                 bio->bi_bdev = rdev->bdev;
801                 if (test_bit(Faulty, &rdev->flags)) {
802                         bio->bi_error = -EIO;
803                         bio_endio(bio);
804                 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
805                                     !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
806                         /* Just ignore it */
807                         bio_endio(bio);
808                 else
809                         generic_make_request(bio);
810                 bio = next;
811         }
812 }
813
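/*
 * The (void *)bio->bi_bdev cast above works because the queueing side
 * temporarily stashes the md_rdev pointer in bi_bdev.  A minimal sketch
 * of that enqueue step, as done on the write path:
 */
#if 0
	mbio->bi_bdev = (void *)rdev;	/* borrowed until the flush */
	bio_list_add(&conf->pending_bio_list, mbio);
	conf->pending_count++;
#endif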
814 static void flush_pending_writes(struct r1conf *conf)
815 {
816         /* Any writes that have been queued but are awaiting
817          * bitmap updates get flushed here.
818          */
819         spin_lock_irq(&conf->device_lock);
820
821         if (conf->pending_bio_list.head) {
822                 struct bio *bio;
823                 bio = bio_list_get(&conf->pending_bio_list);
824                 conf->pending_count = 0;
825                 spin_unlock_irq(&conf->device_lock);
826                 flush_bio_list(conf, bio);
827         } else
828                 spin_unlock_irq(&conf->device_lock);
829 }
830
831 /* Barriers....
832  * Sometimes we need to suspend IO while we do something else,
833  * either some resync/recovery, or reconfigure the array.
834  * To do this we raise a 'barrier'.
835  * The 'barrier' is a counter that can be raised multiple times
836  * to count how many activities are happening which preclude
837  * normal IO.
838  * We can only raise the barrier if there is no pending IO.
839  * i.e. if nr_pending == 0.
840  * We choose only to raise the barrier if no-one is waiting for the
841  * barrier to go down.  This means that as soon as an IO request
842  * is ready, no other operations which require a barrier will start
843  * until the IO request has had a chance.
844  *
845  * So: regular IO calls 'wait_barrier'.  When that returns there
846  *    is no background IO happening.  It must arrange to call
847  *    allow_barrier when it has finished its IO.
848  * background IO must call raise_barrier.  Once that returns
849  *    there is no normal IO happening.  It must arrange to call
850  *    lower_barrier when the particular background IO completes.
851  */
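/*
 * The bucket index used by all of the barrier code comes from
 * sector_to_idx() in raid1.h, which (in this tree) hashes the
 * barrier-unit number of a sector into one of BARRIER_BUCKETS_NR
 * buckets, roughly:
 *
 *	return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
 *			 BARRIER_BUCKETS_NR_BITS);
 */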
852 static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
853 {
854         int idx = sector_to_idx(sector_nr);
855
856         spin_lock_irq(&conf->resync_lock);
857
858         /* Wait until no block IO is waiting */
859         wait_event_lock_irq(conf->wait_barrier,
860                             !atomic_read(&conf->nr_waiting[idx]),
861                             conf->resync_lock);
862
863         /* block any new IO from starting */
864         atomic_inc(&conf->barrier[idx]);
865         /*
866          * In raise_barrier() we firstly increase conf->barrier[idx] then
867          * check conf->nr_pending[idx]. In _wait_barrier() we firstly
868          * increase conf->nr_pending[idx] then check conf->barrier[idx].
869          * A memory barrier here to make sure conf->nr_pending[idx] won't
870          * be fetched before conf->barrier[idx] is increased. Otherwise
871          * there will be a race between raise_barrier() and _wait_barrier().
872          */
873         smp_mb__after_atomic();
874
875         /* For these conditions we must wait:
876          * A: while the array is in frozen state
877          * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
878          *    exists in the corresponding I/O barrier bucket.
879          * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have
880          *    reached the max resync count allowed on the current barrier bucket.
881          */
882         wait_event_lock_irq(conf->wait_barrier,
883                             !conf->array_frozen &&
884                              !atomic_read(&conf->nr_pending[idx]) &&
885                              atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
886                             conf->resync_lock);
887
888         atomic_inc(&conf->nr_pending[idx]);
889         spin_unlock_irq(&conf->resync_lock);
890 }
891
892 static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
893 {
894         int idx = sector_to_idx(sector_nr);
895
896         BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
897
898         atomic_dec(&conf->barrier[idx]);
899         atomic_dec(&conf->nr_pending[idx]);
900         wake_up(&conf->wait_barrier);
901 }
902
903 static void _wait_barrier(struct r1conf *conf, int idx)
904 {
905         /*
906          * We need to increase conf->nr_pending[idx] very early here,
907          * then raise_barrier() can be blocked when it waits for
908          * conf->nr_pending[idx] to be 0. Then we can avoid holding
909          * conf->resync_lock when there is no barrier raised in the same
910          * barrier unit bucket. Also if the array is frozen, I/O
911          * should be blocked until array is unfrozen.
912          */
913         atomic_inc(&conf->nr_pending[idx]);
914         /*
915          * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
916          * check conf->barrier[idx]. In raise_barrier() we firstly increase
917          * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
918          * barrier is necessary here to make sure conf->barrier[idx] won't be
919          * fetched before conf->nr_pending[idx] is increased. Otherwise there
920          * will be a race between _wait_barrier() and raise_barrier().
921          */
922         smp_mb__after_atomic();
923
924         /*
925          * Don't worry about checking two atomic_t variables at same time
926          * here. If, while we check conf->barrier[idx], the array becomes
927          * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
928          * 0, it is safe to return and let the I/O continue. Because the
929          * array is frozen, all I/O returned here will eventually complete
930          * or be queued; no race will happen. See the code comment in
931          * freeze_array().
932          */
933         if (!READ_ONCE(conf->array_frozen) &&
934             !atomic_read(&conf->barrier[idx]))
935                 return;
936
937         /*
938          * After holding conf->resync_lock, conf->nr_pending[idx]
939          * should be decreased before waiting for barrier to drop.
940          * Otherwise, we may encounter a race condition because
941          * raise_barrier() might be waiting for conf->nr_pending[idx]
942          * to be 0 at the same time.
943          */
944         spin_lock_irq(&conf->resync_lock);
945         atomic_inc(&conf->nr_waiting[idx]);
946         atomic_dec(&conf->nr_pending[idx]);
947         /*
948          * In case freeze_array() is waiting for
949          * get_unqueued_pending() == extra
950          */
951         wake_up(&conf->wait_barrier);
952         /* Wait for the barrier in same barrier unit bucket to drop. */
953         wait_event_lock_irq(conf->wait_barrier,
954                             !conf->array_frozen &&
955                              !atomic_read(&conf->barrier[idx]),
956                             conf->resync_lock);
957         atomic_inc(&conf->nr_pending[idx]);
958         atomic_dec(&conf->nr_waiting[idx]);
959         spin_unlock_irq(&conf->resync_lock);
960 }
961
962 static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
963 {
964         int idx = sector_to_idx(sector_nr);
965
966         /*
967          * Very similar to _wait_barrier(). The difference is, for read
968          * I/O we don't need to wait for sync I/O, but if the whole array
969          * is frozen, the read I/O still has to wait until the array is
970          * unfrozen. Since there is no ordering requirement with
971          * conf->barrier[idx] here, memory barrier is unnecessary as well.
972          */
973         atomic_inc(&conf->nr_pending[idx]);
974
975         if (!READ_ONCE(conf->array_frozen))
976                 return;
977
978         spin_lock_irq(&conf->resync_lock);
979         atomic_inc(&conf->nr_waiting[idx]);
980         atomic_dec(&conf->nr_pending[idx]);
981         /*
982          * In case freeze_array() is waiting for
983          * get_unqueued_pending() == extra
984          */
985         wake_up(&conf->wait_barrier);
986         /* Wait for array to be unfrozen */
987         wait_event_lock_irq(conf->wait_barrier,
988                             !conf->array_frozen,
989                             conf->resync_lock);
990         atomic_inc(&conf->nr_pending[idx]);
991         atomic_dec(&conf->nr_waiting[idx]);
992         spin_unlock_irq(&conf->resync_lock);
993 }
994
995 static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
996 {
997         int idx = sector_to_idx(sector_nr);
998
999         _wait_barrier(conf, idx);
1000 }
1001
1002 static void wait_all_barriers(struct r1conf *conf)
1003 {
1004         int idx;
1005
1006         for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1007                 _wait_barrier(conf, idx);
1008 }
1009
1010 static void _allow_barrier(struct r1conf *conf, int idx)
1011 {
1012         atomic_dec(&conf->nr_pending[idx]);
1013         wake_up(&conf->wait_barrier);
1014 }
1015
1016 static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1017 {
1018         int idx = sector_to_idx(sector_nr);
1019
1020         _allow_barrier(conf, idx);
1021 }
1022
1023 static void allow_all_barriers(struct r1conf *conf)
1024 {
1025         int idx;
1026
1027         for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1028                 _allow_barrier(conf, idx);
1029 }
1030
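/*
 * Usage sketch (assumption: matches close_sync() further down in this
 * file): draining every bucket once quiesces the whole array before
 * resync bookkeeping is torn down.
 */
#if 0
	wait_all_barriers(conf);
	allow_all_barriers(conf);
#endif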
1031 /* conf->resync_lock should be held */
1032 static int get_unqueued_pending(struct r1conf *conf)
1033 {
1034         int idx, ret;
1035
1036         for (ret = 0, idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1037                 ret += atomic_read(&conf->nr_pending[idx]) -
1038                         atomic_read(&conf->nr_queued[idx]);
1039
1040         return ret;
1041 }
1042
1043 static void freeze_array(struct r1conf *conf, int extra)
1044 {
1045         /* Stop sync I/O and normal I/O and wait for everything to
1046          * go quiet.
1047          * This is called in two situations:
1048          * 1) management command handlers (reshape, remove disk, quiesce).
1049          * 2) one normal I/O request failed.
1050          *
1051          * After array_frozen is set to 1, new sync IO will be blocked at
1052          * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
1053          * or wait_read_barrier(). The flying I/Os will either complete or be
1054          * queued. When everything goes quiet, there are only queued I/Os left.
1055          *
1056          * Every flying I/O contributes to a conf->nr_pending[idx], where idx is
1057          * the barrier bucket index which this I/O request hits. When all sync and
1058          * normal I/O are queued, sum of all conf->nr_pending[] will match sum
1059          * of all conf->nr_queued[]. But normal I/O failure is an exception,
1060          * in handle_read_error(), we may call freeze_array() before trying to
1061          * fix the read error. In this case, the error read I/O is not queued,
1062          * so get_unqueued_pending() == 1.
1063          *
1064          * Therefore before this function returns, we need to wait until
1065          * get_unqueued_pending(conf) is equal to extra. For the
1066          * normal I/O context, extra is 1; in all other situations extra is 0.
1067          */
1068         spin_lock_irq(&conf->resync_lock);
1069         conf->array_frozen = 1;
1070         raid1_log(conf->mddev, "wait freeze");
1071         wait_event_lock_irq_cmd(
1072                 conf->wait_barrier,
1073                 get_unqueued_pending(conf) == extra,
1074                 conf->resync_lock,
1075                 flush_pending_writes(conf));
1076         spin_unlock_irq(&conf->resync_lock);
1077 }
1078 static void unfreeze_array(struct r1conf *conf)
1079 {
1080         /* reverse the effect of the freeze */
1081         spin_lock_irq(&conf->resync_lock);
1082         conf->array_frozen = 0;
1083         spin_unlock_irq(&conf->resync_lock);
1084         wake_up(&conf->wait_barrier);
1085 }
1086
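/*
 * Usage sketch (assumption: mirrors handle_read_error() further down):
 * a failed normal read freezes the array with extra == 1, because the
 * erroring request itself is still pending but not queued:
 */
#if 0
	freeze_array(conf, 1);
	fix_read_error(conf, r1_bio->read_disk, r1_bio->sector, r1_bio->sectors);
	unfreeze_array(conf);
#endif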
1087 static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
1088                                            struct bio *bio)
1089 {
1090         int size = bio->bi_iter.bi_size;
1091         unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1092         int i = 0;
1093         struct bio *behind_bio = NULL;
1094
1095         behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1096         if (!behind_bio)
1097                 goto fail;
1098
1099         /* discard op, we don't support writezero/writesame yet */
1100         if (!bio_has_data(bio))
1101                 goto skip_copy;
1102
1103         while (i < vcnt && size) {
1104                 struct page *page;
1105                 int len = min_t(int, PAGE_SIZE, size);
1106
1107                 page = alloc_page(GFP_NOIO);
1108                 if (unlikely(!page))
1109                         goto free_pages;
1110
1111                 bio_add_page(behind_bio, page, len, 0);
1112
1113                 size -= len;
1114                 i++;
1115         }
1116
1117         bio_copy_data(behind_bio, bio);
1118 skip_copy:
1119         r1_bio->behind_master_bio = behind_bio;
1120         set_bit(R1BIO_BehindIO, &r1_bio->state);
1121
1122         return behind_bio;
1123
1124 free_pages:
1125         pr_debug("%dB behind alloc failed, doing sync I/O\n",
1126                  bio->bi_iter.bi_size);
1127         bio_free_pages(behind_bio);
1128 fail:
1129         return behind_bio;
1130 }
1131
1132 struct raid1_plug_cb {
1133         struct blk_plug_cb      cb;
1134         struct bio_list         pending;
1135         int                     pending_cnt;
1136 };
1137
1138 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1139 {
1140         struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1141                                                   cb);
1142         struct mddev *mddev = plug->cb.data;
1143         struct r1conf *conf = mddev->private;
1144         struct bio *bio;
1145
1146         if (from_schedule || current->bio_list) {
1147                 spin_lock_irq(&conf->device_lock);
1148                 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1149                 conf->pending_count += plug->pending_cnt;
1150                 spin_unlock_irq(&conf->device_lock);
1151                 wake_up(&conf->wait_barrier);
1152                 md_wakeup_thread(mddev->thread);
1153                 kfree(plug);
1154                 return;
1155         }
1156
1157         /* we aren't scheduling, so we can do the write-out directly. */
1158         bio = bio_list_get(&plug->pending);
1159         flush_bio_list(conf, bio);
1160         kfree(plug);
1161 }
1162
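/*
 * Producer side of the plug, as used in raid1_write_request() below:
 * blk_check_plugged() attaches one raid1_plug_cb to the running task's
 * plug; writes then accumulate in plug->pending until raid1_unplug()
 * fires on unplug.  A minimal sketch:
 */
#if 0
	cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
	if (cb)
		plug = container_of(cb, struct raid1_plug_cb, cb);
	if (plug) {
		bio_list_add(&plug->pending, mbio);
		plug->pending_cnt++;
	}
#endif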
1163 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1164 {
1165         r1_bio->master_bio = bio;
1166         r1_bio->sectors = bio_sectors(bio);
1167         r1_bio->state = 0;
1168         r1_bio->mddev = mddev;
1169         r1_bio->sector = bio->bi_iter.bi_sector;
1170 }
1171
1172 static inline struct r1bio *
1173 alloc_r1bio(struct mddev *mddev, struct bio *bio)
1174 {
1175         struct r1conf *conf = mddev->private;
1176         struct r1bio *r1_bio;
1177
1178         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1179         /* Ensure no bio records IO_BLOCKED */
1180         memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1181         init_r1bio(r1_bio, mddev, bio);
1182         return r1_bio;
1183 }
1184
1185 static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1186                                int max_read_sectors, struct r1bio *r1_bio)
1187 {
1188         struct r1conf *conf = mddev->private;
1189         struct raid1_info *mirror;
1190         struct bio *read_bio;
1191         struct bitmap *bitmap = mddev->bitmap;
1192         const int op = bio_op(bio);
1193         const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1194         int max_sectors;
1195         int rdisk;
1196         bool print_msg = !!r1_bio;
1197         char b[BDEVNAME_SIZE];
1198
1199         /*
1200          * If r1_bio is set, we are blocking the raid1d thread
1201          * so there is a tiny risk of deadlock.  So ask for
1202          * emergency memory if needed.
1203          */
1204         gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1205
1206         if (print_msg) {
1207                 /* Need to get the block device name carefully */
1208                 struct md_rdev *rdev;
1209                 rcu_read_lock();
1210                 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1211                 if (rdev)
1212                         bdevname(rdev->bdev, b);
1213                 else
1214                         strcpy(b, "???");
1215                 rcu_read_unlock();
1216         }
1217
1218         /*
1219          * Still need barrier for READ in case that whole
1220          * array is frozen.
1221          */
1222         wait_read_barrier(conf, bio->bi_iter.bi_sector);
1223
1224         if (!r1_bio)
1225                 r1_bio = alloc_r1bio(mddev, bio);
1226         else
1227                 init_r1bio(r1_bio, mddev, bio);
1228         r1_bio->sectors = max_read_sectors;
1229
1230         /*
1231          * make_request() can abort the operation when read-ahead is being
1232          * used and no empty request is available.
1233          */
1234         rdisk = read_balance(conf, r1_bio, &max_sectors);
1235
1236         if (rdisk < 0) {
1237                 /* couldn't find anywhere to read from */
1238                 if (print_msg) {
1239                         pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1240                                             mdname(mddev),
1241                                             b,
1242                                             (unsigned long long)r1_bio->sector);
1243                 }
1244                 raid_end_bio_io(r1_bio);
1245                 return;
1246         }
1247         mirror = conf->mirrors + rdisk;
1248
1249         if (print_msg)
1250                 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
1251                                     mdname(mddev),
1252                                     (unsigned long long)r1_bio->sector,
1253                                     bdevname(mirror->rdev->bdev, b));
1254
1255         if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1256             bitmap) {
1257                 /*
1258                  * Reading from a write-mostly device must take care not to
1259                  * over-take any writes that are 'behind'
1260                  */
1261                 raid1_log(mddev, "wait behind writes");
1262                 wait_event(bitmap->behind_wait,
1263                            atomic_read(&bitmap->behind_writes) == 0);
1264         }
1265
1266         if (max_sectors < bio_sectors(bio)) {
1267                 struct bio *split = bio_split(bio, max_sectors,
1268                                               gfp, conf->bio_split);
1269                 bio_chain(split, bio);
1270                 generic_make_request(bio);
1271                 bio = split;
1272                 r1_bio->master_bio = bio;
1273                 r1_bio->sectors = max_sectors;
1274         }
1275
1276         r1_bio->read_disk = rdisk;
1277
1278         read_bio = bio_clone_fast(bio, gfp, mddev->bio_set);
1279
1280         r1_bio->bios[rdisk] = read_bio;
1281
1282         read_bio->bi_iter.bi_sector = r1_bio->sector +
1283                 mirror->rdev->data_offset;
1284         read_bio->bi_bdev = mirror->rdev->bdev;
1285         read_bio->bi_end_io = raid1_end_read_request;
1286         bio_set_op_attrs(read_bio, op, do_sync);
1287         if (test_bit(FailFast, &mirror->rdev->flags) &&
1288             test_bit(R1BIO_FailFast, &r1_bio->state))
1289                 read_bio->bi_opf |= MD_FAILFAST;
1290         read_bio->bi_private = r1_bio;
1291
1292         if (mddev->gendisk)
1293                 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1294                                       read_bio, disk_devt(mddev->gendisk),
1295                                       r1_bio->sector);
1296
1297         generic_make_request(read_bio);
1298 }
1299
1300 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1301                                 int max_write_sectors)
1302 {
1303         struct r1conf *conf = mddev->private;
1304         struct r1bio *r1_bio;
1305         int i, disks;
1306         struct bitmap *bitmap = mddev->bitmap;
1307         unsigned long flags;
1308         struct md_rdev *blocked_rdev;
1309         struct blk_plug_cb *cb;
1310         struct raid1_plug_cb *plug = NULL;
1311         int first_clone;
1312         int max_sectors;
1313
1314         /*
1315          * Register the new request and wait if the reconstruction
1316          * thread has put up a bar for new requests.
1317          * Continue immediately if no resync is active currently.
1318          */
1319
1320         md_write_start(mddev, bio); /* wait on superblock update early */
1321
1322         if ((bio_end_sector(bio) > mddev->suspend_lo &&
1323             bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1324             (mddev_is_clustered(mddev) &&
1325              md_cluster_ops->area_resyncing(mddev, WRITE,
1326                      bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
1327
1328                 /*
1329                  * As the suspend_* range is controlled by userspace, we want
1330                  * an interruptible wait.
1331                  */
1332                 DEFINE_WAIT(w);
1333                 for (;;) {
1334                         flush_signals(current);
1335                         prepare_to_wait(&conf->wait_barrier,
1336                                         &w, TASK_INTERRUPTIBLE);
1337                         if (bio_end_sector(bio) <= mddev->suspend_lo ||
1338                             bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1339                             (mddev_is_clustered(mddev) &&
1340                              !md_cluster_ops->area_resyncing(mddev, WRITE,
1341                                      bio->bi_iter.bi_sector,
1342                                      bio_end_sector(bio))))
1343                                 break;
1344                         schedule();
1345                 }
1346                 finish_wait(&conf->wait_barrier, &w);
1347         }
1348         wait_barrier(conf, bio->bi_iter.bi_sector);
1349
1350         r1_bio = alloc_r1bio(mddev, bio);
1351         r1_bio->sectors = max_write_sectors;
1352
1353         if (conf->pending_count >= max_queued_requests) {
1354                 md_wakeup_thread(mddev->thread);
1355                 raid1_log(mddev, "wait queued");
1356                 wait_event(conf->wait_barrier,
1357                            conf->pending_count < max_queued_requests);
1358         }
1359         /* first select target devices under rcu_lock and
1360          * inc refcount on their rdev.  Record them by setting
1361          * bios[x] to bio
1362          * If there are known/acknowledged bad blocks on any device on
1363          * which we have seen a write error, we want to avoid writing those
1364          * blocks.
1365          * This potentially requires several writes to write around
1366          * the bad blocks.  Each set of writes gets its own r1bio
1367          * with a set of bios attached.
1368          */
1369
1370         disks = conf->raid_disks * 2;
1371  retry_write:
1372         blocked_rdev = NULL;
1373         rcu_read_lock();
1374         max_sectors = r1_bio->sectors;
1375         for (i = 0;  i < disks; i++) {
1376                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1377                 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1378                         atomic_inc(&rdev->nr_pending);
1379                         blocked_rdev = rdev;
1380                         break;
1381                 }
1382                 r1_bio->bios[i] = NULL;
1383                 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1384                         if (i < conf->raid_disks)
1385                                 set_bit(R1BIO_Degraded, &r1_bio->state);
1386                         continue;
1387                 }
1388
1389                 atomic_inc(&rdev->nr_pending);
1390                 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1391                         sector_t first_bad;
1392                         int bad_sectors;
1393                         int is_bad;
1394
1395                         is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1396                                              &first_bad, &bad_sectors);
1397                         if (is_bad < 0) {
1398                                 /* mustn't write here until the bad block is
1399                                  * acknowledged */
1400                                 set_bit(BlockedBadBlocks, &rdev->flags);
1401                                 blocked_rdev = rdev;
1402                                 break;
1403                         }
1404                         if (is_bad && first_bad <= r1_bio->sector) {
1405                                 /* Cannot write here at all */
1406                                 bad_sectors -= (r1_bio->sector - first_bad);
1407                                 if (bad_sectors < max_sectors)
1408                                         /* mustn't write more than bad_sectors
1409                                          * to other devices yet
1410                                          */
1411                                         max_sectors = bad_sectors;
1412                                 rdev_dec_pending(rdev, mddev);
1413                                 /* We don't set R1BIO_Degraded as that
1414                                  * only applies if the disk is
1415                                  * missing, so it might be re-added,
1416                                  * and we want to know to recover this
1417                                  * chunk.
1418                                  * In this case the device is here,
1419                                  * and the fact that this chunk is not
1420                                  * in-sync is recorded in the bad
1421                                  * block log
1422                                  */
1423                                 continue;
1424                         }
1425                         if (is_bad) {
1426                                 int good_sectors = first_bad - r1_bio->sector;
1427                                 if (good_sectors < max_sectors)
1428                                         max_sectors = good_sectors;
1429                         }
1430                 }
1431                 r1_bio->bios[i] = bio;
1432         }
1433         rcu_read_unlock();
1434
1435         if (unlikely(blocked_rdev)) {
1436                 /* Wait for this device to become unblocked */
1437                 int j;
1438
1439                 for (j = 0; j < i; j++)
1440                         if (r1_bio->bios[j])
1441                                 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1442                 r1_bio->state = 0;
1443                 allow_barrier(conf, bio->bi_iter.bi_sector);
1444                 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1445                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1446                 wait_barrier(conf, bio->bi_iter.bi_sector);
1447                 goto retry_write;
1448         }
1449
1450         if (max_sectors < bio_sectors(bio)) {
1451                 struct bio *split = bio_split(bio, max_sectors,
1452                                               GFP_NOIO, conf->bio_split);
1453                 bio_chain(split, bio);
1454                 generic_make_request(bio);
1455                 bio = split;
1456                 r1_bio->master_bio = bio;
1457                 r1_bio->sectors = max_sectors;
1458         }
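             /* Note: bio_split() takes the first max_sectors of the request;
              * the remainder was resubmitted above as a fresh request, and we
              * continue here with the front portion only.  bio_chain() ensures
              * the remainder does not complete back to the caller until the
              * front portion has finished as well.
              */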
1459
1460         atomic_set(&r1_bio->remaining, 1);
1461         atomic_set(&r1_bio->behind_remaining, 0);
1462
1463         first_clone = 1;
1464
1465         for (i = 0; i < disks; i++) {
1466                 struct bio *mbio = NULL;
1467                 if (!r1_bio->bios[i])
1468                         continue;
1469
1470
1471                 if (first_clone) {
1472                         /* Should we do behind I/O?
1473                          * Not if there are too many, or we cannot
1474                          * allocate memory, or a reader on a WriteMostly
1475                          * device is waiting for behind writes to flush */
1476                         if (bitmap &&
1477                             (atomic_read(&bitmap->behind_writes)
1478                              < mddev->bitmap_info.max_write_behind) &&
1479                             !waitqueue_active(&bitmap->behind_wait)) {
1480                                 mbio = alloc_behind_master_bio(r1_bio, bio);
1481                         }
1482
1483                         bitmap_startwrite(bitmap, r1_bio->sector,
1484                                           r1_bio->sectors,
1485                                           test_bit(R1BIO_BehindIO,
1486                                                    &r1_bio->state));
1487                         first_clone = 0;
1488                 }
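                     /* Note: for write-behind, alloc_behind_master_bio()
                      * copies the data into private pages so the master bio
                      * can complete while slower WriteMostly mirrors are still
                      * writing; behind_remaining counts those outstanding
                      * behind writes.
                      */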
1489
1490                 if (!mbio) {
1491                         if (r1_bio->behind_master_bio)
1492                                 mbio = bio_clone_fast(r1_bio->behind_master_bio,
1493                                                       GFP_NOIO,
1494                                                       mddev->bio_set);
1495                         else
1496                                 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
1497                 }
1498
1499                 if (r1_bio->behind_master_bio) {
1500                         if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1501                                 atomic_inc(&r1_bio->behind_remaining);
1502                 }
1503
1504                 r1_bio->bios[i] = mbio;
1505
1506                 mbio->bi_iter.bi_sector = (r1_bio->sector +
1507                                    conf->mirrors[i].rdev->data_offset);
1508                 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1509                 mbio->bi_end_io = raid1_end_write_request;
1510                 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1511                 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1512                     !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1513                     conf->raid_disks - mddev->degraded > 1)
1514                         mbio->bi_opf |= MD_FAILFAST;
1515                 mbio->bi_private = r1_bio;
1516
1517                 atomic_inc(&r1_bio->remaining);
1518
1519                 if (mddev->gendisk)
1520                         trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
1521                                               mbio, disk_devt(mddev->gendisk),
1522                                               r1_bio->sector);
1523                 /* flush_pending_writes() needs access to the rdev so... */
1524                 mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
1525
1526                 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1527                 if (cb)
1528                         plug = container_of(cb, struct raid1_plug_cb, cb);
1529                 else
1530                         plug = NULL;
1531                 spin_lock_irqsave(&conf->device_lock, flags);
1532                 if (plug) {
1533                         bio_list_add(&plug->pending, mbio);
1534                         plug->pending_cnt++;
1535                 } else {
1536                         bio_list_add(&conf->pending_bio_list, mbio);
1537                         conf->pending_count++;
1538                 }
1539                 spin_unlock_irqrestore(&conf->device_lock, flags);
1540                 if (!plug)
1541                         md_wakeup_thread(mddev->thread);
1542         }
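             /* Note: writes queued under a blk_plug are flushed as a batch by
              * raid1_unplug() when the task unplugs; otherwise they accumulate
              * on conf->pending_bio_list until raid1d flushes them via
              * flush_pending_writes(), hence the md_wakeup_thread() call above.
              */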
1543
1544         r1_bio_write_done(r1_bio);
1545
1546         /* In case raid1d snuck in to freeze_array */
1547         wake_up(&conf->wait_barrier);
1548 }
1549
1550 static void raid1_make_request(struct mddev *mddev, struct bio *bio)
1551 {
1552         sector_t sectors;
1553
1554         if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1555                 md_flush_request(mddev, bio);
1556                 return;
1557         }
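             /* Note: md_flush_request() handles REQ_PREFLUSH in the md core,
              * flushing every member device before any data portion of the
              * bio is resubmitted here.
              */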
1558
1559         /*
1560          * There is a limit to the maximum size, but
1561          * the read/write handler might find a lower limit
1562          * due to bad blocks.  To avoid multiple splits,
1563          * we pass the maximum number of sectors down
1564          * and let the lower level perform the split.
1565          */
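             /* Note: align_to_barrier_unit_end() clamps the request so it
              * does not cross a barrier-unit boundary (BARRIER_UNIT_SECTOR_SIZE,
              * 64MB worth of sectors); barriers are accounted per such unit.
              */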
1566         sectors = align_to_barrier_unit_end(
1567                 bio->bi_iter.bi_sector, bio_sectors(bio));
1568
1569         if (bio_data_dir(bio) == READ)
1570                 raid1_read_request(mddev, bio, sectors, NULL);
1571         else
1572                 raid1_write_request(mddev, bio, sectors);
1573 }
1574
1575 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1576 {
1577         struct r1conf *conf = mddev->private;
1578         int i;
1579
1580         seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1581                    conf->raid_disks - mddev->degraded);
1582         rcu_read_lock();
1583         for (i = 0; i < conf->raid_disks; i++) {
1584                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1585                 seq_printf(seq, "%s",
1586                            rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1587         }
1588         rcu_read_unlock();
1589         seq_printf(seq, "]");
1590 }
1591
1592 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1593 {
1594         char b[BDEVNAME_SIZE];
1595         struct r1conf *conf = mddev->private;
1596         unsigned long flags;
1597
1598         /*
1599          * If it is not operational, then we have already marked it as dead;
1600          * else if it is the last working disk, ignore the error and let the
1601          * next level up know;
1602          * else mark the drive as failed.
1603          */
1604         spin_lock_irqsave(&conf->device_lock, flags);
1605         if (test_bit(In_sync, &rdev->flags)
1606             && (conf->raid_disks - mddev->degraded) == 1) {
1607                 /*
1608                  * Don't fail the drive, act as though we were just a
1609                  * normal single drive.
1610                  * However don't try a recovery from this drive as
1611                  * it is very likely to fail.
1612                  */
1613                 conf->recovery_disabled = mddev->recovery_disabled;
1614                 spin_unlock_irqrestore(&conf->device_lock, flags);
1615                 return;
1616         }
1617         set_bit(Blocked, &rdev->flags);
1618         if (test_and_clear_bit(In_sync, &rdev->flags))
1619                 mddev->degraded++;
1620         set_bit(Faulty, &rdev->flags);
1623         spin_unlock_irqrestore(&conf->device_lock, flags);
1624         /*
1625          * if recovery is running, make sure it aborts.
1626          */
1627         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1628         set_mask_bits(&mddev->sb_flags, 0,
1629                       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1630         pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1631                 "md/raid1:%s: Operation continuing on %d devices.\n",
1632                 mdname(mddev), bdevname(rdev->bdev, b),
1633                 mdname(mddev), conf->raid_disks - mddev->degraded);
1634 }
1635
1636 static void print_conf(struct r1conf *conf)
1637 {
1638         int i;
1639
1640         pr_debug("RAID1 conf printout:\n");
1641         if (!conf) {
1642                 pr_debug("(!conf)\n");
1643                 return;
1644         }
1645         pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1646                  conf->raid_disks);
1647
1648         rcu_read_lock();
1649         for (i = 0; i < conf->raid_disks; i++) {
1650                 char b[BDEVNAME_SIZE];
1651                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1652                 if (rdev)
1653                         pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1654                                  i, !test_bit(In_sync, &rdev->flags),
1655                                  !test_bit(Faulty, &rdev->flags),
1656                                  bdevname(rdev->bdev,b));
1657         }
1658         rcu_read_unlock();
1659 }
1660
1661 static void close_sync(struct r1conf *conf)
1662 {
1663         wait_all_barriers(conf);
1664         allow_all_barriers(conf);
1665
1666         mempool_destroy(conf->r1buf_pool);
1667         conf->r1buf_pool = NULL;
1668 }
1669
1670 static int raid1_spare_active(struct mddev *mddev)
1671 {
1672         int i;
1673         struct r1conf *conf = mddev->private;
1674         int count = 0;
1675         unsigned long flags;
1676
1677         /*
1678          * Find all failed disks within the RAID1 configuration
1679          * and mark them readable.
1680          * Called under mddev lock, so rcu protection not needed.
1681          * device_lock used to avoid races with raid1_end_read_request
1682          * which expects 'In_sync' flags and ->degraded to be consistent.
1683          */
1684         spin_lock_irqsave(&conf->device_lock, flags);
1685         for (i = 0; i < conf->raid_disks; i++) {
1686                 struct md_rdev *rdev = conf->mirrors[i].rdev;
1687                 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1688                 if (repl
1689                     && !test_bit(Candidate, &repl->flags)
1690                     && repl->recovery_offset == MaxSector
1691                     && !test_bit(Faulty, &repl->flags)
1692                     && !test_and_set_bit(In_sync, &repl->flags)) {
1693                         /* replacement has just become active */
1694                         if (!rdev ||
1695                             !test_and_clear_bit(In_sync, &rdev->flags))
1696                                 count++;
1697                         if (rdev) {
1698                                 /* Replaced device not technically
1699                                  * faulty, but we need to be sure
1700                                  * it gets removed and never re-added
1701                                  */
1702                                 set_bit(Faulty, &rdev->flags);
1703                                 sysfs_notify_dirent_safe(
1704                                         rdev->sysfs_state);
1705                         }
1706                 }
1707                 if (rdev
1708                     && rdev->recovery_offset == MaxSector
1709                     && !test_bit(Faulty, &rdev->flags)
1710                     && !test_and_set_bit(In_sync, &rdev->flags)) {
1711                         count++;
1712                         sysfs_notify_dirent_safe(rdev->sysfs_state);
1713                 }
1714         }
1715         mddev->degraded -= count;
1716         spin_unlock_irqrestore(&conf->device_lock, flags);
1717
1718         print_conf(conf);
1719         return count;
1720 }
1721
1722 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1723 {
1724         struct r1conf *conf = mddev->private;
1725         int err = -EEXIST;
1726         int mirror = 0;
1727         struct raid1_info *p;
1728         int first = 0;
1729         int last = conf->raid_disks - 1;
1730
1731         if (mddev->recovery_disabled == conf->recovery_disabled)
1732                 return -EBUSY;
1733
1734         if (md_integrity_add_rdev(rdev, mddev))
1735                 return -ENXIO;
1736
1737         if (rdev->raid_disk >= 0)
1738                 first = last = rdev->raid_disk;
1739
1740         /*
1741          * find the disk ... but prefer rdev->saved_raid_disk
1742          * if possible.
1743          */
1744         if (rdev->saved_raid_disk >= 0 &&
1745             rdev->saved_raid_disk >= first &&
1746             conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1747                 first = last = rdev->saved_raid_disk;
1748
1749         for (mirror = first; mirror <= last; mirror++) {
1750                 p = conf->mirrors+mirror;
1751                 if (!p->rdev) {
1752
1753                         if (mddev->gendisk)
1754                                 disk_stack_limits(mddev->gendisk, rdev->bdev,
1755                                                   rdev->data_offset << 9);
1756
1757                         p->head_position = 0;
1758                         rdev->raid_disk = mirror;
1759                         err = 0;
1760                         /* As all devices are equivalent, we don't need a full recovery
1761                          * if this drive was recently part of the array.
1762                          */
1763                         if (rdev->saved_raid_disk < 0)
1764                                 conf->fullsync = 1;
1765                         rcu_assign_pointer(p->rdev, rdev);
1766                         break;
1767                 }
1768                 if (test_bit(WantReplacement, &p->rdev->flags) &&
1769                     p[conf->raid_disks].rdev == NULL) {
1770                         /* Add this device as a replacement */
1771                         clear_bit(In_sync, &rdev->flags);
1772                         set_bit(Replacement, &rdev->flags);
1773                         rdev->raid_disk = mirror;
1774                         err = 0;
1775                         conf->fullsync = 1;
1776                         rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1777                         break;
1778                 }
1779         }
1780         if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1781                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1782         print_conf(conf);
1783         return err;
1784 }
1785
1786 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1787 {
1788         struct r1conf *conf = mddev->private;
1789         int err = 0;
1790         int number = rdev->raid_disk;
1791         struct raid1_info *p = conf->mirrors + number;
1792
1793         if (rdev != p->rdev)
1794                 p = conf->mirrors + conf->raid_disks + number;
1795
1796         print_conf(conf);
1797         if (rdev == p->rdev) {
1798                 if (test_bit(In_sync, &rdev->flags) ||
1799                     atomic_read(&rdev->nr_pending)) {
1800                         err = -EBUSY;
1801                         goto abort;
1802                 }
1803                 /* Only remove non-faulty devices if recovery
1804                  * is not possible.
1805                  */
1806                 if (!test_bit(Faulty, &rdev->flags) &&
1807                     mddev->recovery_disabled != conf->recovery_disabled &&
1808                     mddev->degraded < conf->raid_disks) {
1809                         err = -EBUSY;
1810                         goto abort;
1811                 }
1812                 p->rdev = NULL;
1813                 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1814                         synchronize_rcu();
1815                         if (atomic_read(&rdev->nr_pending)) {
1816                                 /* lost the race, try later */
1817                                 err = -EBUSY;
1818                                 p->rdev = rdev;
1819                                 goto abort;
1820                         }
1821                 }
1822                 if (conf->mirrors[conf->raid_disks + number].rdev) {
1823                         /* We just removed a device that is being replaced.
1824                          * Move down the replacement.  We drain all IO before
1825                          * doing this to avoid confusion.
1826                          */
1827                         struct md_rdev *repl =
1828                                 conf->mirrors[conf->raid_disks + number].rdev;
1829                         freeze_array(conf, 0);
1830                         clear_bit(Replacement, &repl->flags);
1831                         p->rdev = repl;
1832                         conf->mirrors[conf->raid_disks + number].rdev = NULL;
1833                         unfreeze_array(conf);
1834                         clear_bit(WantReplacement, &rdev->flags);
1835                 } else
1836                         clear_bit(WantReplacement, &rdev->flags);
1837                 err = md_integrity_register(mddev);
1838         }
1839 abort:
1840
1841         print_conf(conf);
1842         return err;
1843 }
1844
1845 static void end_sync_read(struct bio *bio)
1846 {
1847         struct r1bio *r1_bio = get_resync_r1bio(bio);
1848
1849         update_head_pos(r1_bio->read_disk, r1_bio);
1850
1851         /*
1852          * we have read a block, now it needs to be re-written,
1853          * or re-read if the read failed.
1854          * We don't do much here, just schedule handling by raid1d
1855          */
1856         if (!bio->bi_error)
1857                 set_bit(R1BIO_Uptodate, &r1_bio->state);
1858
1859         if (atomic_dec_and_test(&r1_bio->remaining))
1860                 reschedule_retry(r1_bio);
1861 }
1862
1863 static void end_sync_write(struct bio *bio)
1864 {
1865         int uptodate = !bio->bi_error;
1866         struct r1bio *r1_bio = get_resync_r1bio(bio);
1867         struct mddev *mddev = r1_bio->mddev;
1868         struct r1conf *conf = mddev->private;
1869         sector_t first_bad;
1870         int bad_sectors;
1871         struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1872
1873         if (!uptodate) {
1874                 sector_t sync_blocks = 0;
1875                 sector_t s = r1_bio->sector;
1876                 long sectors_to_go = r1_bio->sectors;
1877                 /* make sure these bits don't get cleared. */
1878                 do {
1879                         bitmap_end_sync(mddev->bitmap, s,
1880                                         &sync_blocks, 1);
1881                         s += sync_blocks;
1882                         sectors_to_go -= sync_blocks;
1883                 } while (sectors_to_go > 0);
1884                 set_bit(WriteErrorSeen, &rdev->flags);
1885                 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1886                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1888                 set_bit(R1BIO_WriteError, &r1_bio->state);
1889         } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1890                                &first_bad, &bad_sectors) &&
1891                    !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1892                                 r1_bio->sector,
1893                                 r1_bio->sectors,
1894                                 &first_bad, &bad_sectors)
1895                 )
1896                 set_bit(R1BIO_MadeGood, &r1_bio->state);
1897
1898         if (atomic_dec_and_test(&r1_bio->remaining)) {
1899                 int s = r1_bio->sectors;
1900                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1901                     test_bit(R1BIO_WriteError, &r1_bio->state))
1902                         reschedule_retry(r1_bio);
1903                 else {
1904                         put_buf(r1_bio);
1905                         md_done_sync(mddev, s, uptodate);
1906                 }
1907         }
1908 }
1909
1910 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1911                             int sectors, struct page *page, int rw)
1912 {
1913         if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1914                 /* success */
1915                 return 1;
1916         if (rw == WRITE) {
1917                 set_bit(WriteErrorSeen, &rdev->flags);
1918                 if (!test_and_set_bit(WantReplacement,
1919                                       &rdev->flags))
1920                         set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
1922         }
1923         /* need to record an error - either for the block or the device */
1924         if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1925                 md_error(rdev->mddev, rdev);
1926         return 0;
1927 }
1928
1929 static int fix_sync_read_error(struct r1bio *r1_bio)
1930 {
1931         /* Try some synchronous reads of other devices to get
1932          * good data, much like with normal read errors.  Only
1933          * read into the pages we already have so we don't
1934          * need to re-issue the read request.
1935          * We don't need to freeze the array, because being in an
1936          * active sync request, there is no normal IO, and
1937          * no overlapping syncs.
1938          * We don't need to check is_badblock() again as we
1939          * made sure that anything with a bad block in range
1940          * will have bi_end_io clear.
1941          */
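             /* Note: the loop below works in chunks of up to PAGE_SIZE,
              * making up to three passes per chunk: read the chunk from
              * another device into the same resync page, write the good data
              * back over the devices tried before the successful one, then
              * re-read those copies to verify, crediting corrected_errors on
              * success.
              */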
1942         struct mddev *mddev = r1_bio->mddev;
1943         struct r1conf *conf = mddev->private;
1944         struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1945         struct page **pages = get_resync_pages(bio)->pages;
1946         sector_t sect = r1_bio->sector;
1947         int sectors = r1_bio->sectors;
1948         int idx = 0;
1949         struct md_rdev *rdev;
1950
1951         rdev = conf->mirrors[r1_bio->read_disk].rdev;
1952         if (test_bit(FailFast, &rdev->flags)) {
1953                 /* Don't try recovering from here - just fail it
1954                  * ... unless it is the last working device of course */
1955                 md_error(mddev, rdev);
1956                 if (test_bit(Faulty, &rdev->flags))
1957                         /* Don't try to read from here, but make sure
1958                          * put_buf does its thing
1959                          */
1960                         bio->bi_end_io = end_sync_write;
1961         }
1962
1963         while (sectors) {
1964                 int s = sectors;
1965                 int d = r1_bio->read_disk;
1966                 int success = 0;
1967                 int start;
1968
1969                 if (s > (PAGE_SIZE>>9))
1970                         s = PAGE_SIZE >> 9;
1971                 do {
1972                         if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1973                                 /* No rcu protection needed here; devices
1974                                  * can only be removed when no resync is
1975                                  * active, and resync is currently active
1976                                  */
1977                                 rdev = conf->mirrors[d].rdev;
1978                                 if (sync_page_io(rdev, sect, s<<9,
1979                                                  pages[idx],
1980                                                  REQ_OP_READ, 0, false)) {
1981                                         success = 1;
1982                                         break;
1983                                 }
1984                         }
1985                         d++;
1986                         if (d == conf->raid_disks * 2)
1987                                 d = 0;
1988                 } while (!success && d != r1_bio->read_disk);
1989
1990                 if (!success) {
1991                         char b[BDEVNAME_SIZE];
1992                         int abort = 0;
1993                         /* Cannot read from anywhere, this block is lost.
1994                          * Record a bad block on each device.  If that doesn't
1995                          * work just disable and interrupt the recovery.
1996                          * Don't fail devices as that won't really help.
1997                          */
1998                         pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1999                                             mdname(mddev),
2000                                             bdevname(bio->bi_bdev, b),
2001                                             (unsigned long long)r1_bio->sector);
2002                         for (d = 0; d < conf->raid_disks * 2; d++) {
2003                                 rdev = conf->mirrors[d].rdev;
2004                                 if (!rdev || test_bit(Faulty, &rdev->flags))
2005                                         continue;
2006                                 if (!rdev_set_badblocks(rdev, sect, s, 0))
2007                                         abort = 1;
2008                         }
2009                         if (abort) {
2010                                 conf->recovery_disabled =
2011                                         mddev->recovery_disabled;
2012                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2013                                 md_done_sync(mddev, r1_bio->sectors, 0);
2014                                 put_buf(r1_bio);
2015                                 return 0;
2016                         }
2017                         /* Try next page */
2018                         sectors -= s;
2019                         sect += s;
2020                         idx++;
2021                         continue;
2022                 }
2023
2024                 start = d;
2025                 /* write it back and re-read */
2026                 while (d != r1_bio->read_disk) {
2027                         if (d == 0)
2028                                 d = conf->raid_disks * 2;
2029                         d--;
2030                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2031                                 continue;
2032                         rdev = conf->mirrors[d].rdev;
2033                         if (r1_sync_page_io(rdev, sect, s,
2034                                             pages[idx],
2035                                             WRITE) == 0) {
2036                                 r1_bio->bios[d]->bi_end_io = NULL;
2037                                 rdev_dec_pending(rdev, mddev);
2038                         }
2039                 }
2040                 d = start;
2041                 while (d != r1_bio->read_disk) {
2042                         if (d == 0)
2043                                 d = conf->raid_disks * 2;
2044                         d--;
2045                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2046                                 continue;
2047                         rdev = conf->mirrors[d].rdev;
2048                         if (r1_sync_page_io(rdev, sect, s,
2049                                             pages[idx],
2050                                             READ) != 0)
2051                                 atomic_add(s, &rdev->corrected_errors);
2052                 }
2053                 sectors -= s;
2054                 sect += s;
2055                 idx++;
2056         }
2057         set_bit(R1BIO_Uptodate, &r1_bio->state);
2058         bio->bi_error = 0;
2059         return 1;
2060 }
2061
2062 static void process_checks(struct r1bio *r1_bio)
2063 {
2064         /* We have read all readable devices.  If we haven't
2065          * got the block, then there is no hope left.
2066          * If we have, then we want to do a comparison
2067          * and skip the write if everything is the same.
2068          * If any blocks failed to read, then we need to
2069          * attempt an over-write
2070          */
2071         struct mddev *mddev = r1_bio->mddev;
2072         struct r1conf *conf = mddev->private;
2073         int primary;
2074         int i;
2075         int vcnt;
2076
2077         /* Fix variable parts of all bios */
2078         vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
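             /* Note: vcnt is the number of pages covering the resync range;
              * e.g. with 4KiB pages, r1_bio->sectors == 128 (64KiB) gives
              * vcnt == 16.
              */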
2079         for (i = 0; i < conf->raid_disks * 2; i++) {
2080                 int j;
2081                 int size;
2082                 int error;
2083                 struct bio_vec *bi;
2084                 struct bio *b = r1_bio->bios[i];
2085                 struct resync_pages *rp = get_resync_pages(b);
2086                 if (b->bi_end_io != end_sync_read)
2087                         continue;
2088                 /* fixup the bio for reuse, but preserve errno */
2089                 error = b->bi_error;
2090                 bio_reset(b);
2091                 b->bi_error = error;
2092                 b->bi_vcnt = vcnt;
2093                 b->bi_iter.bi_size = r1_bio->sectors << 9;
2094                 b->bi_iter.bi_sector = r1_bio->sector +
2095                         conf->mirrors[i].rdev->data_offset;
2096                 b->bi_bdev = conf->mirrors[i].rdev->bdev;
2097                 b->bi_end_io = end_sync_read;
2098                 rp->raid_bio = r1_bio;
2099                 b->bi_private = rp;
2100
2101                 size = b->bi_iter.bi_size;
2102                 bio_for_each_segment_all(bi, b, j) {
2103                         bi->bv_offset = 0;
2104                         if (size > PAGE_SIZE)
2105                                 bi->bv_len = PAGE_SIZE;
2106                         else
2107                                 bi->bv_len = size;
2108                         size -= PAGE_SIZE;
2109                 }
2110         }
2111         for (primary = 0; primary < conf->raid_disks * 2; primary++)
2112                 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2113                     !r1_bio->bios[primary]->bi_error) {
2114                         r1_bio->bios[primary]->bi_end_io = NULL;
2115                         rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2116                         break;
2117                 }
2118         r1_bio->read_disk = primary;
2119         for (i = 0; i < conf->raid_disks * 2; i++) {
2120                 int j;
2121                 struct bio *pbio = r1_bio->bios[primary];
2122                 struct bio *sbio = r1_bio->bios[i];
2123                 int error = sbio->bi_error;
2124                 struct page **ppages = get_resync_pages(pbio)->pages;
2125                 struct page **spages = get_resync_pages(sbio)->pages;
2126                 struct bio_vec *bi;
2127                 int page_len[RESYNC_PAGES] = { 0 };
2128
2129                 if (sbio->bi_end_io != end_sync_read)
2130                         continue;
2131                 /* Now we can 'fixup' the error value */
2132                 sbio->bi_error = 0;
2133
2134                 bio_for_each_segment_all(bi, sbio, j)
2135                         page_len[j] = bi->bv_len;
2136
2137                 if (!error) {
2138                         for (j = vcnt; j-- ; ) {
2139                                 if (memcmp(page_address(ppages[j]),
2140                                            page_address(spages[j]),
2141                                            page_len[j]))
2142                                         break;
2143                         }
2144                 } else
2145                         j = 0;
2146                 if (j >= 0)
2147                         atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2148                 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2149                               && !error)) {
2150                         /* No need to write to this device. */
2151                         sbio->bi_end_io = NULL;
2152                         rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2153                         continue;
2154                 }
2155
2156                 bio_copy_data(sbio, pbio);
2157         }
2158 }
2159
2160 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2161 {
2162         struct r1conf *conf = mddev->private;
2163         int i;
2164         int disks = conf->raid_disks * 2;
2165         struct bio *bio, *wbio;
2166
2167         bio = r1_bio->bios[r1_bio->read_disk];
2168
2169         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2170                 /* ouch - failed to read all of that. */
2171                 if (!fix_sync_read_error(r1_bio))
2172                         return;
2173
2174         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2175                 process_checks(r1_bio);
2176
2177         /*
2178          * schedule writes
2179          */
2180         atomic_set(&r1_bio->remaining, 1);
2181         for (i = 0; i < disks ; i++) {
2182                 wbio = r1_bio->bios[i];
2183                 if (wbio->bi_end_io == NULL ||
2184                     (wbio->bi_end_io == end_sync_read &&
2185                      (i == r1_bio->read_disk ||
2186                       !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2187                         continue;
2188                 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2189                         continue;
2190
2191                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2192                 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2193                         wbio->bi_opf |= MD_FAILFAST;
2194
2195                 wbio->bi_end_io = end_sync_write;
2196                 atomic_inc(&r1_bio->remaining);
2197                 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2198
2199                 generic_make_request(wbio);
2200         }
2201
2202         if (atomic_dec_and_test(&r1_bio->remaining)) {
2203                 /* if we're here, all write(s) have completed, so clean up */
2204                 int s = r1_bio->sectors;
2205                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2206                     test_bit(R1BIO_WriteError, &r1_bio->state))
2207                         reschedule_retry(r1_bio);
2208                 else {
2209                         put_buf(r1_bio);
2210                         md_done_sync(mddev, s, 1);
2211                 }
2212         }
2213 }
2214
2215 /*
2216  * This is a kernel thread which:
2217  *
2218  *      1.      Retries failed read operations on working mirrors.
2219  *      2.      Updates the raid superblock when problems are encountered.
2220  *      3.      Performs writes following reads for array synchronising.
2221  */
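     /* Note: failed r1bios reach this thread via reschedule_retry(), which
      * queues them on conf->retry_list and wakes mddev->thread; raid1d then
      * dispatches them to the handle_*() helpers.
      */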
2222
2223 static void fix_read_error(struct r1conf *conf, int read_disk,
2224                            sector_t sect, int sectors)
2225 {
2226         struct mddev *mddev = conf->mddev;
2227         while (sectors) {
2228                 int s = sectors;
2229                 int d = read_disk;
2230                 int success = 0;
2231                 int start;
2232                 struct md_rdev *rdev;
2233
2234                 if (s > (PAGE_SIZE>>9))
2235                         s = PAGE_SIZE >> 9;
2236
2237                 do {
2238                         sector_t first_bad;
2239                         int bad_sectors;
2240
2241                         rcu_read_lock();
2242                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2243                         if (rdev &&
2244                             (test_bit(In_sync, &rdev->flags) ||
2245                              (!test_bit(Faulty, &rdev->flags) &&
2246                               rdev->recovery_offset >= sect + s)) &&
2247                             is_badblock(rdev, sect, s,
2248                                         &first_bad, &bad_sectors) == 0) {
2249                                 atomic_inc(&rdev->nr_pending);
2250                                 rcu_read_unlock();
2251                                 if (sync_page_io(rdev, sect, s<<9,
2252                                          conf->tmppage, REQ_OP_READ, 0, false))
2253                                         success = 1;
2254                                 rdev_dec_pending(rdev, mddev);
2255                                 if (success)
2256                                         break;
2257                         } else
2258                                 rcu_read_unlock();
2259                         d++;
2260                         if (d == conf->raid_disks * 2)
2261                                 d = 0;
2262                 } while (!success && d != read_disk);
2263
2264                 if (!success) {
2265                         /* Cannot read from anywhere - mark it bad */
2266                         struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2267                         if (!rdev_set_badblocks(rdev, sect, s, 0))
2268                                 md_error(mddev, rdev);
2269                         break;
2270                 }
2271                 /* write it back and re-read */
2272                 start = d;
2273                 while (d != read_disk) {
2274                         if (d == 0)
2275                                 d = conf->raid_disks * 2;
2276                         d--;
2277                         rcu_read_lock();
2278                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2279                         if (rdev &&
2280                             !test_bit(Faulty, &rdev->flags)) {
2281                                 atomic_inc(&rdev->nr_pending);
2282                                 rcu_read_unlock();
2283                                 r1_sync_page_io(rdev, sect, s,
2284                                                 conf->tmppage, WRITE);
2285                                 rdev_dec_pending(rdev, mddev);
2286                         } else
2287                                 rcu_read_unlock();
2288                 }
2289                 d = start;
2290                 while (d != read_disk) {
2291                         char b[BDEVNAME_SIZE];
2292                         if (d == 0)
2293                                 d = conf->raid_disks * 2;
2294                         d--;
2295                         rcu_read_lock();
2296                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2297                         if (rdev &&
2298                             !test_bit(Faulty, &rdev->flags)) {
2299                                 atomic_inc(&rdev->nr_pending);
2300                                 rcu_read_unlock();
2301                                 if (r1_sync_page_io(rdev, sect, s,
2302                                                     conf->tmppage, READ)) {
2303                                         atomic_add(s, &rdev->corrected_errors);
2304                                         pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2305                                                 mdname(mddev), s,
2306                                                 (unsigned long long)(sect +
2307                                                                      rdev->data_offset),
2308                                                 bdevname(rdev->bdev, b));
2309                                 }
2310                                 rdev_dec_pending(rdev, mddev);
2311                         } else
2312                                 rcu_read_unlock();
2313                 }
2314                 sectors -= s;
2315                 sect += s;
2316         }
2317 }
2318
2319 static int narrow_write_error(struct r1bio *r1_bio, int i)
2320 {
2321         struct mddev *mddev = r1_bio->mddev;
2322         struct r1conf *conf = mddev->private;
2323         struct md_rdev *rdev = conf->mirrors[i].rdev;
2324
2325         /* bio has the data to be written to device 'i' where
2326          * we just recently had a write error.
2327          * We repeatedly clone the bio and trim down to one block,
2328          * then try the write.  Where the write fails we record
2329          * a bad block.
2330          * It is conceivable that the bio doesn't exactly align with
2331          * blocks.  We must handle this somehow.
2332          *
2333          * We currently own a reference on the rdev.
2334          */
2335
2336         int block_sectors;
2337         sector_t sector;
2338         int sectors;
2339         int sect_to_write = r1_bio->sectors;
2340         int ok = 1;
2341
2342         if (rdev->badblocks.shift < 0)
2343                 return 0;
2344
2345         block_sectors = roundup(1 << rdev->badblocks.shift,
2346                                 bdev_logical_block_size(rdev->bdev) >> 9);
2347         sector = r1_bio->sector;
2348         sectors = ((sector + block_sectors)
2349                    & ~(sector_t)(block_sectors - 1))
2350                 - sector;
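             /* Note: 'sectors' is the distance from 'sector' to the next
              * block_sectors boundary; e.g. with block_sectors == 8 and
              * sector == 13, the first write covers 3 sectors (13-15), after
              * which writes proceed in whole 8-sector blocks.
              */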
2351
2352         while (sect_to_write) {
2353                 struct bio *wbio;
2354                 if (sectors > sect_to_write)
2355                         sectors = sect_to_write;
2356                 /* Write at 'sector' for 'sectors' */
2357
2358                 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2359                         wbio = bio_clone_fast(r1_bio->behind_master_bio,
2360                                               GFP_NOIO,
2361                                               mddev->bio_set);
2362                         /* We really need a "_all" (full-coverage) clone */
2363                         wbio->bi_iter = (struct bvec_iter){ 0 };
2364                 } else {
2365                         wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2366                                               mddev->bio_set);
2367                 }
2368
2369                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2370                 wbio->bi_iter.bi_sector = r1_bio->sector;
2371                 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2372
2373                 bio_trim(wbio, sector - r1_bio->sector, sectors);
2374                 wbio->bi_iter.bi_sector += rdev->data_offset;
2375                 wbio->bi_bdev = rdev->bdev;
2376
2377                 if (submit_bio_wait(wbio) < 0)
2378                         /* failure! */
2379                         ok = rdev_set_badblocks(rdev, sector,
2380                                                 sectors, 0)
2381                                 && ok;
2382
2383                 bio_put(wbio);
2384                 sect_to_write -= sectors;
2385                 sector += sectors;
2386                 sectors = block_sectors;
2387         }
2388         return ok;
2389 }
2390
2391 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2392 {
2393         int m;
2394         int s = r1_bio->sectors;
2395         for (m = 0; m < conf->raid_disks * 2 ; m++) {
2396                 struct md_rdev *rdev = conf->mirrors[m].rdev;
2397                 struct bio *bio = r1_bio->bios[m];
2398                 if (bio->bi_end_io == NULL)
2399                         continue;
2400                 if (!bio->bi_error &&
2401                     test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2402                         rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2403                 }
2404                 if (bio->bi_error &&
2405                     test_bit(R1BIO_WriteError, &r1_bio->state)) {
2406                         if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2407                                 md_error(conf->mddev, rdev);
2408                 }
2409         }
2410         put_buf(r1_bio);
2411         md_done_sync(conf->mddev, s, 1);
2412 }
2413
2414 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2415 {
2416         int m, idx;
2417         bool fail = false;
2418
2419         for (m = 0; m < conf->raid_disks * 2 ; m++)
2420                 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2421                         struct md_rdev *rdev = conf->mirrors[m].rdev;
2422                         rdev_clear_badblocks(rdev,
2423                                              r1_bio->sector,
2424                                              r1_bio->sectors, 0);
2425                         rdev_dec_pending(rdev, conf->mddev);
2426                 } else if (r1_bio->bios[m] != NULL) {
2427                         /* This drive got a write error.  We need to
2428                          * narrow down and record precise write
2429                          * errors.
2430                          */
2431                         fail = true;
2432                         if (!narrow_write_error(r1_bio, m)) {
2433                                 md_error(conf->mddev,
2434                                          conf->mirrors[m].rdev);
2435                                 /* an I/O failed, we can't clear the bitmap */
2436                                 set_bit(R1BIO_Degraded, &r1_bio->state);
2437                         }
2438                         rdev_dec_pending(conf->mirrors[m].rdev,
2439                                          conf->mddev);
2440                 }
2441         if (fail) {
2442                 spin_lock_irq(&conf->device_lock);
2443                 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2444                 idx = sector_to_idx(r1_bio->sector);
2445                 atomic_inc(&conf->nr_queued[idx]);
2446                 spin_unlock_irq(&conf->device_lock);
2447                 /*
2448                  * In case freeze_array() is waiting for the condition
2449                  * get_unqueued_pending() == extra to become true.
2450                  */
2451                 wake_up(&conf->wait_barrier);
2452                 md_wakeup_thread(conf->mddev->thread);
2453         } else {
2454                 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2455                         close_write(r1_bio);
2456                 raid_end_bio_io(r1_bio);
2457         }
2458 }
2459
2460 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2461 {
2462         struct mddev *mddev = conf->mddev;
2463         struct bio *bio;
2464         struct md_rdev *rdev;
2465         dev_t bio_dev;
2466         sector_t bio_sector;
2467
2468         clear_bit(R1BIO_ReadError, &r1_bio->state);
2469         /* we got a read error. Maybe the drive is bad.  Maybe just
2470          * the block and we can fix it.
2471          * We freeze all other IO, and try reading the block from
2472          * other devices.  When we find one, we re-write
2473          * and check whether that fixes the read error.
2474          * This is all done synchronously while the array is
2475          * frozen.
2476          */
2477
2478         bio = r1_bio->bios[r1_bio->read_disk];
2479         bio_dev = bio->bi_bdev->bd_dev;
2480         bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
2481         bio_put(bio);
2482         r1_bio->bios[r1_bio->read_disk] = NULL;
2483
2484         rdev = conf->mirrors[r1_bio->read_disk].rdev;
2485         if (mddev->ro == 0
2486             && !test_bit(FailFast, &rdev->flags)) {
2487                 freeze_array(conf, 1);
2488                 fix_read_error(conf, r1_bio->read_disk,
2489                                r1_bio->sector, r1_bio->sectors);
2490                 unfreeze_array(conf);
2491         } else {
2492                 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2493         }
2494
2495         rdev_dec_pending(rdev, conf->mddev);
2496         allow_barrier(conf, r1_bio->sector);
2497         bio = r1_bio->master_bio;
2498
2499         /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2500         r1_bio->state = 0;
2501         raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2502 }
2503
2504 static void raid1d(struct md_thread *thread)
2505 {
2506         struct mddev *mddev = thread->mddev;
2507         struct r1bio *r1_bio;
2508         unsigned long flags;
2509         struct r1conf *conf = mddev->private;
2510         struct list_head *head = &conf->retry_list;
2511         struct blk_plug plug;
2512         int idx;
2513
2514         md_check_recovery(mddev);
2515
2516         if (!list_empty_careful(&conf->bio_end_io_list) &&
2517             !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2518                 LIST_HEAD(tmp);
2519                 spin_lock_irqsave(&conf->device_lock, flags);
2520                 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2521                         list_splice_init(&conf->bio_end_io_list, &tmp);
2522                 spin_unlock_irqrestore(&conf->device_lock, flags);
2523                 while (!list_empty(&tmp)) {
2524                         r1_bio = list_first_entry(&tmp, struct r1bio,
2525                                                   retry_list);
2526                         list_del(&r1_bio->retry_list);
2527                         idx = sector_to_idx(r1_bio->sector);
2528                         atomic_dec(&conf->nr_queued[idx]);
2529                         if (mddev->degraded)
2530                                 set_bit(R1BIO_Degraded, &r1_bio->state);
2531                         if (test_bit(R1BIO_WriteError, &r1_bio->state))
2532                                 close_write(r1_bio);
2533                         raid_end_bio_io(r1_bio);
2534                 }
2535         }
2536
2537         blk_start_plug(&plug);
2538         for (;;) {
2539
2540                 flush_pending_writes(conf);
2541
2542                 spin_lock_irqsave(&conf->device_lock, flags);
2543                 if (list_empty(head)) {
2544                         spin_unlock_irqrestore(&conf->device_lock, flags);
2545                         break;
2546                 }
2547                 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2548                 list_del(head->prev);
2549                 idx = sector_to_idx(r1_bio->sector);
2550                 atomic_dec(&conf->nr_queued[idx]);
2551                 spin_unlock_irqrestore(&conf->device_lock, flags);
2552
2553                 mddev = r1_bio->mddev;
2554                 conf = mddev->private;
2555                 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2556                         if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2557                             test_bit(R1BIO_WriteError, &r1_bio->state))
2558                                 handle_sync_write_finished(conf, r1_bio);
2559                         else
2560                                 sync_request_write(mddev, r1_bio);
2561                 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2562                            test_bit(R1BIO_WriteError, &r1_bio->state))
2563                         handle_write_finished(conf, r1_bio);
2564                 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2565                         handle_read_error(conf, r1_bio);
2566                 else
2567                         WARN_ON_ONCE(1);
2568
2569                 cond_resched();
2570                 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2571                         md_check_recovery(mddev);
2572         }
2573         blk_finish_plug(&plug);
2574 }
2575
2576 static int init_resync(struct r1conf *conf)
2577 {
2578         int buffs;
2579
2580         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2581         BUG_ON(conf->r1buf_pool);
2582         conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2583                                           conf->poolinfo);
2584         if (!conf->r1buf_pool)
2585                 return -ENOMEM;
2586         return 0;
2587 }
2588
2589 /*
2590  * perform a "sync" on one "block"
2591  *
2592  * We need to make sure that no normal I/O request - particularly write
2593  * requests - conflict with active sync requests.
2594  *
2595  * This is achieved by tracking pending requests and a 'barrier' concept
2596  * that can be installed to exclude normal IO requests.
2597  */
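     /* Note: barriers are accounted per 64MB bucket (see sector_to_idx()),
      * so a resync barrier in one bucket only excludes normal I/O that falls
      * in the same bucket rather than stalling the whole array.
      */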
2598
2599 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2600                                    int *skipped)
2601 {
2602         struct r1conf *conf = mddev->private;
2603         struct r1bio *r1_bio;
2604         struct bio *bio;
2605         sector_t max_sector, nr_sectors;
2606         int disk = -1;
2607         int i;
2608         int wonly = -1;
2609         int write_targets = 0, read_targets = 0;
2610         sector_t sync_blocks;
2611         int still_degraded = 0;
2612         int good_sectors = RESYNC_SECTORS;
2613         int min_bad = 0; /* number of sectors that are bad in all devices */
2614         int idx = sector_to_idx(sector_nr);
2615
2616         if (!conf->r1buf_pool)
2617                 if (init_resync(conf))
2618                         return 0;
2619
2620         max_sector = mddev->dev_sectors;
2621         if (sector_nr >= max_sector) {
2622                 /* If we aborted, we need to abort the
2623                  * sync on the 'current' bitmap chunk (there will
2624                  * only be one in raid1 resync).
2625                  * We can find the current address in mddev->curr_resync.
2626                  */
2627                 if (mddev->curr_resync < max_sector) /* aborted */
2628                         bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2629                                                 &sync_blocks, 1);
2630                 else /* completed sync */
2631                         conf->fullsync = 0;
2632
2633                 bitmap_close_sync(mddev->bitmap);
2634                 close_sync(conf);
2635
2636                 if (mddev_is_clustered(mddev)) {
2637                         conf->cluster_sync_low = 0;
2638                         conf->cluster_sync_high = 0;
2639                 }
2640                 return 0;
2641         }
2642
2643         if (mddev->bitmap == NULL &&
2644             mddev->recovery_cp == MaxSector &&
2645             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2646             conf->fullsync == 0) {
2647                 *skipped = 1;
2648                 return max_sector - sector_nr;
2649         }
2650         /* Before building a request, check if we can skip these blocks.
2651          * This call to bitmap_start_sync doesn't actually record anything.
2652          */
2653         if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2654             !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2655                 /* We can skip this block, and probably several more */
2656                 *skipped = 1;
2657                 return sync_blocks;
2658         }
2659
2660         /*
2661          * If there is non-resync activity waiting for a turn, then let it
2662          * through before starting on this new sync request.
2663          */
2664         if (atomic_read(&conf->nr_waiting[idx]))
2665                 schedule_timeout_uninterruptible(1);
2666
2667         /* we are incrementing sector_nr below. To be safe, we check against
2668          * sector_nr + two times RESYNC_SECTORS
2669          */
2670
2671         bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2672                 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2673         r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2674
2675         raise_barrier(conf, sector_nr);
2676
2677         rcu_read_lock();
2678         /*
2679          * If we get a correctably read error during resync or recovery,
2680          * we might want to read from a different device.  So we
2681          * flag all drives that could conceivably be read from for READ,
2682          * and any others (which will be non-In_sync devices) for WRITE.
2683          * If a read fails, we try reading from something else for which READ
2684          * is OK.
2685          */
2686
2687         r1_bio->mddev = mddev;
2688         r1_bio->sector = sector_nr;
2689         r1_bio->state = 0;
2690         set_bit(R1BIO_IsSync, &r1_bio->state);
2691         /* make sure good_sectors won't go across barrier unit boundary */
2692         good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2693
2694         for (i = 0; i < conf->raid_disks * 2; i++) {
2695                 struct md_rdev *rdev;
2696                 bio = r1_bio->bios[i];
2697
2698                 rdev = rcu_dereference(conf->mirrors[i].rdev);
2699                 if (rdev == NULL ||
2700                     test_bit(Faulty, &rdev->flags)) {
2701                         if (i < conf->raid_disks)
2702                                 still_degraded = 1;
2703                 } else if (!test_bit(In_sync, &rdev->flags)) {
2704                         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2705                         bio->bi_end_io = end_sync_write;
2706                         write_targets++;
2707                 } else {
2708                         /* may need to read from here */
2709                         sector_t first_bad = MaxSector;
2710                         int bad_sectors;
2711
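                             /*
                              * Clip this request at the first bad block: if
                              * the bad range starts after sector_nr, shrink
                              * good_sectors to end before it; if it covers
                              * sector_nr, track the shortest such bad run in
                              * min_bad.
                              */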
2712                         if (is_badblock(rdev, sector_nr, good_sectors,
2713                                         &first_bad, &bad_sectors)) {
2714                                 if (first_bad > sector_nr)
2715                                         good_sectors = first_bad - sector_nr;
2716                                 else {
2717                                         bad_sectors -= (sector_nr - first_bad);
2718                                         if (min_bad == 0 ||
2719                                             min_bad > bad_sectors)
2720                                                 min_bad = bad_sectors;
2721                                 }
2722                         }
2723                         if (sector_nr < first_bad) {
2724                                 if (test_bit(WriteMostly, &rdev->flags)) {
2725                                         if (wonly < 0)
2726                                                 wonly = i;
2727                                 } else {
2728                                         if (disk < 0)
2729                                                 disk = i;
2730                                 }
2731                                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2732                                 bio->bi_end_io = end_sync_read;
2733                                 read_targets++;
2734                         } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2735                                 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2736                                 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2737                                 /*
2738                                  * The device is suitable for reading (InSync),
2739                                  * but has bad block(s) here. Let's try to correct them,
2740                                  * if we are doing resync or repair. Otherwise, leave
2741                                  * this device alone for this sync request.
2742                                  */
2743                                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2744                                 bio->bi_end_io = end_sync_write;
2745                                 write_targets++;
2746                         }
2747                 }
2748                 if (bio->bi_end_io) {
2749                         atomic_inc(&rdev->nr_pending);
2750                         bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2751                         bio->bi_bdev = rdev->bdev;
2752                         if (test_bit(FailFast, &rdev->flags))
2753                                 bio->bi_opf |= MD_FAILFAST;
2754                 }
2755         }
2756         rcu_read_unlock();
2757         if (disk < 0)
2758                 disk = wonly;
2759         r1_bio->read_disk = disk;
2760
2761         if (read_targets == 0 && min_bad > 0) {
2762                 /* These sectors are bad on all InSync devices, so we
2763                  * need to mark them bad on all write targets
2764                  */
2765                 int ok = 1;
2766                 for (i = 0; i < conf->raid_disks * 2; i++)
2767                         if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2768                                 struct md_rdev *rdev = conf->mirrors[i].rdev;
2769                                 ok = rdev_set_badblocks(rdev, sector_nr,
2770                                                         min_bad, 0) && ok;
2772                         }
2773                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2774                 *skipped = 1;
2775                 put_buf(r1_bio);
2776
2777                 if (!ok) {
2778                         /* Cannot record the badblocks, so we need to
2779                          * abort the resync.
2780                          * If there are multiple read targets, could we
2781                          * just fail the really bad ones?
2782                          */
2783                         conf->recovery_disabled = mddev->recovery_disabled;
2784                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2785                         return 0;
2786                 } else
2787                         return min_bad;
2788
2789         }
2790         if (min_bad > 0 && min_bad < good_sectors) {
2791                 /* only resync enough to reach the next bad->good
2792                  * transition */
2793                 good_sectors = min_bad;
2794         }
2795
2796         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2797                 /* extra read targets are also write targets */
2798                 write_targets += read_targets-1;
2799
2800         if (write_targets == 0 || read_targets == 0) {
2801                 /* There is nowhere to write, so all non-sync
2802                  * drives must have failed - we are finished
2803                  */
2804                 sector_t rv;
2805                 if (min_bad > 0)
2806                         max_sector = sector_nr + min_bad;
2807                 rv = max_sector - sector_nr;
2808                 *skipped = 1;
2809                 put_buf(r1_bio);
2810                 return rv;
2811         }
2812
2813         if (max_sector > mddev->resync_max)
2814                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2815         if (max_sector > sector_nr + good_sectors)
2816                 max_sector = sector_nr + good_sectors;
2817         nr_sectors = 0;
2818         sync_blocks = 0;
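             /*
              * Assemble the request one PAGE_SIZE chunk at a time,
              * clipping the final chunk to max_sector and, when a bitmap
              * is in use, to the current sync_blocks extent.
              */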
2819         do {
2820                 struct page *page;
2821                 int len = PAGE_SIZE;
2822                 if (sector_nr + (len>>9) > max_sector)
2823                         len = (max_sector - sector_nr) << 9;
2824                 if (len == 0)
2825                         break;
2826                 if (sync_blocks == 0) {
2827                         if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2828                                                &sync_blocks, still_degraded) &&
2829                             !conf->fullsync &&
2830                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2831                                 break;
2832                         if ((len >> 9) > sync_blocks)
2833                                 len = sync_blocks<<9;
2834                 }
2835
2836                 for (i = 0; i < conf->raid_disks * 2; i++) {
2837                         struct resync_pages *rp;
2838
2839                         bio = r1_bio->bios[i];
2840                         rp = get_resync_pages(bio);
2841                         if (bio->bi_end_io) {
2842                                 page = resync_fetch_page(rp, rp->idx++);
2843
2844                                 /*
2845                                  * won't fail because the vec table is big
2846                                  * enough to hold all these pages
2847                                  */
2848                                 bio_add_page(bio, page, len, 0);
2849                         }
2850                 }
2851                 nr_sectors += len>>9;
2852                 sector_nr += len>>9;
2853                 sync_blocks -= (len>>9);
2854         } while (get_resync_pages(r1_bio->bios[disk])->idx < RESYNC_PAGES);
2855
2856         r1_bio->sectors = nr_sectors;
2857
2858         if (mddev_is_clustered(mddev) &&
2859                         conf->cluster_sync_high < sector_nr + nr_sectors) {
2860                 conf->cluster_sync_low = mddev->curr_resync_completed;
2861                 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2862                 /* Send resync message */
2863                 md_cluster_ops->resync_info_update(mddev,
2864                                 conf->cluster_sync_low,
2865                                 conf->cluster_sync_high);
2866         }
2867
2868         /* For a user-requested sync, we read all readable devices and do a
2869          * compare
2870          */
2871         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2872                 atomic_set(&r1_bio->remaining, read_targets);
2873                 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2874                         bio = r1_bio->bios[i];
2875                         if (bio->bi_end_io == end_sync_read) {
2876                                 read_targets--;
2877                                 md_sync_acct(bio->bi_bdev, nr_sectors);
2878                                 if (read_targets == 1)
2879                                         bio->bi_opf &= ~MD_FAILFAST;
2880                                 generic_make_request(bio);
2881                         }
2882                 }
2883         } else {
2884                 atomic_set(&r1_bio->remaining, 1);
2885                 bio = r1_bio->bios[r1_bio->read_disk];
2886                 md_sync_acct(bio->bi_bdev, nr_sectors);
2887                 if (read_targets == 1)
2888                         bio->bi_opf &= ~MD_FAILFAST;
2889                 generic_make_request(bio);
2891         }
2892         return nr_sectors;
2893 }
2894
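     /*
      * Mirrors hold identical data, so the usable array size is just the
      * per-device size (mddev->dev_sectors) unless an explicit size was
      * requested.
      */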
2895 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2896 {
2897         if (sectors)
2898                 return sectors;
2899
2900         return mddev->dev_sectors;
2901 }
2902
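     /*
      * Build the r1conf for this array: the per-bucket barrier counters,
      * a mirrors array with a second half reserved for replacement
      * devices, the r1bio mempool and split bioset, and the raid1d
      * worker thread.  On any failure, unwind and return an ERR_PTR.
      */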
2903 static struct r1conf *setup_conf(struct mddev *mddev)
2904 {
2905         struct r1conf *conf;
2906         int i;
2907         struct raid1_info *disk;
2908         struct md_rdev *rdev;
2909         int err = -ENOMEM;
2910
2911         conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2912         if (!conf)
2913                 goto abort;
2914
2915         conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2916                                    sizeof(atomic_t), GFP_KERNEL);
2917         if (!conf->nr_pending)
2918                 goto abort;
2919
2920         conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2921                                    sizeof(atomic_t), GFP_KERNEL);
2922         if (!conf->nr_waiting)
2923                 goto abort;
2924
2925         conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2926                                   sizeof(atomic_t), GFP_KERNEL);
2927         if (!conf->nr_queued)
2928                 goto abort;
2929
2930         conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2931                                 sizeof(atomic_t), GFP_KERNEL);
2932         if (!conf->barrier)
2933                 goto abort;
2934
2935         conf->mirrors = kzalloc(sizeof(struct raid1_info)
2936                                 * mddev->raid_disks * 2,
2937                                  GFP_KERNEL);
2938         if (!conf->mirrors)
2939                 goto abort;
2940
2941         conf->tmppage = alloc_page(GFP_KERNEL);
2942         if (!conf->tmppage)
2943                 goto abort;
2944
2945         conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2946         if (!conf->poolinfo)
2947                 goto abort;
2948         conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2949         conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2950                                           r1bio_pool_free,
2951                                           conf->poolinfo);
2952         if (!conf->r1bio_pool)
2953                 goto abort;
2954
2955         conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
2956         if (!conf->bio_split)
2957                 goto abort;
2958
2959         conf->poolinfo->mddev = mddev;
2960
2961         err = -EINVAL;
2962         spin_lock_init(&conf->device_lock);
2963         rdev_for_each(rdev, mddev) {
2965                 int disk_idx = rdev->raid_disk;
2966                 if (disk_idx >= mddev->raid_disks
2967                     || disk_idx < 0)
2968                         continue;
2969                 if (test_bit(Replacement, &rdev->flags))
2970                         disk = conf->mirrors + mddev->raid_disks + disk_idx;
2971                 else
2972                         disk = conf->mirrors + disk_idx;
2973
2974                 if (disk->rdev)
2975                         goto abort;
2976                 disk->rdev = rdev;
2978
2979                 disk->head_position = 0;
2980                 disk->seq_start = MaxSector;
2981         }
2982         conf->raid_disks = mddev->raid_disks;
2983         conf->mddev = mddev;
2984         INIT_LIST_HEAD(&conf->retry_list);
2985         INIT_LIST_HEAD(&conf->bio_end_io_list);
2986
2987         spin_lock_init(&conf->resync_lock);
2988         init_waitqueue_head(&conf->wait_barrier);
2989
2990         bio_list_init(&conf->pending_bio_list);
2991         conf->pending_count = 0;
2992         conf->recovery_disabled = mddev->recovery_disabled - 1;
2993
2994         err = -EIO;
2995         for (i = 0; i < conf->raid_disks * 2; i++) {
2996
2997                 disk = conf->mirrors + i;
2998
2999                 if (i < conf->raid_disks &&
3000                     disk[conf->raid_disks].rdev) {
3001                         /* This slot has a replacement. */
3002                         if (!disk->rdev) {
3003                                 /* No original, just make the replacement
3004                                  * a recovering spare
3005                                  */
3006                                 disk->rdev =
3007                                         disk[conf->raid_disks].rdev;
3008                                 disk[conf->raid_disks].rdev = NULL;
3009                         } else if (!test_bit(In_sync, &disk->rdev->flags))
3010                                 /* Original is not in_sync - bad */
3011                                 goto abort;
3012                 }
3013
3014                 if (!disk->rdev ||
3015                     !test_bit(In_sync, &disk->rdev->flags)) {
3016                         disk->head_position = 0;
3017                         if (disk->rdev &&
3018                             (disk->rdev->saved_raid_disk < 0))
3019                                 conf->fullsync = 1;
3020                 }
3021         }
3022
3023         err = -ENOMEM;
3024         conf->thread = md_register_thread(raid1d, mddev, "raid1");
3025         if (!conf->thread)
3026                 goto abort;
3027
3028         return conf;
3029
3030  abort:
3031         if (conf) {
3032                 mempool_destroy(conf->r1bio_pool);
3033                 kfree(conf->mirrors);
3034                 safe_put_page(conf->tmppage);
3035                 kfree(conf->poolinfo);
3036                 kfree(conf->nr_pending);
3037                 kfree(conf->nr_waiting);
3038                 kfree(conf->nr_queued);
3039                 kfree(conf->barrier);
3040                 if (conf->bio_split)
3041                         bioset_free(conf->bio_split);
3042                 kfree(conf);
3043         }
3044         return ERR_PTR(err);
3045 }
3046
3047 static void raid1_free(struct mddev *mddev, void *priv);
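     /*
      * Start the array: verify the personality applies, pick up the
      * configuration from setup_conf() (or one prepared by a takeover),
      * compute the initial degraded count, and register with the md
      * integrity framework.
      */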
3048 static int raid1_run(struct mddev *mddev)
3049 {
3050         struct r1conf *conf;
3051         int i;
3052         struct md_rdev *rdev;
3053         int ret;
3054         bool discard_supported = false;
3055
3056         if (mddev->level != 1) {
3057                 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3058                         mdname(mddev), mddev->level);
3059                 return -EIO;
3060         }
3061         if (mddev->reshape_position != MaxSector) {
3062                 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3063                         mdname(mddev));
3064                 return -EIO;
3065         }
3066         /*
3067          * copy the already verified devices into our private RAID1
3068          * bookkeeping area. [whatever we allocate in run()
3069          * should be freed in raid1_free()]
3070          */
3071         if (mddev->private == NULL)
3072                 conf = setup_conf(mddev);
3073         else
3074                 conf = mddev->private;
3075
3076         if (IS_ERR(conf))
3077                 return PTR_ERR(conf);
3078
3079         if (mddev->queue)
3080                 blk_queue_max_write_same_sectors(mddev->queue, 0);
3081
3082         rdev_for_each(rdev, mddev) {
3083                 if (!mddev->gendisk)
3084                         continue;
3085                 disk_stack_limits(mddev->gendisk, rdev->bdev,
3086                                   rdev->data_offset << 9);
3087                 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3088                         discard_supported = true;
3089         }
3090
3091         mddev->degraded = 0;
3092         for (i = 0; i < conf->raid_disks; i++)
3093                 if (conf->mirrors[i].rdev == NULL ||
3094                     !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3095                     test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3096                         mddev->degraded++;
3097
3098         if (conf->raid_disks - mddev->degraded == 1)
3099                 mddev->recovery_cp = MaxSector;
3100
3101         if (mddev->recovery_cp != MaxSector)
3102                 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3103                         mdname(mddev));
3104         pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3105                 mdname(mddev), mddev->raid_disks - mddev->degraded,
3106                 mddev->raid_disks);
3107
3108         /*
3109          * Ok, everything is just fine now
3110          */
3111         mddev->thread = conf->thread;
3112         conf->thread = NULL;
3113         mddev->private = conf;
3114         set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3115
3116         md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3117
3118         if (mddev->queue) {
3119                 if (discard_supported)
3120                         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3121                                                 mddev->queue);
3122                 else
3123                         queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3124                                                   mddev->queue);
3125         }
3126
3127         ret = md_integrity_register(mddev);
3128         if (ret) {
3129                 md_unregister_thread(&mddev->thread);
3130                 raid1_free(mddev, conf);
3131         }
3132         return ret;
3133 }
3134
3135 static void raid1_free(struct mddev *mddev, void *priv)
3136 {
3137         struct r1conf *conf = priv;
3138
3139         mempool_destroy(conf->r1bio_pool);
3140         kfree(conf->mirrors);
3141         safe_put_page(conf->tmppage);
3142         kfree(conf->poolinfo);
3143         kfree(conf->nr_pending);
3144         kfree(conf->nr_waiting);
3145         kfree(conf->nr_queued);
3146         kfree(conf->barrier);
3147         if (conf->bio_split)
3148                 bioset_free(conf->bio_split);
3149         kfree(conf);
3150 }
3151
3152 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3153 {
3154         /* no resync is happening, and there is enough space
3155          * on all devices, so we can resize.
3156          * We need to make sure resync covers any new space.
3157          * If the array is shrinking we should possibly wait until
3158          * any io in the removed space completes, but it hardly seems
3159          * worth it.
3160          */
3161         sector_t newsize = raid1_size(mddev, sectors, 0);
3162         if (mddev->external_size &&
3163             mddev->array_sectors > newsize)
3164                 return -EINVAL;
3165         if (mddev->bitmap) {
3166                 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
3167                 if (ret)
3168                         return ret;
3169         }
3170         md_set_array_sectors(mddev, newsize);
3171         if (sectors > mddev->dev_sectors &&
3172             mddev->recovery_cp > mddev->dev_sectors) {
3173                 mddev->recovery_cp = mddev->dev_sectors;
3174                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3175         }
3176         mddev->dev_sectors = sectors;
3177         mddev->resync_max_sectors = sectors;
3178         return 0;
3179 }
3180
3181 static int raid1_reshape(struct mddev *mddev)
3182 {
3183         /* We need to:
3184          * 1/ resize the r1bio_pool
3185          * 2/ resize conf->mirrors
3186          *
3187          * We allocate a new r1bio_pool if we can.
3188          * Then raise a device barrier and wait until all IO stops.
3189          * Then resize conf->mirrors and swap in the new r1bio pool.
3190          *
3191          * At the same time, we "pack" the devices so that all the missing
3192          * devices have the higher raid_disk numbers.
3193          */
3194         mempool_t *newpool, *oldpool;
3195         struct pool_info *newpoolinfo;
3196         struct raid1_info *newmirrors;
3197         struct r1conf *conf = mddev->private;
3198         int cnt, raid_disks;
3199         unsigned long flags;
3200         int d, d2, err;
3201
3202         /* Cannot change chunk_size, layout, or level */
3203         if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3204             mddev->layout != mddev->new_layout ||
3205             mddev->level != mddev->new_level) {
3206                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3207                 mddev->new_layout = mddev->layout;
3208                 mddev->new_level = mddev->level;
3209                 return -EINVAL;
3210         }
3211
3212         if (!mddev_is_clustered(mddev)) {
3213                 err = md_allow_write(mddev);
3214                 if (err)
3215                         return err;
3216         }
3217
3218         raid_disks = mddev->raid_disks + mddev->delta_disks;
3219
3220         if (raid_disks < conf->raid_disks) {
3221                 cnt = 0;
3222                 for (d = 0; d < conf->raid_disks; d++)
3223                         if (conf->mirrors[d].rdev)
3224                                 cnt++;
3225                 if (cnt > raid_disks)
3226                         return -EBUSY;
3227         }
3228
3229         newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3230         if (!newpoolinfo)
3231                 return -ENOMEM;
3232         newpoolinfo->mddev = mddev;
3233         newpoolinfo->raid_disks = raid_disks * 2;
3234
3235         newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
3236                                  r1bio_pool_free, newpoolinfo);
3237         if (!newpool) {
3238                 kfree(newpoolinfo);
3239                 return -ENOMEM;
3240         }
3241         newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
3242                              GFP_KERNEL);
3243         if (!newmirrors) {
3244                 kfree(newpoolinfo);
3245                 mempool_destroy(newpool);
3246                 return -ENOMEM;
3247         }
3248
3249         freeze_array(conf, 0);
3250
3251         /* ok, everything is stopped */
3252         oldpool = conf->r1bio_pool;
3253         conf->r1bio_pool = newpool;
3254
3255         for (d = d2 = 0; d < conf->raid_disks; d++) {
3256                 struct md_rdev *rdev = conf->mirrors[d].rdev;
3257                 if (rdev && rdev->raid_disk != d2) {
3258                         sysfs_unlink_rdev(mddev, rdev);
3259                         rdev->raid_disk = d2;
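                             /*
                              * Unlink again under the new raid_disk number,
                              * presumably to drop any stale sysfs link for
                              * the target slot, before re-linking below.
                              */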
3260                         sysfs_unlink_rdev(mddev, rdev);
3261                         if (sysfs_link_rdev(mddev, rdev))
3262                                 pr_warn("md/raid1:%s: cannot register rd%d\n",
3263                                         mdname(mddev), rdev->raid_disk);
3264                 }
3265                 if (rdev)
3266                         newmirrors[d2++].rdev = rdev;
3267         }
3268         kfree(conf->mirrors);
3269         conf->mirrors = newmirrors;
3270         kfree(conf->poolinfo);
3271         conf->poolinfo = newpoolinfo;
3272
3273         spin_lock_irqsave(&conf->device_lock, flags);
3274         mddev->degraded += (raid_disks - conf->raid_disks);
3275         spin_unlock_irqrestore(&conf->device_lock, flags);
3276         conf->raid_disks = mddev->raid_disks = raid_disks;
3277         mddev->delta_disks = 0;
3278
3279         unfreeze_array(conf);
3280
3281         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3282         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3283         md_wakeup_thread(mddev->thread);
3284
3285         mempool_destroy(oldpool);
3286         return 0;
3287 }
3288
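     /*
      * Quiesce callback from the md core: state 1 freezes the array so
      * no IO is in flight, state 0 thaws it, and state 2 only wakes
      * barrier waiters so that a suspend can make progress.
      */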
3289 static void raid1_quiesce(struct mddev *mddev, int state)
3290 {
3291         struct r1conf *conf = mddev->private;
3292
3293         switch(state) {
3294         case 2: /* wake for suspend */
3295                 wake_up(&conf->wait_barrier);
3296                 break;
3297         case 1:
3298                 freeze_array(conf, 0);
3299                 break;
3300         case 0:
3301                 unfreeze_array(conf);
3302                 break;
3303         }
3304 }
3305
3306 static void *raid1_takeover(struct mddev *mddev)
3307 {
3308         /* raid1 can take over:
3309          *  raid5 with 2 devices, any layout or chunk size
3310          */
3311         if (mddev->level == 5 && mddev->raid_disks == 2) {
3312                 struct r1conf *conf;
3313                 mddev->new_level = 1;
3314                 mddev->new_layout = 0;
3315                 mddev->new_chunk_sectors = 0;
3316                 conf = setup_conf(mddev);
3317                 if (!IS_ERR(conf)) {
3318                         /* Array must appear to be quiesced */
3319                         conf->array_frozen = 1;
3320                         mddev_clear_unsupported_flags(mddev,
3321                                 UNSUPPORTED_MDDEV_FLAGS);
3322                 }
3323                 return conf;
3324         }
3325         return ERR_PTR(-EINVAL);
3326 }
3327
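     /*
      * The personality table through which the md core drives raid1:
      * request submission, array lifecycle, hot add/remove, resync,
      * resize, reshape and takeover all dispatch through these hooks.
      */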
3328 static struct md_personality raid1_personality =
3329 {
3330         .name           = "raid1",
3331         .level          = 1,
3332         .owner          = THIS_MODULE,
3333         .make_request   = raid1_make_request,
3334         .run            = raid1_run,
3335         .free           = raid1_free,
3336         .status         = raid1_status,
3337         .error_handler  = raid1_error,
3338         .hot_add_disk   = raid1_add_disk,
3339         .hot_remove_disk = raid1_remove_disk,
3340         .spare_active   = raid1_spare_active,
3341         .sync_request   = raid1_sync_request,
3342         .resize         = raid1_resize,
3343         .size           = raid1_size,
3344         .check_reshape  = raid1_reshape,
3345         .quiesce        = raid1_quiesce,
3346         .takeover       = raid1_takeover,
3347         .congested      = raid1_congested,
3348 };
3349
3350 static int __init raid_init(void)
3351 {
3352         return register_md_personality(&raid1_personality);
3353 }
3354
3355 static void raid_exit(void)
3356 {
3357         unregister_md_personality(&raid1_personality);
3358 }
3359
3360 module_init(raid_init);
3361 module_exit(raid_exit);
3362 MODULE_LICENSE("GPL");
3363 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3364 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3365 MODULE_ALIAS("md-raid1");
3366 MODULE_ALIAS("md-level-1");
3367
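     /*
      * When raid1 is built as a module, this can be tuned at runtime via
      * /sys/module/raid1/parameters/max_queued_requests.
      */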
3368 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);