2 * raid10.c : Multiple Devices driver for Linux
4 * Copyright (C) 2000-2004 Neil Brown
6 * RAID-10 support for md.
8 * Based on code in raid1.c. See raid1.c for further copyright information.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/blkdev.h>
24 #include <linux/module.h>
25 #include <linux/seq_file.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
34 * RAID10 provides a combination of RAID0 and RAID1 functionality.
35 * The layout of data is defined by
38 * near_copies (stored in low byte of layout)
39 * far_copies (stored in second byte of layout)
40 * far_offset (stored in bit 16 of layout )
41 * use_far_sets (stored in bit 17 of layout )
43 * The data to be stored is divided into chunks using chunksize. Each device
44 * is divided into far_copies sections. In each section, chunks are laid out
45 * in a style similar to raid0, but near_copies copies of each chunk are stored
46 * (each on a different drive). The starting device for each section is offset
47 * near_copies from the starting device of the previous section. Thus there
48 * are (near_copies * far_copies) of each chunk, and each is on a different
49 * drive. near_copies and far_copies must be at least one, and their product
50 * is at most raid_disks.
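 *
 * For example, with raid_disks=4, near_copies=2 and far_copies=1, successive
 * chunks A, B, C, ... are laid out as (one column per device):
 *    A A B B
 *    C C D D
 *    E E F F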
52 * If far_offset is true, then the far_copies are handled a bit differently.
53 * The copies are still in different stripes, but instead of being very far
54 * apart on disk, they are in adjacent stripes.
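 *
 * For example, with raid_disks=4, near_copies=1, far_copies=2 and far_offset
 * set, each stripe is immediately followed by its copy shifted by one device:
 *    A B C D
 *    D A B C
 *    E F G H
 *    H E F G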
56 * The far and offset algorithms are handled slightly differently if
57 * 'use_far_sets' is true. In this case, the array's devices are grouped into
58 * sets that are (near_copies * far_copies) in size. The far copied stripes
59 * are still shifted by 'near_copies' devices, but this shifting stays confined
60 * to the set rather than the entire array. This is done to increase the number
61 * of device combinations that can fail without causing the array to fail.
62 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
63 * on a device):
64 *    A B C D    A B C D E
65 *      ...         ...
66 *    D A B C    E A B C D
67 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
68 *   [A B] [C D]    [A B] [C D E]
69 *   |...| |...|    |...| | ... |
70 *   [B A] [D C]    [B A] [E C D]
74 * Number of guaranteed r10bios in case of extreme VM load:
76 #define NR_RAID10_BIOS 256
78 /* when we get a read error on a read-only array, we redirect to another
79 * device without failing the first device, or trying to over-write to
80 * correct the read error. To keep track of bad blocks on a per-bio
81 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
83 #define IO_BLOCKED ((struct bio *)1)
84 /* When we successfully write to a known bad-block, we need to remove the
85 * bad-block marking which must be done from process context. So we record
86 * the success by setting devs[n].bio to IO_MADE_GOOD
88 #define IO_MADE_GOOD ((struct bio *)2)
90 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
92 /* When there are this many requests queued to be written by
93 * the raid10 thread, we become 'congested' to provide back-pressure
96 static int max_queued_requests = 1024;
98 static void allow_barrier(struct r10conf *conf);
99 static void lower_barrier(struct r10conf *conf);
100 static int _enough(struct r10conf *conf, int previous, int ignore);
101 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
103 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
104 static void end_reshape_write(struct bio *bio, int error);
105 static void end_reshape(struct r10conf *conf);
107 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
109 struct r10conf *conf = data;
110 int size = offsetof(struct r10bio, devs[conf->copies]);
112 /* allocate a r10bio with room for raid_disks entries in the
113 * devs array */
114 return kzalloc(size, gfp_flags);
117 static void r10bio_pool_free(void *r10_bio, void *data)
122 /* Maximum size of each resync request */
123 #define RESYNC_BLOCK_SIZE (64*1024)
124 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
125 /* amount of memory to reserve for resync requests */
126 #define RESYNC_WINDOW (1024*1024)
127 /* maximum number of concurrent requests, memory permitting */
128 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
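/* With 64K resync blocks, RESYNC_DEPTH above works out to 512 requests in flight. */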
131 * When performing a resync, we need to read and compare, so
132 * we need as many pages as there are copies.
133 * When performing a recovery, we need 2 bios, one for read,
134 * one for write (we recover only one drive per r10buf)
137 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
139 struct r10conf *conf = data;
141 struct r10bio *r10_bio;
146 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
150 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
151 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
152 nalloc = conf->copies; /* resync */
154 nalloc = 2; /* recovery */
159 for (j = nalloc ; j-- ; ) {
160 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
163 r10_bio->devs[j].bio = bio;
164 if (!conf->have_replacement)
166 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
169 r10_bio->devs[j].repl_bio = bio;
172 * Allocate RESYNC_PAGES data pages and attach them
175 for (j = 0 ; j < nalloc; j++) {
176 struct bio *rbio = r10_bio->devs[j].repl_bio;
177 bio = r10_bio->devs[j].bio;
178 for (i = 0; i < RESYNC_PAGES; i++) {
179 if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
180 &conf->mddev->recovery)) {
181 /* we can share bv_page's during recovery
183 struct bio *rbio = r10_bio->devs[0].bio;
184 page = rbio->bi_io_vec[i].bv_page;
187 page = alloc_page(gfp_flags);
191 bio->bi_io_vec[i].bv_page = page;
193 rbio->bi_io_vec[i].bv_page = page;
201 safe_put_page(bio->bi_io_vec[i-1].bv_page);
203 for (i = 0; i < RESYNC_PAGES ; i++)
204 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
207 for ( ; j < nalloc; j++) {
208 if (r10_bio->devs[j].bio)
209 bio_put(r10_bio->devs[j].bio);
210 if (r10_bio->devs[j].repl_bio)
211 bio_put(r10_bio->devs[j].repl_bio);
213 r10bio_pool_free(r10_bio, conf);
217 static void r10buf_pool_free(void *__r10_bio, void *data)
220 struct r10conf *conf = data;
221 struct r10bio *r10bio = __r10_bio;
224 for (j=0; j < conf->copies; j++) {
225 struct bio *bio = r10bio->devs[j].bio;
227 for (i = 0; i < RESYNC_PAGES; i++) {
228 safe_put_page(bio->bi_io_vec[i].bv_page);
229 bio->bi_io_vec[i].bv_page = NULL;
233 bio = r10bio->devs[j].repl_bio;
237 r10bio_pool_free(r10bio, conf);
240 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
244 for (i = 0; i < conf->copies; i++) {
245 struct bio **bio = & r10_bio->devs[i].bio;
246 if (!BIO_SPECIAL(*bio))
249 bio = &r10_bio->devs[i].repl_bio;
250 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
256 static void free_r10bio(struct r10bio *r10_bio)
258 struct r10conf *conf = r10_bio->mddev->private;
260 put_all_bios(conf, r10_bio);
261 mempool_free(r10_bio, conf->r10bio_pool);
264 static void put_buf(struct r10bio *r10_bio)
266 struct r10conf *conf = r10_bio->mddev->private;
268 mempool_free(r10_bio, conf->r10buf_pool);
273 static void reschedule_retry(struct r10bio *r10_bio)
276 struct mddev *mddev = r10_bio->mddev;
277 struct r10conf *conf = mddev->private;
279 spin_lock_irqsave(&conf->device_lock, flags);
280 list_add(&r10_bio->retry_list, &conf->retry_list);
282 spin_unlock_irqrestore(&conf->device_lock, flags);
284 /* wake up frozen array... */
285 wake_up(&conf->wait_barrier);
287 md_wakeup_thread(mddev->thread);
291 * raid_end_bio_io() is called when we have finished servicing a mirrored
292 * operation and are ready to return a success/failure code to the buffer
295 static void raid_end_bio_io(struct r10bio *r10_bio)
297 struct bio *bio = r10_bio->master_bio;
299 struct r10conf *conf = r10_bio->mddev->private;
301 if (bio->bi_phys_segments) {
303 spin_lock_irqsave(&conf->device_lock, flags);
304 bio->bi_phys_segments--;
305 done = (bio->bi_phys_segments == 0);
306 spin_unlock_irqrestore(&conf->device_lock, flags);
309 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
310 clear_bit(BIO_UPTODATE, &bio->bi_flags);
314 * Wake up any possible resync thread that waits for the device
319 free_r10bio(r10_bio);
323 * Update disk head position estimator based on IRQ completion info.
325 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
327 struct r10conf *conf = r10_bio->mddev->private;
329 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
330 r10_bio->devs[slot].addr + (r10_bio->sectors);
334 * Find the disk number which triggered the given bio
336 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
337 struct bio *bio, int *slotp, int *replp)
342 for (slot = 0; slot < conf->copies; slot++) {
343 if (r10_bio->devs[slot].bio == bio)
345 if (r10_bio->devs[slot].repl_bio == bio) {
351 BUG_ON(slot == conf->copies);
352 update_head_pos(slot, r10_bio);
358 return r10_bio->devs[slot].devnum;
361 static void raid10_end_read_request(struct bio *bio, int error)
363 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
364 struct r10bio *r10_bio = bio->bi_private;
366 struct md_rdev *rdev;
367 struct r10conf *conf = r10_bio->mddev->private;
370 slot = r10_bio->read_slot;
371 dev = r10_bio->devs[slot].devnum;
372 rdev = r10_bio->devs[slot].rdev;
374 * this branch is our 'one mirror IO has finished' event handler:
376 update_head_pos(slot, r10_bio);
380 * Set R10BIO_Uptodate in our master bio, so that
381 * we will return a good error code to the higher
382 * levels even if IO on some other mirrored buffer fails.
384 * The 'master' represents the composite IO operation to
385 * user-side. So if something waits for IO, then it will
386 * wait for the 'master' bio.
388 set_bit(R10BIO_Uptodate, &r10_bio->state);
390 /* If all other devices that store this block have
391 * failed, we want to return the error upwards rather
392 * than fail the last device. Here we redefine
393 * "uptodate" to mean "Don't want to retry"
395 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
400 raid_end_bio_io(r10_bio);
401 rdev_dec_pending(rdev, conf->mddev);
404 * oops, read error - keep the refcount on the rdev
406 char b[BDEVNAME_SIZE];
407 printk_ratelimited(KERN_ERR
408 "md/raid10:%s: %s: rescheduling sector %llu\n",
410 bdevname(rdev->bdev, b),
411 (unsigned long long)r10_bio->sector);
412 set_bit(R10BIO_ReadError, &r10_bio->state);
413 reschedule_retry(r10_bio);
417 static void close_write(struct r10bio *r10_bio)
419 /* clear the bitmap if all writes complete successfully */
420 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
422 !test_bit(R10BIO_Degraded, &r10_bio->state),
424 md_write_end(r10_bio->mddev);
427 static void one_write_done(struct r10bio *r10_bio)
429 if (atomic_dec_and_test(&r10_bio->remaining)) {
430 if (test_bit(R10BIO_WriteError, &r10_bio->state))
431 reschedule_retry(r10_bio);
433 close_write(r10_bio);
434 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
435 reschedule_retry(r10_bio);
437 raid_end_bio_io(r10_bio);
442 static void raid10_end_write_request(struct bio *bio, int error)
444 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
445 struct r10bio *r10_bio = bio->bi_private;
448 struct r10conf *conf = r10_bio->mddev->private;
450 struct md_rdev *rdev = NULL;
452 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
455 rdev = conf->mirrors[dev].replacement;
459 rdev = conf->mirrors[dev].rdev;
462 * this branch is our 'one mirror IO has finished' event handler:
466 /* Never record new bad blocks to replacement,
469 md_error(rdev->mddev, rdev);
471 set_bit(WriteErrorSeen, &rdev->flags);
472 if (!test_and_set_bit(WantReplacement, &rdev->flags))
473 set_bit(MD_RECOVERY_NEEDED,
474 &rdev->mddev->recovery);
475 set_bit(R10BIO_WriteError, &r10_bio->state);
480 * Set R10BIO_Uptodate in our master bio, so that
481 * we will return a good error code to the higher
482 * levels even if IO on some other mirrored buffer fails.
484 * The 'master' represents the composite IO operation to
485 * user-side. So if something waits for IO, then it will
486 * wait for the 'master' bio.
492 * Do not set R10BIO_Uptodate if the current device is
493 * rebuilding or Faulty. This is because we cannot use
494 * such a device for properly reading the data back (we could
495 * potentially use it, if the current write would have fallen
496 * before rdev->recovery_offset, but for simplicity we don't
499 if (test_bit(In_sync, &rdev->flags) &&
500 !test_bit(Faulty, &rdev->flags))
501 set_bit(R10BIO_Uptodate, &r10_bio->state);
503 /* Maybe we can clear some bad blocks. */
504 if (is_badblock(rdev,
505 r10_bio->devs[slot].addr,
507 &first_bad, &bad_sectors)) {
510 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
512 r10_bio->devs[slot].bio = IO_MADE_GOOD;
514 set_bit(R10BIO_MadeGood, &r10_bio->state);
520 * Let's see if all mirrored write operations have finished
523 one_write_done(r10_bio);
525 rdev_dec_pending(rdev, conf->mddev);
529 * RAID10 layout manager
530 * As well as the chunksize and raid_disks count, there are two
531 * parameters: near_copies and far_copies.
532 * near_copies * far_copies must be <= raid_disks.
533 * Normally one of these will be 1.
534 * If both are 1, we get raid0.
535 * If near_copies == raid_disks, we get raid1.
537 * Chunks are laid out in raid0 style with near_copies copies of the
538 * first chunk, followed by near_copies copies of the next chunk and
540 * If far_copies > 1, then after 1/far_copies of the array has been assigned
541 * as described above, we start again with a device offset of near_copies.
542 * So we effectively have another copy of the whole array further down all
543 * the drives, but with blocks on different drives.
544 * With this layout, a block is never stored twice on the same device.
546 * raid10_find_phys finds the sector offset of a given virtual sector
547 * on each device that it is on.
549 * raid10_find_virt does the reverse mapping, from a device and a
550 * sector offset to a virtual address
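 *
 * For example, with 4 devices, near_copies=2, far_copies=1 and 64K chunks
 * (128 sectors), virtual chunk 0 maps to sector 0 of devices 0 and 1,
 * virtual chunk 1 to sector 0 of devices 2 and 3, virtual chunk 2 to
 * sector 128 of devices 0 and 1, and so on.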
553 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
561 int last_far_set_start, last_far_set_size;
563 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
564 last_far_set_start *= geo->far_set_size;
566 last_far_set_size = geo->far_set_size;
567 last_far_set_size += (geo->raid_disks % geo->far_set_size);
569 /* now calculate first sector/dev */
570 chunk = r10bio->sector >> geo->chunk_shift;
571 sector = r10bio->sector & geo->chunk_mask;
573 chunk *= geo->near_copies;
575 dev = sector_div(stripe, geo->raid_disks);
577 stripe *= geo->far_copies;
579 sector += stripe << geo->chunk_shift;
581 /* and calculate all the others */
582 for (n = 0; n < geo->near_copies; n++) {
586 r10bio->devs[slot].devnum = d;
587 r10bio->devs[slot].addr = s;
590 for (f = 1; f < geo->far_copies; f++) {
591 set = d / geo->far_set_size;
592 d += geo->near_copies;
594 if ((geo->raid_disks % geo->far_set_size) &&
595 (d > last_far_set_start)) {
596 d -= last_far_set_start;
597 d %= last_far_set_size;
598 d += last_far_set_start;
600 d %= geo->far_set_size;
601 d += geo->far_set_size * set;
604 r10bio->devs[slot].devnum = d;
605 r10bio->devs[slot].addr = s;
609 if (dev >= geo->raid_disks) {
611 sector += (geo->chunk_mask + 1);
616 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
618 struct geom *geo = &conf->geo;
620 if (conf->reshape_progress != MaxSector &&
621 ((r10bio->sector >= conf->reshape_progress) !=
622 conf->mddev->reshape_backwards)) {
623 set_bit(R10BIO_Previous, &r10bio->state);
626 clear_bit(R10BIO_Previous, &r10bio->state);
628 __raid10_find_phys(geo, r10bio);
631 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
633 sector_t offset, chunk, vchunk;
634 /* Never use conf->prev as this is only called during resync
635 * or recovery, so reshape isn't happening
637 struct geom *geo = &conf->geo;
638 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
639 int far_set_size = geo->far_set_size;
640 int last_far_set_start;
642 if (geo->raid_disks % geo->far_set_size) {
643 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
644 last_far_set_start *= geo->far_set_size;
646 if (dev >= last_far_set_start) {
647 far_set_size = geo->far_set_size;
648 far_set_size += (geo->raid_disks % geo->far_set_size);
649 far_set_start = last_far_set_start;
653 offset = sector & geo->chunk_mask;
654 if (geo->far_offset) {
656 chunk = sector >> geo->chunk_shift;
657 fc = sector_div(chunk, geo->far_copies);
658 dev -= fc * geo->near_copies;
659 if (dev < far_set_start)
662 while (sector >= geo->stride) {
663 sector -= geo->stride;
664 if (dev < (geo->near_copies + far_set_start))
665 dev += far_set_size - geo->near_copies;
667 dev -= geo->near_copies;
669 chunk = sector >> geo->chunk_shift;
671 vchunk = chunk * geo->raid_disks + dev;
672 sector_div(vchunk, geo->near_copies);
673 return (vchunk << geo->chunk_shift) + offset;
677 * raid10_mergeable_bvec -- tell the bio layer if two requests can be merged
679 * @bvm: properties of new bio
680 * @biovec: the request that could be merged to it.
682 * Return the number of bytes we can accept at this offset
683 * This requires checking for end-of-chunk if near_copies != raid_disks,
684 * and for subordinate merge_bvec_fns if merge_check_needed.
686 static int raid10_mergeable_bvec(struct request_queue *q,
687 struct bvec_merge_data *bvm,
688 struct bio_vec *biovec)
690 struct mddev *mddev = q->queuedata;
691 struct r10conf *conf = mddev->private;
692 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
694 unsigned int chunk_sectors;
695 unsigned int bio_sectors = bvm->bi_size >> 9;
696 struct geom *geo = &conf->geo;
698 chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
699 if (conf->reshape_progress != MaxSector &&
700 ((sector >= conf->reshape_progress) !=
701 conf->mddev->reshape_backwards))
704 if (geo->near_copies < geo->raid_disks) {
705 max = (chunk_sectors - ((sector & (chunk_sectors - 1))
706 + bio_sectors)) << 9;
708 /* bio_add cannot handle a negative return */
710 if (max <= biovec->bv_len && bio_sectors == 0)
711 return biovec->bv_len;
713 max = biovec->bv_len;
715 if (mddev->merge_check_needed) {
717 struct r10bio r10_bio;
718 struct r10dev devs[conf->copies];
720 struct r10bio *r10_bio = &on_stack.r10_bio;
722 if (conf->reshape_progress != MaxSector) {
723 /* Cannot give any guidance during reshape */
724 if (max <= biovec->bv_len && bio_sectors == 0)
725 return biovec->bv_len;
728 r10_bio->sector = sector;
729 raid10_find_phys(conf, r10_bio);
731 for (s = 0; s < conf->copies; s++) {
732 int disk = r10_bio->devs[s].devnum;
733 struct md_rdev *rdev = rcu_dereference(
734 conf->mirrors[disk].rdev);
735 if (rdev && !test_bit(Faulty, &rdev->flags)) {
736 struct request_queue *q =
737 bdev_get_queue(rdev->bdev);
738 if (q->merge_bvec_fn) {
739 bvm->bi_sector = r10_bio->devs[s].addr
741 bvm->bi_bdev = rdev->bdev;
742 max = min(max, q->merge_bvec_fn(
746 rdev = rcu_dereference(conf->mirrors[disk].replacement);
747 if (rdev && !test_bit(Faulty, &rdev->flags)) {
748 struct request_queue *q =
749 bdev_get_queue(rdev->bdev);
750 if (q->merge_bvec_fn) {
751 bvm->bi_sector = r10_bio->devs[s].addr
753 bvm->bi_bdev = rdev->bdev;
754 max = min(max, q->merge_bvec_fn(
765 * This routine returns the disk from which the requested read should
766 * be done. There is a per-array 'next expected sequential IO' sector
767 * number - if this matches on the next IO then we use the last disk.
768 * There is also a per-disk 'last known head position' sector that is
769 * maintained from IRQ contexts; both the normal and the resync IO
770 * completion handlers update this position correctly. If there is no
771 * perfect sequential match then we pick the disk whose head is closest.
773 * If there are 2 mirrors in the same 2 devices, performance degrades
774 * because position is mirror-based, not device-based.
776 * The rdev for the device selected will have nr_pending incremented.
780 * FIXME: possibly should rethink readbalancing and do it differently
781 * depending on near_copies / far_copies geometry.
783 static struct md_rdev *read_balance(struct r10conf *conf,
784 struct r10bio *r10_bio,
787 const sector_t this_sector = r10_bio->sector;
789 int sectors = r10_bio->sectors;
790 int best_good_sectors;
791 sector_t new_distance, best_dist;
792 struct md_rdev *best_rdev, *rdev = NULL;
795 struct geom *geo = &conf->geo;
797 raid10_find_phys(conf, r10_bio);
800 sectors = r10_bio->sectors;
803 best_dist = MaxSector;
804 best_good_sectors = 0;
807 * Check if we can balance. We can balance on the whole
808 * device if no resync is going on (recovery is ok), or below
809 * the resync window. We take the first readable disk when
810 * above the resync window.
812 if (conf->mddev->recovery_cp < MaxSector
813 && (this_sector + sectors >= conf->next_resync))
816 for (slot = 0; slot < conf->copies ; slot++) {
821 if (r10_bio->devs[slot].bio == IO_BLOCKED)
823 disk = r10_bio->devs[slot].devnum;
824 rdev = rcu_dereference(conf->mirrors[disk].replacement);
825 if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
826 test_bit(Unmerged, &rdev->flags) ||
827 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
828 rdev = rcu_dereference(conf->mirrors[disk].rdev);
830 test_bit(Faulty, &rdev->flags) ||
831 test_bit(Unmerged, &rdev->flags))
833 if (!test_bit(In_sync, &rdev->flags) &&
834 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
837 dev_sector = r10_bio->devs[slot].addr;
838 if (is_badblock(rdev, dev_sector, sectors,
839 &first_bad, &bad_sectors)) {
840 if (best_dist < MaxSector)
841 /* Already have a better slot */
843 if (first_bad <= dev_sector) {
844 /* Cannot read here. If this is the
845 * 'primary' device, then we must not read
846 * beyond 'bad_sectors' from another device.
848 bad_sectors -= (dev_sector - first_bad);
849 if (!do_balance && sectors > bad_sectors)
850 sectors = bad_sectors;
851 if (best_good_sectors > sectors)
852 best_good_sectors = sectors;
854 sector_t good_sectors =
855 first_bad - dev_sector;
856 if (good_sectors > best_good_sectors) {
857 best_good_sectors = good_sectors;
862 /* Must read from here */
867 best_good_sectors = sectors;
872 /* This optimisation is debatable, and completely destroys
873 * sequential read speed for 'far copies' arrays. So only
874 * keep it for 'near' arrays, and review those later.
876 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
879 /* for far > 1 always use the lowest address */
880 if (geo->far_copies > 1)
881 new_distance = r10_bio->devs[slot].addr;
883 new_distance = abs(r10_bio->devs[slot].addr -
884 conf->mirrors[disk].head_position);
885 if (new_distance < best_dist) {
886 best_dist = new_distance;
891 if (slot >= conf->copies) {
897 atomic_inc(&rdev->nr_pending);
898 if (test_bit(Faulty, &rdev->flags)) {
899 /* Cannot risk returning a device that failed
900 * before we inc'ed nr_pending
902 rdev_dec_pending(rdev, conf->mddev);
905 r10_bio->read_slot = slot;
909 *max_sectors = best_good_sectors;
914 int md_raid10_congested(struct mddev *mddev, int bits)
916 struct r10conf *conf = mddev->private;
919 if ((bits & (1 << BDI_async_congested)) &&
920 conf->pending_count >= max_queued_requests)
925 (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
928 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
929 if (rdev && !test_bit(Faulty, &rdev->flags)) {
930 struct request_queue *q = bdev_get_queue(rdev->bdev);
932 ret |= bdi_congested(&q->backing_dev_info, bits);
938 EXPORT_SYMBOL_GPL(md_raid10_congested);
940 static int raid10_congested(void *data, int bits)
942 struct mddev *mddev = data;
944 return mddev_congested(mddev, bits) ||
945 md_raid10_congested(mddev, bits);
948 static void flush_pending_writes(struct r10conf *conf)
950 /* Any writes that have been queued but are awaiting
951 * bitmap updates get flushed here.
953 spin_lock_irq(&conf->device_lock);
955 if (conf->pending_bio_list.head) {
957 bio = bio_list_get(&conf->pending_bio_list);
958 conf->pending_count = 0;
959 spin_unlock_irq(&conf->device_lock);
960 /* flush any pending bitmap writes to disk
961 * before proceeding w/ I/O */
962 bitmap_unplug(conf->mddev->bitmap);
963 wake_up(&conf->wait_barrier);
965 while (bio) { /* submit pending writes */
966 struct bio *next = bio->bi_next;
968 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
969 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
973 generic_make_request(bio);
977 spin_unlock_irq(&conf->device_lock);
981 * Sometimes we need to suspend IO while we do something else,
982 * either some resync/recovery, or reconfigure the array.
983 * To do this we raise a 'barrier'.
984 * The 'barrier' is a counter that can be raised multiple times
985 * to count how many activities are happening which preclude
986 * normal IO.
987 * We can only raise the barrier if there is no pending IO.
988 * i.e. if nr_pending == 0.
989 * We choose only to raise the barrier if no-one is waiting for the
990 * barrier to go down. This means that as soon as an IO request
991 * is ready, no other operations which require a barrier will start
992 * until the IO request has had a chance.
994 * So: regular IO calls 'wait_barrier'. When that returns there
995 * is no background IO happening. It must arrange to call
996 * allow_barrier when it has finished its IO.
997 * background IO calls must call raise_barrier. Once that returns
998 * there is no normal IO happening. It must arrange to call
999 * lower_barrier when the particular background IO completes.
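 *
 * A typical pairing therefore looks like:
 *   wait_barrier(conf);     ... submit regular IO ...          allow_barrier(conf);
 *   raise_barrier(conf, 0); ... submit resync/recovery IO ...  lower_barrier(conf);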
1002 static void raise_barrier(struct r10conf *conf, int force)
1004 BUG_ON(force && !conf->barrier);
1005 spin_lock_irq(&conf->resync_lock);
1007 /* Wait until no block IO is waiting (unless 'force') */
1008 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
1011 /* block any new IO from starting */
1014 /* Now wait for all pending IO to complete */
1015 wait_event_lock_irq(conf->wait_barrier,
1016 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
1019 spin_unlock_irq(&conf->resync_lock);
1022 static void lower_barrier(struct r10conf *conf)
1024 unsigned long flags;
1025 spin_lock_irqsave(&conf->resync_lock, flags);
1027 spin_unlock_irqrestore(&conf->resync_lock, flags);
1028 wake_up(&conf->wait_barrier);
1031 static void wait_barrier(struct r10conf *conf)
1033 spin_lock_irq(&conf->resync_lock);
1034 if (conf->barrier) {
1036 /* Wait for the barrier to drop.
1037 * However if there are already pending
1038 * requests (preventing the barrier from
1039 * rising completely), and the
1040 * pre-process bio queue isn't empty,
1041 * then don't wait, as we need to empty
1042 * that queue to get the nr_pending
1043 * count down.
1045 wait_event_lock_irq(conf->wait_barrier,
1047 (conf->nr_pending &&
1048 current->bio_list &&
1049 !bio_list_empty(current->bio_list)),
1054 spin_unlock_irq(&conf->resync_lock);
1057 static void allow_barrier(struct r10conf *conf)
1059 unsigned long flags;
1060 spin_lock_irqsave(&conf->resync_lock, flags);
1062 spin_unlock_irqrestore(&conf->resync_lock, flags);
1063 wake_up(&conf->wait_barrier);
1066 static void freeze_array(struct r10conf *conf, int extra)
1068 /* stop syncio and normal IO and wait for everything to
1069 * go quiet.
1070 * We increment barrier and nr_waiting, and then
1071 * wait until nr_pending matches nr_queued+extra
1072 * This is called in the context of one normal IO request
1073 * that has failed. Thus any sync request that might be pending
1074 * will be blocked by nr_pending, and we need to wait for
1075 * pending IO requests to complete or be queued for re-try.
1076 * Thus the number queued (nr_queued) plus this request (extra)
1077 * must match the number of pending IOs (nr_pending) before
1080 spin_lock_irq(&conf->resync_lock);
1083 wait_event_lock_irq_cmd(conf->wait_barrier,
1084 conf->nr_pending == conf->nr_queued+extra,
1086 flush_pending_writes(conf));
1088 spin_unlock_irq(&conf->resync_lock);
1091 static void unfreeze_array(struct r10conf *conf)
1093 /* reverse the effect of the freeze */
1094 spin_lock_irq(&conf->resync_lock);
1097 wake_up(&conf->wait_barrier);
1098 spin_unlock_irq(&conf->resync_lock);
1101 static sector_t choose_data_offset(struct r10bio *r10_bio,
1102 struct md_rdev *rdev)
1104 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1105 test_bit(R10BIO_Previous, &r10_bio->state))
1106 return rdev->data_offset;
1108 return rdev->new_data_offset;
1111 struct raid10_plug_cb {
1112 struct blk_plug_cb cb;
1113 struct bio_list pending;
1117 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1119 struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1121 struct mddev *mddev = plug->cb.data;
1122 struct r10conf *conf = mddev->private;
1125 if (from_schedule || current->bio_list) {
1126 spin_lock_irq(&conf->device_lock);
1127 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1128 conf->pending_count += plug->pending_cnt;
1129 spin_unlock_irq(&conf->device_lock);
1130 wake_up(&conf->wait_barrier);
1131 md_wakeup_thread(mddev->thread);
1136 /* we aren't scheduling, so we can do the write-out directly. */
1137 bio = bio_list_get(&plug->pending);
1138 bitmap_unplug(mddev->bitmap);
1139 wake_up(&conf->wait_barrier);
1141 while (bio) { /* submit pending writes */
1142 struct bio *next = bio->bi_next;
1143 bio->bi_next = NULL;
1144 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
1145 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1146 /* Just ignore it */
1149 generic_make_request(bio);
1155 static void make_request(struct mddev *mddev, struct bio * bio)
1157 struct r10conf *conf = mddev->private;
1158 struct r10bio *r10_bio;
1159 struct bio *read_bio;
1161 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1162 int chunk_sects = chunk_mask + 1;
1163 const int rw = bio_data_dir(bio);
1164 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1165 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1166 const unsigned long do_discard = (bio->bi_rw
1167 & (REQ_DISCARD | REQ_SECURE));
1168 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
1169 unsigned long flags;
1170 struct md_rdev *blocked_rdev;
1171 struct blk_plug_cb *cb;
1172 struct raid10_plug_cb *plug = NULL;
1173 int sectors_handled;
1177 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1178 md_flush_request(mddev, bio);
1182 /* If this request crosses a chunk boundary, we need to
1183 * split it. This will only happen for 1 PAGE (or less) requests.
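 * (For example, a single 4K page written 2K before a 64K chunk boundary
 * straddles two chunks and must be split into two bios.)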
1185 if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
1187 && (conf->geo.near_copies < conf->geo.raid_disks
1188 || conf->prev.near_copies < conf->prev.raid_disks))) {
1189 struct bio_pair *bp;
1190 /* Sanity check -- queue functions should prevent this happening */
1191 if (bio_segments(bio) > 1)
1193 /* This is a one page bio that upper layers
1194 * refuse to split for us, so we need to split it.
1197 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
1199 /* Each of these 'make_request' calls will call 'wait_barrier'.
1200 * If the first succeeds but the second blocks due to the resync
1201 * thread raising the barrier, we will deadlock because the
1202 * IO to the underlying device will be queued in generic_make_request
1203 * and will never complete, so will never reduce nr_pending.
1204 * So increment nr_waiting here so no new raise_barriers will
1205 * succeed, and so the second wait_barrier cannot block.
1207 spin_lock_irq(&conf->resync_lock);
1209 spin_unlock_irq(&conf->resync_lock);
1211 make_request(mddev, &bp->bio1);
1212 make_request(mddev, &bp->bio2);
1214 spin_lock_irq(&conf->resync_lock);
1216 wake_up(&conf->wait_barrier);
1217 spin_unlock_irq(&conf->resync_lock);
1219 bio_pair_release(bp);
1222 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1223 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
1224 (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
1230 md_write_start(mddev, bio);
1233 * Register the new request and wait if the reconstruction
1234 * thread has put up a bar for new requests.
1235 * Continue immediately if no resync is active currently.
1239 sectors = bio_sectors(bio);
1240 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1241 bio->bi_sector < conf->reshape_progress &&
1242 bio->bi_sector + sectors > conf->reshape_progress) {
1243 /* IO spans the reshape position. Need to wait for
1246 allow_barrier(conf);
1247 wait_event(conf->wait_barrier,
1248 conf->reshape_progress <= bio->bi_sector ||
1249 conf->reshape_progress >= bio->bi_sector + sectors);
1252 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1253 bio_data_dir(bio) == WRITE &&
1254 (mddev->reshape_backwards
1255 ? (bio->bi_sector < conf->reshape_safe &&
1256 bio->bi_sector + sectors > conf->reshape_progress)
1257 : (bio->bi_sector + sectors > conf->reshape_safe &&
1258 bio->bi_sector < conf->reshape_progress))) {
1259 /* Need to update reshape_position in metadata */
1260 mddev->reshape_position = conf->reshape_progress;
1261 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1262 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1263 md_wakeup_thread(mddev->thread);
1264 wait_event(mddev->sb_wait,
1265 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1267 conf->reshape_safe = mddev->reshape_position;
1270 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1272 r10_bio->master_bio = bio;
1273 r10_bio->sectors = sectors;
1275 r10_bio->mddev = mddev;
1276 r10_bio->sector = bio->bi_sector;
1279 /* We might need to issue multiple reads to different
1280 * devices if there are bad blocks around, so we keep
1281 * track of the number of reads in bio->bi_phys_segments.
1282 * If this is 0, there is only one r10_bio and no locking
1283 * will be needed when the request completes. If it is
1284 * non-zero, then it is the number of not-completed requests.
1286 bio->bi_phys_segments = 0;
1287 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1291 * read balancing logic:
1293 struct md_rdev *rdev;
1297 rdev = read_balance(conf, r10_bio, &max_sectors);
1299 raid_end_bio_io(r10_bio);
1302 slot = r10_bio->read_slot;
1304 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1305 md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
1308 r10_bio->devs[slot].bio = read_bio;
1309 r10_bio->devs[slot].rdev = rdev;
1311 read_bio->bi_sector = r10_bio->devs[slot].addr +
1312 choose_data_offset(r10_bio, rdev);
1313 read_bio->bi_bdev = rdev->bdev;
1314 read_bio->bi_end_io = raid10_end_read_request;
1315 read_bio->bi_rw = READ | do_sync;
1316 read_bio->bi_private = r10_bio;
1318 if (max_sectors < r10_bio->sectors) {
1319 /* Could not read all from this device, so we will
1320 * need another r10_bio.
1322 sectors_handled = (r10_bio->sectors + max_sectors
1324 r10_bio->sectors = max_sectors;
1325 spin_lock_irq(&conf->device_lock);
1326 if (bio->bi_phys_segments == 0)
1327 bio->bi_phys_segments = 2;
1329 bio->bi_phys_segments++;
1330 spin_unlock_irq(&conf->device_lock);
1331 /* Cannot call generic_make_request directly
1332 * as that will be queued in __generic_make_request
1333 * and subsequent mempool_alloc might block
1334 * waiting for it. so hand bio over to raid10d.
1336 reschedule_retry(r10_bio);
1338 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1340 r10_bio->master_bio = bio;
1341 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1343 r10_bio->mddev = mddev;
1344 r10_bio->sector = bio->bi_sector + sectors_handled;
1347 generic_make_request(read_bio);
1354 if (conf->pending_count >= max_queued_requests) {
1355 md_wakeup_thread(mddev->thread);
1356 wait_event(conf->wait_barrier,
1357 conf->pending_count < max_queued_requests);
1359 /* first select target devices under rcu_lock and
1360 * inc refcount on their rdev. Record them by setting
1362 * If there are known/acknowledged bad blocks on any device
1363 * on which we have seen a write error, we want to avoid
1364 * writing to those blocks. This potentially requires several
1365 * writes to write around the bad blocks. Each set of writes
1366 * gets its own r10_bio with a set of bios attached. The number
1367 * of r10_bios is recorded in bio->bi_phys_segments just as with
1371 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1372 raid10_find_phys(conf, r10_bio);
1374 blocked_rdev = NULL;
1376 max_sectors = r10_bio->sectors;
1378 for (i = 0; i < conf->copies; i++) {
1379 int d = r10_bio->devs[i].devnum;
1380 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1381 struct md_rdev *rrdev = rcu_dereference(
1382 conf->mirrors[d].replacement);
1385 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1386 atomic_inc(&rdev->nr_pending);
1387 blocked_rdev = rdev;
1390 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1391 atomic_inc(&rrdev->nr_pending);
1392 blocked_rdev = rrdev;
1395 if (rdev && (test_bit(Faulty, &rdev->flags)
1396 || test_bit(Unmerged, &rdev->flags)))
1398 if (rrdev && (test_bit(Faulty, &rrdev->flags)
1399 || test_bit(Unmerged, &rrdev->flags)))
1402 r10_bio->devs[i].bio = NULL;
1403 r10_bio->devs[i].repl_bio = NULL;
1405 if (!rdev && !rrdev) {
1406 set_bit(R10BIO_Degraded, &r10_bio->state);
1409 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1411 sector_t dev_sector = r10_bio->devs[i].addr;
1415 is_bad = is_badblock(rdev, dev_sector,
1417 &first_bad, &bad_sectors);
1419 /* Mustn't write here until the bad block
1422 atomic_inc(&rdev->nr_pending);
1423 set_bit(BlockedBadBlocks, &rdev->flags);
1424 blocked_rdev = rdev;
1427 if (is_bad && first_bad <= dev_sector) {
1428 /* Cannot write here at all */
1429 bad_sectors -= (dev_sector - first_bad);
1430 if (bad_sectors < max_sectors)
1431 /* Mustn't write more than bad_sectors
1432 * to other devices yet
1434 max_sectors = bad_sectors;
1435 /* We don't set R10BIO_Degraded as that
1436 * only applies if the disk is missing,
1437 * so it might be re-added, and we want to
1438 * know to recover this chunk.
1439 * In this case the device is here, and the
1440 * fact that this chunk is not in-sync is
1441 * recorded in the bad block log.
1446 int good_sectors = first_bad - dev_sector;
1447 if (good_sectors < max_sectors)
1448 max_sectors = good_sectors;
1452 r10_bio->devs[i].bio = bio;
1453 atomic_inc(&rdev->nr_pending);
1456 r10_bio->devs[i].repl_bio = bio;
1457 atomic_inc(&rrdev->nr_pending);
1462 if (unlikely(blocked_rdev)) {
1463 /* Have to wait for this device to get unblocked, then retry */
1467 for (j = 0; j < i; j++) {
1468 if (r10_bio->devs[j].bio) {
1469 d = r10_bio->devs[j].devnum;
1470 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1472 if (r10_bio->devs[j].repl_bio) {
1473 struct md_rdev *rdev;
1474 d = r10_bio->devs[j].devnum;
1475 rdev = conf->mirrors[d].replacement;
1477 /* Race with remove_disk */
1479 rdev = conf->mirrors[d].rdev;
1481 rdev_dec_pending(rdev, mddev);
1484 allow_barrier(conf);
1485 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1490 if (max_sectors < r10_bio->sectors) {
1491 /* We are splitting this into multiple parts, so
1492 * we need to prepare for allocating another r10_bio.
1494 r10_bio->sectors = max_sectors;
1495 spin_lock_irq(&conf->device_lock);
1496 if (bio->bi_phys_segments == 0)
1497 bio->bi_phys_segments = 2;
1499 bio->bi_phys_segments++;
1500 spin_unlock_irq(&conf->device_lock);
1502 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
1504 atomic_set(&r10_bio->remaining, 1);
1505 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1507 for (i = 0; i < conf->copies; i++) {
1509 int d = r10_bio->devs[i].devnum;
1510 if (r10_bio->devs[i].bio) {
1511 struct md_rdev *rdev = conf->mirrors[d].rdev;
1512 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1513 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1515 r10_bio->devs[i].bio = mbio;
1517 mbio->bi_sector = (r10_bio->devs[i].addr+
1518 choose_data_offset(r10_bio,
1520 mbio->bi_bdev = rdev->bdev;
1521 mbio->bi_end_io = raid10_end_write_request;
1523 WRITE | do_sync | do_fua | do_discard | do_same;
1524 mbio->bi_private = r10_bio;
1526 atomic_inc(&r10_bio->remaining);
1528 cb = blk_check_plugged(raid10_unplug, mddev,
1531 plug = container_of(cb, struct raid10_plug_cb,
1535 spin_lock_irqsave(&conf->device_lock, flags);
1537 bio_list_add(&plug->pending, mbio);
1538 plug->pending_cnt++;
1540 bio_list_add(&conf->pending_bio_list, mbio);
1541 conf->pending_count++;
1543 spin_unlock_irqrestore(&conf->device_lock, flags);
1545 md_wakeup_thread(mddev->thread);
1548 if (r10_bio->devs[i].repl_bio) {
1549 struct md_rdev *rdev = conf->mirrors[d].replacement;
1551 /* Replacement just got moved to main 'rdev' */
1553 rdev = conf->mirrors[d].rdev;
1555 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1556 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1558 r10_bio->devs[i].repl_bio = mbio;
1560 mbio->bi_sector = (r10_bio->devs[i].addr +
1563 mbio->bi_bdev = rdev->bdev;
1564 mbio->bi_end_io = raid10_end_write_request;
1566 WRITE | do_sync | do_fua | do_discard | do_same;
1567 mbio->bi_private = r10_bio;
1569 atomic_inc(&r10_bio->remaining);
1570 spin_lock_irqsave(&conf->device_lock, flags);
1571 bio_list_add(&conf->pending_bio_list, mbio);
1572 conf->pending_count++;
1573 spin_unlock_irqrestore(&conf->device_lock, flags);
1574 if (!mddev_check_plugged(mddev))
1575 md_wakeup_thread(mddev->thread);
1579 /* Don't remove the bias on 'remaining' (one_write_done) until
1580 * after checking if we need to go around again.
1583 if (sectors_handled < bio_sectors(bio)) {
1584 one_write_done(r10_bio);
1585 /* We need another r10_bio. It has already been counted
1586 * in bio->bi_phys_segments.
1588 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1590 r10_bio->master_bio = bio;
1591 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1593 r10_bio->mddev = mddev;
1594 r10_bio->sector = bio->bi_sector + sectors_handled;
1598 one_write_done(r10_bio);
1600 /* In case raid10d snuck in to freeze_array */
1601 wake_up(&conf->wait_barrier);
1604 static void status(struct seq_file *seq, struct mddev *mddev)
1606 struct r10conf *conf = mddev->private;
1609 if (conf->geo.near_copies < conf->geo.raid_disks)
1610 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1611 if (conf->geo.near_copies > 1)
1612 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1613 if (conf->geo.far_copies > 1) {
1614 if (conf->geo.far_offset)
1615 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1617 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1619 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1620 conf->geo.raid_disks - mddev->degraded);
1621 for (i = 0; i < conf->geo.raid_disks; i++)
1622 seq_printf(seq, "%s",
1623 conf->mirrors[i].rdev &&
1624 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1625 seq_printf(seq, "]");
1628 /* check if there are enough drives for
1629 * every block to appear on at least one.
1630 * Don't consider the device numbered 'ignore'
1631 * as we might be about to remove it.
1633 static int _enough(struct r10conf *conf, int previous, int ignore)
1638 disks = conf->prev.raid_disks;
1639 ncopies = conf->prev.near_copies;
1641 disks = conf->geo.raid_disks;
1642 ncopies = conf->geo.near_copies;
1646 int n = conf->copies;
1650 if (conf->mirrors[this].rdev &&
1653 this = (this+1) % disks;
1657 first = (first + ncopies) % disks;
1658 } while (first != 0);
1662 static int enough(struct r10conf *conf, int ignore)
1664 /* when calling 'enough', both 'prev' and 'geo' must
1665 * be stable.
1666 * This is ensured if ->reconfig_mutex or ->device_lock
1667 * is held.
1669 return _enough(conf, 0, ignore) &&
1670 _enough(conf, 1, ignore);
1673 static void error(struct mddev *mddev, struct md_rdev *rdev)
1675 char b[BDEVNAME_SIZE];
1676 struct r10conf *conf = mddev->private;
1677 unsigned long flags;
1680 * If it is not operational, then we have already marked it as dead
1681 * else if it is the last working disk, ignore the error, let the
1682 * next level up know.
1683 * else mark the drive as failed
1685 spin_lock_irqsave(&conf->device_lock, flags);
1686 if (test_bit(In_sync, &rdev->flags)
1687 && !enough(conf, rdev->raid_disk)) {
1689 * Don't fail the drive, just return an IO error.
1691 spin_unlock_irqrestore(&conf->device_lock, flags);
1694 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1697 * if recovery is running, make sure it aborts.
1699 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1701 set_bit(Blocked, &rdev->flags);
1702 set_bit(Faulty, &rdev->flags);
1703 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1704 spin_unlock_irqrestore(&conf->device_lock, flags);
1706 "md/raid10:%s: Disk failure on %s, disabling device.\n"
1707 "md/raid10:%s: Operation continuing on %d devices.\n",
1708 mdname(mddev), bdevname(rdev->bdev, b),
1709 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1712 static void print_conf(struct r10conf *conf)
1715 struct raid10_info *tmp;
1717 printk(KERN_DEBUG "RAID10 conf printout:\n");
1719 printk(KERN_DEBUG "(!conf)\n");
1722 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1723 conf->geo.raid_disks);
1725 for (i = 0; i < conf->geo.raid_disks; i++) {
1726 char b[BDEVNAME_SIZE];
1727 tmp = conf->mirrors + i;
1729 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1730 i, !test_bit(In_sync, &tmp->rdev->flags),
1731 !test_bit(Faulty, &tmp->rdev->flags),
1732 bdevname(tmp->rdev->bdev,b));
1736 static void close_sync(struct r10conf *conf)
1739 allow_barrier(conf);
1741 mempool_destroy(conf->r10buf_pool);
1742 conf->r10buf_pool = NULL;
1745 static int raid10_spare_active(struct mddev *mddev)
1748 struct r10conf *conf = mddev->private;
1749 struct raid10_info *tmp;
1751 unsigned long flags;
1754 * Find all non-in_sync disks within the RAID10 configuration
1755 * and mark them in_sync
1757 for (i = 0; i < conf->geo.raid_disks; i++) {
1758 tmp = conf->mirrors + i;
1759 if (tmp->replacement
1760 && tmp->replacement->recovery_offset == MaxSector
1761 && !test_bit(Faulty, &tmp->replacement->flags)
1762 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1763 /* Replacement has just become active */
1765 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1768 /* Replaced device not technically faulty,
1769 * but we need to be sure it gets removed
1770 * and never re-added.
1772 set_bit(Faulty, &tmp->rdev->flags);
1773 sysfs_notify_dirent_safe(
1774 tmp->rdev->sysfs_state);
1776 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1777 } else if (tmp->rdev
1778 && !test_bit(Faulty, &tmp->rdev->flags)
1779 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1781 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1784 spin_lock_irqsave(&conf->device_lock, flags);
1785 mddev->degraded -= count;
1786 spin_unlock_irqrestore(&conf->device_lock, flags);
1793 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1795 struct r10conf *conf = mddev->private;
1799 int last = conf->geo.raid_disks - 1;
1800 struct request_queue *q = bdev_get_queue(rdev->bdev);
1802 if (mddev->recovery_cp < MaxSector)
1803 /* only hot-add to in-sync arrays, as recovery is
1804 * very different from resync
1807 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1810 if (rdev->raid_disk >= 0)
1811 first = last = rdev->raid_disk;
1813 if (q->merge_bvec_fn) {
1814 set_bit(Unmerged, &rdev->flags);
1815 mddev->merge_check_needed = 1;
1818 if (rdev->saved_raid_disk >= first &&
1819 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1820 mirror = rdev->saved_raid_disk;
1823 for ( ; mirror <= last ; mirror++) {
1824 struct raid10_info *p = &conf->mirrors[mirror];
1825 if (p->recovery_disabled == mddev->recovery_disabled)
1828 if (!test_bit(WantReplacement, &p->rdev->flags) ||
1829 p->replacement != NULL)
1831 clear_bit(In_sync, &rdev->flags);
1832 set_bit(Replacement, &rdev->flags);
1833 rdev->raid_disk = mirror;
1836 disk_stack_limits(mddev->gendisk, rdev->bdev,
1837 rdev->data_offset << 9);
1839 rcu_assign_pointer(p->replacement, rdev);
1844 disk_stack_limits(mddev->gendisk, rdev->bdev,
1845 rdev->data_offset << 9);
1847 p->head_position = 0;
1848 p->recovery_disabled = mddev->recovery_disabled - 1;
1849 rdev->raid_disk = mirror;
1851 if (rdev->saved_raid_disk != mirror)
1853 rcu_assign_pointer(p->rdev, rdev);
1856 if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1857 /* Some requests might not have seen this new
1858 * merge_bvec_fn. We must wait for them to complete
1859 * before merging the device fully.
1860 * First we make sure any code which has tested
1861 * our function has submitted the request, then
1862 * we wait for all outstanding requests to complete.
1864 synchronize_sched();
1865 freeze_array(conf, 0);
1866 unfreeze_array(conf);
1867 clear_bit(Unmerged, &rdev->flags);
1869 md_integrity_add_rdev(rdev, mddev);
1870 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1871 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1877 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1879 struct r10conf *conf = mddev->private;
1881 int number = rdev->raid_disk;
1882 struct md_rdev **rdevp;
1883 struct raid10_info *p = conf->mirrors + number;
1886 if (rdev == p->rdev)
1888 else if (rdev == p->replacement)
1889 rdevp = &p->replacement;
1893 if (test_bit(In_sync, &rdev->flags) ||
1894 atomic_read(&rdev->nr_pending)) {
1898 /* Only remove faulty devices if recovery
1899 * is not possible.
1901 if (!test_bit(Faulty, &rdev->flags) &&
1902 mddev->recovery_disabled != p->recovery_disabled &&
1903 (!p->replacement || p->replacement == rdev) &&
1904 number < conf->geo.raid_disks &&
1911 if (atomic_read(&rdev->nr_pending)) {
1912 /* lost the race, try later */
1916 } else if (p->replacement) {
1917 /* We must have just cleared 'rdev' */
1918 p->rdev = p->replacement;
1919 clear_bit(Replacement, &p->replacement->flags);
1920 smp_mb(); /* Make sure other CPUs may see both as identical
1921 * but will never see neither -- if they are careful.
1923 p->replacement = NULL;
1924 clear_bit(WantReplacement, &rdev->flags);
1926 /* We might have just removed the Replacement as faulty.
1927 * Clear the flag just in case
1929 clear_bit(WantReplacement, &rdev->flags);
1931 err = md_integrity_register(mddev);
1940 static void end_sync_read(struct bio *bio, int error)
1942 struct r10bio *r10_bio = bio->bi_private;
1943 struct r10conf *conf = r10_bio->mddev->private;
1946 if (bio == r10_bio->master_bio) {
1947 /* this is a reshape read */
1948 d = r10_bio->read_slot; /* really the read dev */
1950 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1952 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1953 set_bit(R10BIO_Uptodate, &r10_bio->state);
1955 /* The write handler will notice the lack of
1956 * R10BIO_Uptodate and record any errors etc
1958 atomic_add(r10_bio->sectors,
1959 &conf->mirrors[d].rdev->corrected_errors);
1961 /* for reconstruct, we always reschedule after a read.
1962 * for resync, only after all reads
1964 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1965 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1966 atomic_dec_and_test(&r10_bio->remaining)) {
1967 /* we have read all the blocks,
1968 * do the comparison in process context in raid10d
1970 reschedule_retry(r10_bio);
1974 static void end_sync_request(struct r10bio *r10_bio)
1976 struct mddev *mddev = r10_bio->mddev;
1978 while (atomic_dec_and_test(&r10_bio->remaining)) {
1979 if (r10_bio->master_bio == NULL) {
1980 /* the primary of several recovery bios */
1981 sector_t s = r10_bio->sectors;
1982 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1983 test_bit(R10BIO_WriteError, &r10_bio->state))
1984 reschedule_retry(r10_bio);
1987 md_done_sync(mddev, s, 1);
1990 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1991 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1992 test_bit(R10BIO_WriteError, &r10_bio->state))
1993 reschedule_retry(r10_bio);
2001 static void end_sync_write(struct bio *bio, int error)
2003 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2004 struct r10bio *r10_bio = bio->bi_private;
2005 struct mddev *mddev = r10_bio->mddev;
2006 struct r10conf *conf = mddev->private;
2012 struct md_rdev *rdev = NULL;
2014 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2016 rdev = conf->mirrors[d].replacement;
2018 rdev = conf->mirrors[d].rdev;
2022 md_error(mddev, rdev);
2024 set_bit(WriteErrorSeen, &rdev->flags);
2025 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2026 set_bit(MD_RECOVERY_NEEDED,
2027 &rdev->mddev->recovery);
2028 set_bit(R10BIO_WriteError, &r10_bio->state);
2030 } else if (is_badblock(rdev,
2031 r10_bio->devs[slot].addr,
2033 &first_bad, &bad_sectors))
2034 set_bit(R10BIO_MadeGood, &r10_bio->state);
2036 rdev_dec_pending(rdev, mddev);
2038 end_sync_request(r10_bio);
2042 * Note: sync and recover are handled very differently for raid10
2043 * This code is for resync.
2044 * For resync, we read through virtual addresses and read all blocks.
2045 * If there is any error, we schedule a write. The lowest numbered
2046 * drive is authoritative.
2047 * However requests come for physical address, so we need to map.
2048 * For every physical address there are raid_disks/copies virtual addresses,
2049 * which is always at least one, but is not necessarily an integer.
2050 * This means that a physical address can span multiple chunks, so we may
2051 * have to submit multiple io requests for a single sync request.
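 * For example, with 4 drives and 2 copies every physical address has two
 * virtual addresses, while with 3 drives and 2 copies it has 1.5, so a
 * physical chunk can straddle two virtual chunks.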
2054 * We check if all blocks are in-sync and only write to blocks that
2055 * aren't in sync
2057 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2059 struct r10conf *conf = mddev->private;
2061 struct bio *tbio, *fbio;
2064 atomic_set(&r10_bio->remaining, 1);
2066 /* find the first device with a block */
2067 for (i=0; i<conf->copies; i++)
2068 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
2071 if (i == conf->copies)
2075 fbio = r10_bio->devs[i].bio;
2077 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2078 /* now find blocks with errors */
2079 for (i=0 ; i < conf->copies ; i++) {
2082 tbio = r10_bio->devs[i].bio;
2084 if (tbio->bi_end_io != end_sync_read)
2088 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
2089 /* We know that the bi_io_vec layout is the same for
2090 * both 'first' and 'i', so we just compare them.
2091 * All vec entries are PAGE_SIZE;
2093 for (j = 0; j < vcnt; j++)
2094 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
2095 page_address(tbio->bi_io_vec[j].bv_page),
2096 fbio->bi_io_vec[j].bv_len))
2100 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2101 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2102 /* Don't fix anything. */
2105 /* Ok, we need to write this bio, either to correct an
2106 * inconsistency or to correct an unreadable block.
2107 * First we need to fixup bv_offset, bv_len and
2108 * bi_vecs, as the read request might have corrupted these
2112 tbio->bi_vcnt = vcnt;
2113 tbio->bi_size = r10_bio->sectors << 9;
2114 tbio->bi_rw = WRITE;
2115 tbio->bi_private = r10_bio;
2116 tbio->bi_sector = r10_bio->devs[i].addr;
2118 for (j=0; j < vcnt ; j++) {
2119 tbio->bi_io_vec[j].bv_offset = 0;
2120 tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
2122 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2123 page_address(fbio->bi_io_vec[j].bv_page),
2126 tbio->bi_end_io = end_sync_write;
2128 d = r10_bio->devs[i].devnum;
2129 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2130 atomic_inc(&r10_bio->remaining);
2131 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2133 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
2134 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2135 generic_make_request(tbio);
2138 /* Now write out to any replacement devices
2141 for (i = 0; i < conf->copies; i++) {
2144 tbio = r10_bio->devs[i].repl_bio;
2145 if (!tbio || !tbio->bi_end_io)
2147 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2148 && r10_bio->devs[i].bio != fbio)
2149 for (j = 0; j < vcnt; j++)
2150 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2151 page_address(fbio->bi_io_vec[j].bv_page),
2153 d = r10_bio->devs[i].devnum;
2154 atomic_inc(&r10_bio->remaining);
2155 md_sync_acct(conf->mirrors[d].replacement->bdev,
2157 generic_make_request(tbio);
2161 if (atomic_dec_and_test(&r10_bio->remaining)) {
2162 md_done_sync(mddev, r10_bio->sectors, 1);
2168 * Now for the recovery code.
2169 * Recovery happens across physical sectors.
2170 * We recover all non-in_sync drives by finding the virtual address of
2171 * each, and then choose a working drive that also has that virt address.
2172 * There is a separate r10_bio for each non-in_sync drive.
2173 * Only the first two slots are in use. The first is for reading,
2174 * the second for writing.
2177 static void fix_recovery_read_error(struct r10bio *r10_bio)
2179 /* We got a read error during recovery.
2180 * We repeat the read in smaller page-sized sections.
2181 * If a read succeeds, write it to the new device or record
2182 * a bad block if we cannot.
2183 * If a read fails, record a bad block on both old and
2186 struct mddev *mddev = r10_bio->mddev;
2187 struct r10conf *conf = mddev->private;
2188 struct bio *bio = r10_bio->devs[0].bio;
2190 int sectors = r10_bio->sectors;
2192 int dr = r10_bio->devs[0].devnum;
2193 int dw = r10_bio->devs[1].devnum;
2197 struct md_rdev *rdev;
2201 if (s > (PAGE_SIZE>>9))
2204 rdev = conf->mirrors[dr].rdev;
2205 addr = r10_bio->devs[0].addr + sect,
2206 ok = sync_page_io(rdev,
2209 bio->bi_io_vec[idx].bv_page,
2212 rdev = conf->mirrors[dw].rdev;
2213 addr = r10_bio->devs[1].addr + sect;
2214 ok = sync_page_io(rdev,
2217 bio->bi_io_vec[idx].bv_page,
2220 set_bit(WriteErrorSeen, &rdev->flags);
2221 if (!test_and_set_bit(WantReplacement,
2223 set_bit(MD_RECOVERY_NEEDED,
2224 &rdev->mddev->recovery);
2228 /* We don't worry if we cannot set a bad block -
2229 * it really is bad so there is no loss in not
2232 rdev_set_badblocks(rdev, addr, s, 0);
2234 if (rdev != conf->mirrors[dw].rdev) {
2235 /* need bad block on destination too */
2236 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2237 addr = r10_bio->devs[1].addr + sect;
2238 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2240 /* just abort the recovery */
2242 "md/raid10:%s: recovery aborted"
2243 " due to read error\n",
2246 conf->mirrors[dw].recovery_disabled
2247 = mddev->recovery_disabled;
2248 set_bit(MD_RECOVERY_INTR,
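/*
 * Illustrative sketch, not called by the driver: the page-sized stepping
 * used by fix_recovery_read_error() above when it retries a failed
 * recovery read in smaller pieces.  'handle_one' is a hypothetical
 * callback standing in for the sync_page_io() read/write pair.
 */
static void example_walk_in_pages(sector_t start, int sectors,
				  void (*handle_one)(sector_t addr, int len))
{
	int sect = 0;

	while (sectors) {
		int s = sectors;

		if (s > (PAGE_SIZE >> 9))
			s = PAGE_SIZE >> 9;	/* at most one page at a time */
		handle_one(start + sect, s);
		sectors -= s;
		sect += s;
	}
}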
2261 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2263 struct r10conf *conf = mddev->private;
2265 struct bio *wbio, *wbio2;
2267 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2268 fix_recovery_read_error(r10_bio);
2269 end_sync_request(r10_bio);
2274 * share the pages with the first bio
2275 * and submit the write request
2277 d = r10_bio->devs[1].devnum;
2278 wbio = r10_bio->devs[1].bio;
2279 wbio2 = r10_bio->devs[1].repl_bio;
2280 if (wbio->bi_end_io) {
2281 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2282 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2283 generic_make_request(wbio);
2285 if (wbio2 && wbio2->bi_end_io) {
2286 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2287 md_sync_acct(conf->mirrors[d].replacement->bdev,
2288 bio_sectors(wbio2));
2289 generic_make_request(wbio2);
2295 * Used by fix_read_error() to decay the per rdev read_errors.
2296 * We halve the read error count for every hour that has elapsed
2297 * since the last recorded read error.
2300 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2302 struct timespec cur_time_mon;
2303 unsigned long hours_since_last;
2304 unsigned int read_errors = atomic_read(&rdev->read_errors);
2306 ktime_get_ts(&cur_time_mon);
2308 if (rdev->last_read_error.tv_sec == 0 &&
2309 rdev->last_read_error.tv_nsec == 0) {
2310 /* first time we've seen a read error */
2311 rdev->last_read_error = cur_time_mon;
2315 hours_since_last = (cur_time_mon.tv_sec -
2316 rdev->last_read_error.tv_sec) / 3600;
2318 rdev->last_read_error = cur_time_mon;
2321 * if hours_since_last is > the number of bits in read_errors
2322 * just set read errors to 0. We do this to avoid
2323 * overflowing the shift of read_errors by hours_since_last.
2325 if (hours_since_last >= 8 * sizeof(read_errors))
2326 atomic_set(&rdev->read_errors, 0);
2328 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
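/*
 * Illustrative sketch, not called by the driver: the decay rule applied by
 * check_decay_read_errors() above.  The accumulated count is halved for
 * every full hour since the last recorded read error, and clamped to zero
 * once the shift would exceed the width of the counter.
 */
static unsigned int example_decay_read_errors(unsigned int read_errors,
					      unsigned long hours_since_last)
{
	if (hours_since_last >= 8 * sizeof(read_errors))
		return 0;
	return read_errors >> hours_since_last;
}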
2331 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2332 int sectors, struct page *page, int rw)
2337 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2338 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2340 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2344 set_bit(WriteErrorSeen, &rdev->flags);
2345 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2346 set_bit(MD_RECOVERY_NEEDED,
2347 &rdev->mddev->recovery);
2349 /* need to record an error - either for the block or the device */
2350 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2351 md_error(rdev->mddev, rdev);
2356 * This is a kernel thread which:
2358 * 1. Retries failed read operations on working mirrors.
2359 * 2. Updates the raid superblock when problems are encountered.
2360 * 3. Performs writes following reads for array synchronising.
2363 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2365 int sect = 0; /* Offset from r10_bio->sector */
2366 int sectors = r10_bio->sectors;
2367 struct md_rdev*rdev;
2368 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2369 int d = r10_bio->devs[r10_bio->read_slot].devnum;
2371 /* still own a reference to this rdev, so it cannot
2372 * have been cleared recently.
2374 rdev = conf->mirrors[d].rdev;
2376 if (test_bit(Faulty, &rdev->flags))
2377 /* drive has already been failed, just ignore any
2378 more fix_read_error() attempts */
2381 check_decay_read_errors(mddev, rdev);
2382 atomic_inc(&rdev->read_errors);
2383 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2384 char b[BDEVNAME_SIZE];
2385 bdevname(rdev->bdev, b);
2388 "md/raid10:%s: %s: Raid device exceeded "
2389 "read_error threshold [cur %d:max %d]\n",
2391 atomic_read(&rdev->read_errors), max_read_errors);
2393 "md/raid10:%s: %s: Failing raid device\n",
2395 md_error(mddev, conf->mirrors[d].rdev);
2396 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2402 int sl = r10_bio->read_slot;
2406 if (s > (PAGE_SIZE>>9))
2414 d = r10_bio->devs[sl].devnum;
2415 rdev = rcu_dereference(conf->mirrors[d].rdev);
2417 !test_bit(Unmerged, &rdev->flags) &&
2418 test_bit(In_sync, &rdev->flags) &&
2419 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2420 &first_bad, &bad_sectors) == 0) {
2421 atomic_inc(&rdev->nr_pending);
2423 success = sync_page_io(rdev,
2424 r10_bio->devs[sl].addr +
2427 conf->tmppage, READ, false);
2428 rdev_dec_pending(rdev, mddev);
2434 if (sl == conf->copies)
2436 } while (!success && sl != r10_bio->read_slot);
2440 /* Cannot read from anywhere, just mark the block
2441 * as bad on the first device to discourage future
2444 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2445 rdev = conf->mirrors[dn].rdev;
2447 if (!rdev_set_badblocks(
2449 r10_bio->devs[r10_bio->read_slot].addr
2452 md_error(mddev, rdev);
2453 r10_bio->devs[r10_bio->read_slot].bio
2460 /* write it back and re-read */
2462 while (sl != r10_bio->read_slot) {
2463 char b[BDEVNAME_SIZE];
2468 d = r10_bio->devs[sl].devnum;
2469 rdev = rcu_dereference(conf->mirrors[d].rdev);
2471 test_bit(Unmerged, &rdev->flags) ||
2472 !test_bit(In_sync, &rdev->flags))
2475 atomic_inc(&rdev->nr_pending);
2477 if (r10_sync_page_io(rdev,
2478 r10_bio->devs[sl].addr +
2480 s, conf->tmppage, WRITE)
2482 /* Well, this device is dead */
2484 "md/raid10:%s: read correction "
2486 " (%d sectors at %llu on %s)\n",
2488 (unsigned long long)(
2490 choose_data_offset(r10_bio,
2492 bdevname(rdev->bdev, b));
2493 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2496 bdevname(rdev->bdev, b));
2498 rdev_dec_pending(rdev, mddev);
2502 while (sl != r10_bio->read_slot) {
2503 char b[BDEVNAME_SIZE];
2508 d = r10_bio->devs[sl].devnum;
2509 rdev = rcu_dereference(conf->mirrors[d].rdev);
2511 !test_bit(In_sync, &rdev->flags))
2514 atomic_inc(&rdev->nr_pending);
2516 switch (r10_sync_page_io(rdev,
2517 r10_bio->devs[sl].addr +
2522 /* Well, this device is dead */
2524 "md/raid10:%s: unable to read back "
2526 " (%d sectors at %llu on %s)\n",
2528 (unsigned long long)(
2530 choose_data_offset(r10_bio, rdev)),
2531 bdevname(rdev->bdev, b));
2532 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2535 bdevname(rdev->bdev, b));
2539 "md/raid10:%s: read error corrected"
2540 " (%d sectors at %llu on %s)\n",
2542 (unsigned long long)(
2544 choose_data_offset(r10_bio, rdev)),
2545 bdevname(rdev->bdev, b));
2546 atomic_add(s, &rdev->corrected_errors);
2549 rdev_dec_pending(rdev, mddev);
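/*
 * Illustrative sketch, not called by the driver: the wrap-around walk over
 * mirror copies used by fix_read_error() above.  Starting at the slot that
 * reported the error, each copy is tried in turn, wrapping past the last
 * copy back to slot 0, until one succeeds or we are back where we started.
 * 'try_slot' is a hypothetical callback returning non-zero on success.
 */
static int example_walk_copies(int read_slot, int copies,
			       int (*try_slot)(int slot))
{
	int sl = read_slot;
	int success = 0;

	do {
		success = try_slot(sl);
		sl++;
		if (sl == copies)
			sl = 0;
	} while (!success && sl != read_slot);

	return success;
}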
2559 static int narrow_write_error(struct r10bio *r10_bio, int i)
2561 struct bio *bio = r10_bio->master_bio;
2562 struct mddev *mddev = r10_bio->mddev;
2563 struct r10conf *conf = mddev->private;
2564 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2565 /* bio has the data to be written to slot 'i' where
2566 * we just recently had a write error.
2567 * We repeatedly clone the bio and trim down to one block,
2568 * then try the write. Where the write fails we record
2570 * It is conceivable that the bio doesn't exactly align with
2571 * blocks. We must handle this.
2573 * We currently own a reference to the rdev.
2579 int sect_to_write = r10_bio->sectors;
2582 if (rdev->badblocks.shift < 0)
2585 block_sectors = 1 << rdev->badblocks.shift;
2586 sector = r10_bio->sector;
2587 sectors = ((r10_bio->sector + block_sectors)
2588 & ~(sector_t)(block_sectors - 1))
2591 while (sect_to_write) {
2593 if (sectors > sect_to_write)
2594 sectors = sect_to_write;
2595 /* Write at 'sector' for 'sectors' */
2596 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2597 md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2598 wbio->bi_sector = (r10_bio->devs[i].addr+
2599 choose_data_offset(r10_bio, rdev) +
2600 (sector - r10_bio->sector));
2601 wbio->bi_bdev = rdev->bdev;
2602 if (submit_bio_wait(WRITE, wbio) == 0)
2604 ok = rdev_set_badblocks(rdev, sector,
2609 sect_to_write -= sectors;
2611 sectors = block_sectors;
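/*
 * Illustrative sketch, not called by the driver: the alignment arithmetic
 * used by narrow_write_error() above.  Given the first sector of a failed
 * write and the bad-block granularity (a power-of-two number of sectors),
 * it returns the length of the first sub-write so that every later
 * sub-write starts on a block boundary, e.g. sector 5 with 8-sector
 * blocks gives 3, after which writes proceed 8 sectors at a time.
 */
static sector_t example_first_subwrite(sector_t sector, sector_t block_sectors)
{
	return ((sector + block_sectors) & ~(block_sectors - 1)) - sector;
}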
2616 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2618 int slot = r10_bio->read_slot;
2620 struct r10conf *conf = mddev->private;
2621 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2622 char b[BDEVNAME_SIZE];
2623 unsigned long do_sync;
2626 /* we got a read error. Maybe the drive is bad. Maybe just
2627 * the block and we can fix it.
2628 * We freeze all other IO, and try reading the block from
2629 * other devices. When we find one, we re-write
2630 * it and check whether that fixes the read error.
2631 * This is all done synchronously while the array is
2634 bio = r10_bio->devs[slot].bio;
2635 bdevname(bio->bi_bdev, b);
2637 r10_bio->devs[slot].bio = NULL;
2639 if (mddev->ro == 0) {
2640 freeze_array(conf, 1);
2641 fix_read_error(conf, mddev, r10_bio);
2642 unfreeze_array(conf);
2644 r10_bio->devs[slot].bio = IO_BLOCKED;
2646 rdev_dec_pending(rdev, mddev);
2649 rdev = read_balance(conf, r10_bio, &max_sectors);
2651 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2652 " read error for block %llu\n",
2654 (unsigned long long)r10_bio->sector);
2655 raid_end_bio_io(r10_bio);
2659 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2660 slot = r10_bio->read_slot;
2663 "md/raid10:%s: %s: redirecting "
2664 "sector %llu to another mirror\n",
2666 bdevname(rdev->bdev, b),
2667 (unsigned long long)r10_bio->sector);
2668 bio = bio_clone_mddev(r10_bio->master_bio,
2671 r10_bio->sector - bio->bi_sector,
2673 r10_bio->devs[slot].bio = bio;
2674 r10_bio->devs[slot].rdev = rdev;
2675 bio->bi_sector = r10_bio->devs[slot].addr
2676 + choose_data_offset(r10_bio, rdev);
2677 bio->bi_bdev = rdev->bdev;
2678 bio->bi_rw = READ | do_sync;
2679 bio->bi_private = r10_bio;
2680 bio->bi_end_io = raid10_end_read_request;
2681 if (max_sectors < r10_bio->sectors) {
2682 /* Drat - have to split this up more */
2683 struct bio *mbio = r10_bio->master_bio;
2684 int sectors_handled =
2685 r10_bio->sector + max_sectors
2687 r10_bio->sectors = max_sectors;
2688 spin_lock_irq(&conf->device_lock);
2689 if (mbio->bi_phys_segments == 0)
2690 mbio->bi_phys_segments = 2;
2692 mbio->bi_phys_segments++;
2693 spin_unlock_irq(&conf->device_lock);
2694 generic_make_request(bio);
2696 r10_bio = mempool_alloc(conf->r10bio_pool,
2698 r10_bio->master_bio = mbio;
2699 r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
2701 set_bit(R10BIO_ReadError,
2703 r10_bio->mddev = mddev;
2704 r10_bio->sector = mbio->bi_sector
2709 generic_make_request(bio);
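/*
 * Illustrative sketch, not called by the driver: how handle_read_error()
 * above accounts for splitting one master bio into several sub-requests
 * by reusing bi_phys_segments as a counter.  The first split sets it to 2
 * (the piece just issued plus the remainder still to come); every further
 * split adds one more outstanding piece.
 */
static void example_account_split(unsigned int *phys_segments)
{
	if (*phys_segments == 0)
		*phys_segments = 2;
	else
		(*phys_segments)++;
}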
2712 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2714 /* Some sort of write request has finished and it
2715 * succeeded in writing where we thought there was a
2716 * bad block. So forget the bad block.
2717 * Or possibly it failed and we need to record
2721 struct md_rdev *rdev;
2723 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2724 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2725 for (m = 0; m < conf->copies; m++) {
2726 int dev = r10_bio->devs[m].devnum;
2727 rdev = conf->mirrors[dev].rdev;
2728 if (r10_bio->devs[m].bio == NULL)
2730 if (test_bit(BIO_UPTODATE,
2731 &r10_bio->devs[m].bio->bi_flags)) {
2732 rdev_clear_badblocks(
2734 r10_bio->devs[m].addr,
2735 r10_bio->sectors, 0);
2737 if (!rdev_set_badblocks(
2739 r10_bio->devs[m].addr,
2740 r10_bio->sectors, 0))
2741 md_error(conf->mddev, rdev);
2743 rdev = conf->mirrors[dev].replacement;
2744 if (r10_bio->devs[m].repl_bio == NULL)
2746 if (test_bit(BIO_UPTODATE,
2747 &r10_bio->devs[m].repl_bio->bi_flags)) {
2748 rdev_clear_badblocks(
2750 r10_bio->devs[m].addr,
2751 r10_bio->sectors, 0);
2753 if (!rdev_set_badblocks(
2755 r10_bio->devs[m].addr,
2756 r10_bio->sectors, 0))
2757 md_error(conf->mddev, rdev);
2762 for (m = 0; m < conf->copies; m++) {
2763 int dev = r10_bio->devs[m].devnum;
2764 struct bio *bio = r10_bio->devs[m].bio;
2765 rdev = conf->mirrors[dev].rdev;
2766 if (bio == IO_MADE_GOOD) {
2767 rdev_clear_badblocks(
2769 r10_bio->devs[m].addr,
2770 r10_bio->sectors, 0);
2771 rdev_dec_pending(rdev, conf->mddev);
2772 } else if (bio != NULL &&
2773 !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2774 if (!narrow_write_error(r10_bio, m)) {
2775 md_error(conf->mddev, rdev);
2776 set_bit(R10BIO_Degraded,
2779 rdev_dec_pending(rdev, conf->mddev);
2781 bio = r10_bio->devs[m].repl_bio;
2782 rdev = conf->mirrors[dev].replacement;
2783 if (rdev && bio == IO_MADE_GOOD) {
2784 rdev_clear_badblocks(
2786 r10_bio->devs[m].addr,
2787 r10_bio->sectors, 0);
2788 rdev_dec_pending(rdev, conf->mddev);
2791 if (test_bit(R10BIO_WriteError,
2793 close_write(r10_bio);
2794 raid_end_bio_io(r10_bio);
2798 static void raid10d(struct md_thread *thread)
2800 struct mddev *mddev = thread->mddev;
2801 struct r10bio *r10_bio;
2802 unsigned long flags;
2803 struct r10conf *conf = mddev->private;
2804 struct list_head *head = &conf->retry_list;
2805 struct blk_plug plug;
2807 md_check_recovery(mddev);
2809 blk_start_plug(&plug);
2812 flush_pending_writes(conf);
2814 spin_lock_irqsave(&conf->device_lock, flags);
2815 if (list_empty(head)) {
2816 spin_unlock_irqrestore(&conf->device_lock, flags);
2819 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2820 list_del(head->prev);
2822 spin_unlock_irqrestore(&conf->device_lock, flags);
2824 mddev = r10_bio->mddev;
2825 conf = mddev->private;
2826 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2827 test_bit(R10BIO_WriteError, &r10_bio->state))
2828 handle_write_completed(conf, r10_bio);
2829 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2830 reshape_request_write(mddev, r10_bio);
2831 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2832 sync_request_write(mddev, r10_bio);
2833 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2834 recovery_request_write(mddev, r10_bio);
2835 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2836 handle_read_error(mddev, r10_bio);
2838 /* just a partial read to be scheduled from a
2841 int slot = r10_bio->read_slot;
2842 generic_make_request(r10_bio->devs[slot].bio);
2846 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2847 md_check_recovery(mddev);
2849 blk_finish_plug(&plug);
2853 static int init_resync(struct r10conf *conf)
2858 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2859 BUG_ON(conf->r10buf_pool);
2860 conf->have_replacement = 0;
2861 for (i = 0; i < conf->geo.raid_disks; i++)
2862 if (conf->mirrors[i].replacement)
2863 conf->have_replacement = 1;
2864 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2865 if (!conf->r10buf_pool)
2867 conf->next_resync = 0;
2872 * perform a "sync" on one "block"
2874 * We need to make sure that no normal I/O request - particularly write
2875 * requests - conflict with active sync requests.
2877 * This is achieved by tracking pending requests and a 'barrier' concept
2878 * that can be installed to exclude normal IO requests.
2880 * Resync and recovery are handled very differently.
2881 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2883 * For resync, we iterate over virtual addresses, read all copies,
2884 * and update if there are differences. If only one copy is live,
2886 * For recovery, we iterate over physical addresses, read a good
2887 * value for each non-in_sync drive, and over-write.
2889 * So, for recovery we may have several outstanding complex requests for a
2890 * given address, one for each out-of-sync device. We model this by allocating
2891 * a number of r10_bio structures, one for each out-of-sync device.
2892 * As we setup these structures, we collect all bio's together into a list
2893 * which we then process collectively to add pages, and then process again
2894 * to pass to generic_make_request.
2896 * The r10_bio structures are linked using a borrowed master_bio pointer.
2897 * This link is counted in ->remaining. When the r10_bio that points to NULL
2898 * has its remaining count decremented to 0, the whole complex operation
2903 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
2904 int *skipped, int go_faster)
2906 struct r10conf *conf = mddev->private;
2907 struct r10bio *r10_bio;
2908 struct bio *biolist = NULL, *bio;
2909 sector_t max_sector, nr_sectors;
2912 sector_t sync_blocks;
2913 sector_t sectors_skipped = 0;
2914 int chunks_skipped = 0;
2915 sector_t chunk_mask = conf->geo.chunk_mask;
2917 if (!conf->r10buf_pool)
2918 if (init_resync(conf))
2922 * Allow skipping a full rebuild for incremental assembly
2923 * of a clean array, like RAID1 does.
2925 if (mddev->bitmap == NULL &&
2926 mddev->recovery_cp == MaxSector &&
2927 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2928 conf->fullsync == 0) {
2930 max_sector = mddev->dev_sectors;
2931 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2932 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2933 max_sector = mddev->resync_max_sectors;
2934 return max_sector - sector_nr;
2938 max_sector = mddev->dev_sectors;
2939 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2940 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2941 max_sector = mddev->resync_max_sectors;
2942 if (sector_nr >= max_sector) {
2943 /* If we aborted, we need to abort the
2944 * sync on the 'current' bitmap chunks (there can
2945 * be several when recovering multiple devices),
2946 * as we may have started syncing them but not finished.
2947 * We can find the current address in
2948 * mddev->curr_resync, but for recovery,
2949 * we need to convert that to several
2950 * virtual addresses.
2952 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2957 if (mddev->curr_resync < max_sector) { /* aborted */
2958 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2959 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2961 else for (i = 0; i < conf->geo.raid_disks; i++) {
2963 raid10_find_virt(conf, mddev->curr_resync, i);
2964 bitmap_end_sync(mddev->bitmap, sect,
2968 /* completed sync */
2969 if ((!mddev->bitmap || conf->fullsync)
2970 && conf->have_replacement
2971 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2972 /* Completed a full sync so the replacements
2973 * are now fully recovered.
2975 for (i = 0; i < conf->geo.raid_disks; i++)
2976 if (conf->mirrors[i].replacement)
2977 conf->mirrors[i].replacement
2983 bitmap_close_sync(mddev->bitmap);
2986 return sectors_skipped;
2989 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2990 return reshape_request(mddev, sector_nr, skipped);
2992 if (chunks_skipped >= conf->geo.raid_disks) {
2993 /* if there has been nothing to do on any drive,
2994 * then there is nothing to do at all.
2997 return (max_sector - sector_nr) + sectors_skipped;
3000 if (max_sector > mddev->resync_max)
3001 max_sector = mddev->resync_max; /* Don't do IO beyond here */
3003 /* make sure whole request will fit in a chunk - if chunks
3006 if (conf->geo.near_copies < conf->geo.raid_disks &&
3007 max_sector > (sector_nr | chunk_mask))
3008 max_sector = (sector_nr | chunk_mask) + 1;
3010 * If there is non-resync activity waiting for us then
3011 * put in a delay to throttle resync.
3013 if (!go_faster && conf->nr_waiting)
3014 msleep_interruptible(1000);
3016 /* Again, very different code for resync and recovery.
3017 * Both must result in an r10bio with a list of bios that
3018 * have bi_end_io, bi_sector, bi_bdev set,
3019 * and bi_private set to the r10bio.
3020 * For recovery, we may actually create several r10bios
3021 * with 2 bios in each, that correspond to the bios in the main one.
3022 * In this case, the subordinate r10bios link back through a
3023 * borrowed master_bio pointer, and the counter in the master
3024 * includes a ref from each subordinate.
3026 /* First, we decide what to do and set ->bi_end_io
3027 * To end_sync_read if we want to read, and
3028 * end_sync_write if we will want to write.
3031 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3032 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3033 /* recovery... the complicated one */
3037 for (i = 0 ; i < conf->geo.raid_disks; i++) {
3043 struct raid10_info *mirror = &conf->mirrors[i];
3045 if ((mirror->rdev == NULL ||
3046 test_bit(In_sync, &mirror->rdev->flags))
3048 (mirror->replacement == NULL ||
3050 &mirror->replacement->flags)))
3054 /* want to reconstruct this device */
3056 sect = raid10_find_virt(conf, sector_nr, i);
3057 if (sect >= mddev->resync_max_sectors) {
3058 /* last stripe is not complete - don't
3059 * try to recover this sector.
3063 /* Unless we are doing a full sync, or a replacement
3064 * we only need to recover the block if it is set in
3067 must_sync = bitmap_start_sync(mddev->bitmap, sect,
3069 if (sync_blocks < max_sync)
3070 max_sync = sync_blocks;
3072 mirror->replacement == NULL &&
3074 /* yep, skip the sync_blocks here, but don't assume
3075 * that there will never be anything to do here
3077 chunks_skipped = -1;
3081 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3082 raise_barrier(conf, rb2 != NULL);
3083 atomic_set(&r10_bio->remaining, 0);
3085 r10_bio->master_bio = (struct bio*)rb2;
3087 atomic_inc(&rb2->remaining);
3088 r10_bio->mddev = mddev;
3089 set_bit(R10BIO_IsRecover, &r10_bio->state);
3090 r10_bio->sector = sect;
3092 raid10_find_phys(conf, r10_bio);
3094 /* Need to check if the array will still be
3097 for (j = 0; j < conf->geo.raid_disks; j++)
3098 if (conf->mirrors[j].rdev == NULL ||
3099 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
3104 must_sync = bitmap_start_sync(mddev->bitmap, sect,
3105 &sync_blocks, still_degraded);
3108 for (j=0; j<conf->copies;j++) {
3110 int d = r10_bio->devs[j].devnum;
3111 sector_t from_addr, to_addr;
3112 struct md_rdev *rdev;
3113 sector_t sector, first_bad;
3115 if (!conf->mirrors[d].rdev ||
3116 !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
3118 /* This is where we read from */
3120 rdev = conf->mirrors[d].rdev;
3121 sector = r10_bio->devs[j].addr;
3123 if (is_badblock(rdev, sector, max_sync,
3124 &first_bad, &bad_sectors)) {
3125 if (first_bad > sector)
3126 max_sync = first_bad - sector;
3128 bad_sectors -= (sector
3130 if (max_sync > bad_sectors)
3131 max_sync = bad_sectors;
3135 bio = r10_bio->devs[0].bio;
3137 bio->bi_next = biolist;
3139 bio->bi_private = r10_bio;
3140 bio->bi_end_io = end_sync_read;
3142 from_addr = r10_bio->devs[j].addr;
3143 bio->bi_sector = from_addr + rdev->data_offset;
3144 bio->bi_bdev = rdev->bdev;
3145 atomic_inc(&rdev->nr_pending);
3146 /* and we write to 'i' (if not in_sync) */
3148 for (k=0; k<conf->copies; k++)
3149 if (r10_bio->devs[k].devnum == i)
3151 BUG_ON(k == conf->copies);
3152 to_addr = r10_bio->devs[k].addr;
3153 r10_bio->devs[0].devnum = d;
3154 r10_bio->devs[0].addr = from_addr;
3155 r10_bio->devs[1].devnum = i;
3156 r10_bio->devs[1].addr = to_addr;
3158 rdev = mirror->rdev;
3159 if (!test_bit(In_sync, &rdev->flags)) {
3160 bio = r10_bio->devs[1].bio;
3162 bio->bi_next = biolist;
3164 bio->bi_private = r10_bio;
3165 bio->bi_end_io = end_sync_write;
3167 bio->bi_sector = to_addr
3168 + rdev->data_offset;
3169 bio->bi_bdev = rdev->bdev;
3170 atomic_inc(&r10_bio->remaining);
3172 r10_bio->devs[1].bio->bi_end_io = NULL;
3174 /* and maybe write to replacement */
3175 bio = r10_bio->devs[1].repl_bio;
3177 bio->bi_end_io = NULL;
3178 rdev = mirror->replacement;
3179 /* Note: if rdev != NULL, then bio
3180 * cannot be NULL as r10buf_pool_alloc will
3181 * have allocated it.
3182 * So the second test here is pointless.
3183 * But it keeps semantic-checkers happy, and
3184 * this comment keeps human reviewers
3187 if (rdev == NULL || bio == NULL ||
3188 test_bit(Faulty, &rdev->flags))
3191 bio->bi_next = biolist;
3193 bio->bi_private = r10_bio;
3194 bio->bi_end_io = end_sync_write;
3196 bio->bi_sector = to_addr + rdev->data_offset;
3197 bio->bi_bdev = rdev->bdev;
3198 atomic_inc(&r10_bio->remaining);
3201 if (j == conf->copies) {
3202 /* Cannot recover, so abort the recovery or
3203 * record a bad block */
3206 atomic_dec(&rb2->remaining);
3209 /* problem is that there are bad blocks
3210 * on other device(s)
3213 for (k = 0; k < conf->copies; k++)
3214 if (r10_bio->devs[k].devnum == i)
3216 if (!test_bit(In_sync,
3217 &mirror->rdev->flags)
3218 && !rdev_set_badblocks(
3220 r10_bio->devs[k].addr,
3223 if (mirror->replacement &&
3224 !rdev_set_badblocks(
3225 mirror->replacement,
3226 r10_bio->devs[k].addr,
3231 if (!test_and_set_bit(MD_RECOVERY_INTR,
3233 printk(KERN_INFO "md/raid10:%s: insufficient "
3234 "working devices for recovery.\n",
3236 mirror->recovery_disabled
3237 = mddev->recovery_disabled;
3242 if (biolist == NULL) {
3244 struct r10bio *rb2 = r10_bio;
3245 r10_bio = (struct r10bio*) rb2->master_bio;
3246 rb2->master_bio = NULL;
3252 /* resync. Schedule a read for every block at this virt offset */
3255 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3257 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
3258 &sync_blocks, mddev->degraded) &&
3259 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3260 &mddev->recovery)) {
3261 /* We can skip this block */
3263 return sync_blocks + sectors_skipped;
3265 if (sync_blocks < max_sync)
3266 max_sync = sync_blocks;
3267 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3269 r10_bio->mddev = mddev;
3270 atomic_set(&r10_bio->remaining, 0);
3271 raise_barrier(conf, 0);
3272 conf->next_resync = sector_nr;
3274 r10_bio->master_bio = NULL;
3275 r10_bio->sector = sector_nr;
3276 set_bit(R10BIO_IsSync, &r10_bio->state);
3277 raid10_find_phys(conf, r10_bio);
3278 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3280 for (i = 0; i < conf->copies; i++) {
3281 int d = r10_bio->devs[i].devnum;
3282 sector_t first_bad, sector;
3285 if (r10_bio->devs[i].repl_bio)
3286 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3288 bio = r10_bio->devs[i].bio;
3290 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3291 if (conf->mirrors[d].rdev == NULL ||
3292 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
3294 sector = r10_bio->devs[i].addr;
3295 if (is_badblock(conf->mirrors[d].rdev,
3297 &first_bad, &bad_sectors)) {
3298 if (first_bad > sector)
3299 max_sync = first_bad - sector;
3301 bad_sectors -= (sector - first_bad);
3302 if (max_sync > bad_sectors)
3303 max_sync = bad_sectors;
3307 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3308 atomic_inc(&r10_bio->remaining);
3309 bio->bi_next = biolist;
3311 bio->bi_private = r10_bio;
3312 bio->bi_end_io = end_sync_read;
3314 bio->bi_sector = sector +
3315 conf->mirrors[d].rdev->data_offset;
3316 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
3319 if (conf->mirrors[d].replacement == NULL ||
3321 &conf->mirrors[d].replacement->flags))
3324 /* Need to set up for writing to the replacement */
3325 bio = r10_bio->devs[i].repl_bio;
3327 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3329 sector = r10_bio->devs[i].addr;
3330 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3331 bio->bi_next = biolist;
3333 bio->bi_private = r10_bio;
3334 bio->bi_end_io = end_sync_write;
3336 bio->bi_sector = sector +
3337 conf->mirrors[d].replacement->data_offset;
3338 bio->bi_bdev = conf->mirrors[d].replacement->bdev;
3343 for (i=0; i<conf->copies; i++) {
3344 int d = r10_bio->devs[i].devnum;
3345 if (r10_bio->devs[i].bio->bi_end_io)
3346 rdev_dec_pending(conf->mirrors[d].rdev,
3348 if (r10_bio->devs[i].repl_bio &&
3349 r10_bio->devs[i].repl_bio->bi_end_io)
3351 conf->mirrors[d].replacement,
3361 if (sector_nr + max_sync < max_sector)
3362 max_sector = sector_nr + max_sync;
3365 int len = PAGE_SIZE;
3366 if (sector_nr + (len>>9) > max_sector)
3367 len = (max_sector - sector_nr) << 9;
3370 for (bio= biolist ; bio ; bio=bio->bi_next) {
3372 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
3373 if (bio_add_page(bio, page, len, 0))
3377 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3378 for (bio2 = biolist;
3379 bio2 && bio2 != bio;
3380 bio2 = bio2->bi_next) {
3381 /* remove last page from this bio */
3383 bio2->bi_size -= len;
3384 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
3388 nr_sectors += len>>9;
3389 sector_nr += len>>9;
3390 } while (biolist->bi_vcnt < RESYNC_PAGES);
3392 r10_bio->sectors = nr_sectors;
3396 biolist = biolist->bi_next;
3398 bio->bi_next = NULL;
3399 r10_bio = bio->bi_private;
3400 r10_bio->sectors = nr_sectors;
3402 if (bio->bi_end_io == end_sync_read) {
3403 md_sync_acct(bio->bi_bdev, nr_sectors);
3404 generic_make_request(bio);
3408 if (sectors_skipped)
3409 /* pretend they weren't skipped, it makes
3410 * no important difference in this case
3412 md_done_sync(mddev, sectors_skipped, 1);
3414 return sectors_skipped + nr_sectors;
3416 /* There is nowhere to write, so all non-sync
3417 * drives must be failed or in resync, or all drives
3418 * have a bad block, so try the next chunk...
3420 if (sector_nr + max_sync < max_sector)
3421 max_sector = sector_nr + max_sync;
3423 sectors_skipped += (max_sector - sector_nr);
3425 sector_nr = max_sector;
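/*
 * Illustrative sketch, not called by the driver: the chunk clamping done
 * near the top of sync_request() above (applied when near_copies is less
 * than raid_disks).  chunk_mask is chunk_sectors - 1, so
 * (sector_nr | chunk_mask) + 1 is the first sector of the next chunk, and
 * clamping keeps one sync request inside one chunk, e.g. with 64-sector
 * chunks a request starting at sector 70 is clamped to end at 128.
 */
static sector_t example_clamp_to_chunk(sector_t sector_nr, sector_t max_sector,
				       sector_t chunk_mask)
{
	if (max_sector > (sector_nr | chunk_mask))
		max_sector = (sector_nr | chunk_mask) + 1;
	return max_sector;
}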
3430 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3433 struct r10conf *conf = mddev->private;
3436 raid_disks = min(conf->geo.raid_disks,
3437 conf->prev.raid_disks);
3439 sectors = conf->dev_sectors;
3441 size = sectors >> conf->geo.chunk_shift;
3442 sector_div(size, conf->geo.far_copies);
3443 size = size * raid_disks;
3444 sector_div(size, conf->geo.near_copies);
3446 return size << conf->geo.chunk_shift;
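/*
 * Illustrative sketch, not called by the driver: the capacity arithmetic of
 * raid10_size() above, written with plain 64-bit division instead of
 * sector_div().  For example 8 devices of 1000 chunks each with
 * near_copies = 2 and far_copies = 1 give 8 * 1000 / 2 = 4000 usable chunks.
 */
static unsigned long long example_raid10_size(unsigned long long dev_sectors,
					      int raid_disks, int near_copies,
					      int far_copies, int chunk_shift)
{
	unsigned long long size = dev_sectors >> chunk_shift;

	size /= far_copies;		/* chunks usable on each device */
	size *= raid_disks;		/* total chunks across the array */
	size /= near_copies;		/* drop the mirrored copies */
	return size << chunk_shift;	/* back to sectors */
}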
3449 static void calc_sectors(struct r10conf *conf, sector_t size)
3451 /* Calculate the number of sectors-per-device that will
3452 * actually be used, and set conf->dev_sectors and
3456 size = size >> conf->geo.chunk_shift;
3457 sector_div(size, conf->geo.far_copies);
3458 size = size * conf->geo.raid_disks;
3459 sector_div(size, conf->geo.near_copies);
3460 /* 'size' is now the number of chunks in the array */
3461 /* calculate "used chunks per device" */
3462 size = size * conf->copies;
3464 /* We need to round up when dividing by raid_disks to
3465 * get the stride size.
3467 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3469 conf->dev_sectors = size << conf->geo.chunk_shift;
3471 if (conf->geo.far_offset)
3472 conf->geo.stride = 1 << conf->geo.chunk_shift;
3474 sector_div(size, conf->geo.far_copies);
3475 conf->geo.stride = size << conf->geo.chunk_shift;
3479 enum geo_type {geo_new, geo_old, geo_start};
3480 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3483 int layout, chunk, disks;
3486 layout = mddev->layout;
3487 chunk = mddev->chunk_sectors;
3488 disks = mddev->raid_disks - mddev->delta_disks;
3491 layout = mddev->new_layout;
3492 chunk = mddev->new_chunk_sectors;
3493 disks = mddev->raid_disks;
3495 default: /* avoid 'may be unused' warnings */
3496 case geo_start: /* new when starting reshape - raid_disks not
3498 layout = mddev->new_layout;
3499 chunk = mddev->new_chunk_sectors;
3500 disks = mddev->raid_disks + mddev->delta_disks;
3505 if (chunk < (PAGE_SIZE >> 9) ||
3506 !is_power_of_2(chunk))
3509 fc = (layout >> 8) & 255;
3510 fo = layout & (1<<16);
3511 geo->raid_disks = disks;
3512 geo->near_copies = nc;
3513 geo->far_copies = fc;
3514 geo->far_offset = fo;
3515 geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
3516 geo->chunk_mask = chunk - 1;
3517 geo->chunk_shift = ffz(~chunk);
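/*
 * Illustrative sketch, not called by the driver: the chunk geometry derived
 * by setup_geo() above.  The chunk size (in sectors) is a power of two, so
 * chunk_mask is chunk - 1 and chunk_shift is log2(chunk); the driver gets
 * the shift from ffz(~chunk), and the loop below computes the same value in
 * plain C.  A 512KiB chunk is 1024 sectors, giving mask 1023 and shift 10.
 */
static void example_chunk_geometry(unsigned int chunk_sectors,
				   unsigned int *mask, unsigned int *shift)
{
	unsigned int s = 0;

	*mask = chunk_sectors - 1;
	while ((1U << s) < chunk_sectors)
		s++;
	*shift = s;
}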
3521 static struct r10conf *setup_conf(struct mddev *mddev)
3523 struct r10conf *conf = NULL;
3528 copies = setup_geo(&geo, mddev, geo_new);
3531 printk(KERN_ERR "md/raid10:%s: chunk size must be "
3532 "at least PAGE_SIZE(%ld) and be a power of 2.\n",
3533 mdname(mddev), PAGE_SIZE);
3537 if (copies < 2 || copies > mddev->raid_disks) {
3538 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3539 mdname(mddev), mddev->new_layout);
3544 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3548 /* FIXME calc properly */
3549 conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
3550 max(0,mddev->delta_disks)),
3555 conf->tmppage = alloc_page(GFP_KERNEL);
3560 conf->copies = copies;
3561 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3562 r10bio_pool_free, conf);
3563 if (!conf->r10bio_pool)
3566 calc_sectors(conf, mddev->dev_sectors);
3567 if (mddev->reshape_position == MaxSector) {
3568 conf->prev = conf->geo;
3569 conf->reshape_progress = MaxSector;
3571 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3575 conf->reshape_progress = mddev->reshape_position;
3576 if (conf->prev.far_offset)
3577 conf->prev.stride = 1 << conf->prev.chunk_shift;
3579 /* far_copies must be 1 */
3580 conf->prev.stride = conf->dev_sectors;
3582 spin_lock_init(&conf->device_lock);
3583 INIT_LIST_HEAD(&conf->retry_list);
3585 spin_lock_init(&conf->resync_lock);
3586 init_waitqueue_head(&conf->wait_barrier);
3588 conf->thread = md_register_thread(raid10d, mddev, "raid10");
3592 conf->mddev = mddev;
3597 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3600 if (conf->r10bio_pool)
3601 mempool_destroy(conf->r10bio_pool);
3602 kfree(conf->mirrors);
3603 safe_put_page(conf->tmppage);
3606 return ERR_PTR(err);
3609 static int run(struct mddev *mddev)
3611 struct r10conf *conf;
3612 int i, disk_idx, chunk_size;
3613 struct raid10_info *disk;
3614 struct md_rdev *rdev;
3616 sector_t min_offset_diff = 0;
3618 bool discard_supported = false;
3620 if (mddev->private == NULL) {
3621 conf = setup_conf(mddev);
3623 return PTR_ERR(conf);
3624 mddev->private = conf;
3626 conf = mddev->private;
3630 mddev->thread = conf->thread;
3631 conf->thread = NULL;
3633 chunk_size = mddev->chunk_sectors << 9;
3635 blk_queue_max_discard_sectors(mddev->queue,
3636 mddev->chunk_sectors);
3637 blk_queue_max_write_same_sectors(mddev->queue, 0);
3638 blk_queue_io_min(mddev->queue, chunk_size);
3639 if (conf->geo.raid_disks % conf->geo.near_copies)
3640 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3642 blk_queue_io_opt(mddev->queue, chunk_size *
3643 (conf->geo.raid_disks / conf->geo.near_copies));
3646 rdev_for_each(rdev, mddev) {
3648 struct request_queue *q;
3650 disk_idx = rdev->raid_disk;
3653 if (disk_idx >= conf->geo.raid_disks &&
3654 disk_idx >= conf->prev.raid_disks)
3656 disk = conf->mirrors + disk_idx;
3658 if (test_bit(Replacement, &rdev->flags)) {
3659 if (disk->replacement)
3661 disk->replacement = rdev;
3667 q = bdev_get_queue(rdev->bdev);
3668 if (q->merge_bvec_fn)
3669 mddev->merge_check_needed = 1;
3670 diff = (rdev->new_data_offset - rdev->data_offset);
3671 if (!mddev->reshape_backwards)
3675 if (first || diff < min_offset_diff)
3676 min_offset_diff = diff;
3679 disk_stack_limits(mddev->gendisk, rdev->bdev,
3680 rdev->data_offset << 9);
3682 disk->head_position = 0;
3684 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3685 discard_supported = true;
3689 if (discard_supported)
3690 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3693 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3696 /* need to check that every block has at least one working mirror */
3697 if (!enough(conf, -1)) {
3698 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
3703 if (conf->reshape_progress != MaxSector) {
3704 /* must ensure that shape change is supported */
3705 if (conf->geo.far_copies != 1 &&
3706 conf->geo.far_offset == 0)
3708 if (conf->prev.far_copies != 1 &&
3709 conf->geo.far_offset == 0)
3713 mddev->degraded = 0;
3715 i < conf->geo.raid_disks
3716 || i < conf->prev.raid_disks;
3719 disk = conf->mirrors + i;
3721 if (!disk->rdev && disk->replacement) {
3722 /* The replacement is all we have - use it */
3723 disk->rdev = disk->replacement;
3724 disk->replacement = NULL;
3725 clear_bit(Replacement, &disk->rdev->flags);
3729 !test_bit(In_sync, &disk->rdev->flags)) {
3730 disk->head_position = 0;
3735 disk->recovery_disabled = mddev->recovery_disabled - 1;
3738 if (mddev->recovery_cp != MaxSector)
3739 printk(KERN_NOTICE "md/raid10:%s: not clean"
3740 " -- starting background reconstruction\n",
3743 "md/raid10:%s: active with %d out of %d devices\n",
3744 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3745 conf->geo.raid_disks);
3747 * Ok, everything is just fine now
3749 mddev->dev_sectors = conf->dev_sectors;
3750 size = raid10_size(mddev, 0, 0);
3751 md_set_array_sectors(mddev, size);
3752 mddev->resync_max_sectors = size;
3755 int stripe = conf->geo.raid_disks *
3756 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
3757 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
3758 mddev->queue->backing_dev_info.congested_data = mddev;
3760 /* Calculate max read-ahead size.
3761 * We need to readahead at least twice a whole stripe....
3764 stripe /= conf->geo.near_copies;
3765 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3766 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
3767 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
3771 if (md_integrity_register(mddev))
3774 if (conf->reshape_progress != MaxSector) {
3775 unsigned long before_length, after_length;
3777 before_length = ((1 << conf->prev.chunk_shift) *
3778 conf->prev.far_copies);
3779 after_length = ((1 << conf->geo.chunk_shift) *
3780 conf->geo.far_copies);
3782 if (max(before_length, after_length) > min_offset_diff) {
3783 /* This cannot work */
3784 printk("md/raid10: offset difference not enough to continue reshape\n");
3787 conf->offset_diff = min_offset_diff;
3789 conf->reshape_safe = conf->reshape_progress;
3790 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3791 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3792 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3793 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3794 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3801 md_unregister_thread(&mddev->thread);
3802 if (conf->r10bio_pool)
3803 mempool_destroy(conf->r10bio_pool);
3804 safe_put_page(conf->tmppage);
3805 kfree(conf->mirrors);
3807 mddev->private = NULL;
3812 static int stop(struct mddev *mddev)
3814 struct r10conf *conf = mddev->private;
3816 raise_barrier(conf, 0);
3817 lower_barrier(conf);
3819 md_unregister_thread(&mddev->thread);
3821 /* the unplug fn references 'conf'*/
3822 blk_sync_queue(mddev->queue);
3824 if (conf->r10bio_pool)
3825 mempool_destroy(conf->r10bio_pool);
3826 safe_put_page(conf->tmppage);
3827 kfree(conf->mirrors);
3829 mddev->private = NULL;
3833 static void raid10_quiesce(struct mddev *mddev, int state)
3835 struct r10conf *conf = mddev->private;
3839 raise_barrier(conf, 0);
3842 lower_barrier(conf);
3847 static int raid10_resize(struct mddev *mddev, sector_t sectors)
3849 /* Resize of 'far' arrays is not supported.
3850 * For 'near' and 'offset' arrays we can set the
3851 * number of sectors used to be an appropriate multiple
3852 * of the chunk size.
3853 * For 'offset', this is far_copies*chunksize.
3854 * For 'near' the multiplier is the LCM of
3855 * near_copies and raid_disks.
3856 * So if far_copies > 1 && !far_offset, fail.
3857 * Else find LCM(raid_disks, near_copies)*far_copies and
3858 * multiply by chunk_size. Then round to this number.
3859 * This is mostly done by raid10_size()
3861 struct r10conf *conf = mddev->private;
3862 sector_t oldsize, size;
3864 if (mddev->reshape_position != MaxSector)
3867 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3870 oldsize = raid10_size(mddev, 0, 0);
3871 size = raid10_size(mddev, sectors, 0);
3872 if (mddev->external_size &&
3873 mddev->array_sectors > size)
3875 if (mddev->bitmap) {
3876 int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
3880 md_set_array_sectors(mddev, size);
3881 set_capacity(mddev->gendisk, mddev->array_sectors);
3882 revalidate_disk(mddev->gendisk);
3883 if (sectors > mddev->dev_sectors &&
3884 mddev->recovery_cp > oldsize) {
3885 mddev->recovery_cp = oldsize;
3886 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3888 calc_sectors(conf, sectors);
3889 mddev->dev_sectors = conf->dev_sectors;
3890 mddev->resync_max_sectors = size;
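/*
 * Illustrative sketch, not called by the driver: the layout check at the
 * top of raid10_resize() above.  Only 'near' and 'offset' layouts can be
 * resized; a plain 'far' layout (far_copies > 1 without far_offset) is
 * rejected because calc_sectors() derives its stride from the device size,
 * so resizing would move the existing far copies.
 */
static int example_resize_allowed(int far_copies, int far_offset)
{
	return far_copies == 1 || far_offset;
}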
3894 static void *raid10_takeover_raid0(struct mddev *mddev)
3896 struct md_rdev *rdev;
3897 struct r10conf *conf;
3899 if (mddev->degraded > 0) {
3900 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3902 return ERR_PTR(-EINVAL);
3905 /* Set new parameters */
3906 mddev->new_level = 10;
3907 /* new layout: far_copies = 1, near_copies = 2 */
3908 mddev->new_layout = (1<<8) + 2;
3909 mddev->new_chunk_sectors = mddev->chunk_sectors;
3910 mddev->delta_disks = mddev->raid_disks;
3911 mddev->raid_disks *= 2;
3912 /* make sure it will be not marked as dirty */
3913 mddev->recovery_cp = MaxSector;
3915 conf = setup_conf(mddev);
3916 if (!IS_ERR(conf)) {
3917 rdev_for_each(rdev, mddev)
3918 if (rdev->raid_disk >= 0)
3919 rdev->new_raid_disk = rdev->raid_disk * 2;
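/*
 * Illustrative sketch, not called by the driver: the parameter mapping done
 * by raid10_takeover_raid0() above for an n-disk raid0.  (1 << 8) + 2
 * encodes far_copies = 1 and near_copies = 2 (the same word setup_geo()
 * decodes), the disk count doubles, and each original member lands on an
 * even slot so the odd slots can later take mirrors.
 */
static void example_takeover_params(int raid0_disks, int *new_layout,
				    int *new_raid_disks)
{
	*new_layout = (1 << 8) + 2;	/* fc = 1, nc = 2 */
	*new_raid_disks = raid0_disks * 2;
}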
3926 static void *raid10_takeover(struct mddev *mddev)
3928 struct r0conf *raid0_conf;
3930 /* raid10 can take over:
3931 * raid0 - providing it has only one zone
3933 if (mddev->level == 0) {
3934 /* for raid0 takeover only one zone is supported */
3935 raid0_conf = mddev->private;
3936 if (raid0_conf->nr_strip_zones > 1) {
3937 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3938 " with more than one zone.\n",
3940 return ERR_PTR(-EINVAL);
3942 return raid10_takeover_raid0(mddev);
3944 return ERR_PTR(-EINVAL);
3947 static int raid10_check_reshape(struct mddev *mddev)
3949 /* Called when there is a request to change
3950 * - layout (to ->new_layout)
3951 * - chunk size (to ->new_chunk_sectors)
3952 * - raid_disks (by delta_disks)
3953 * or when trying to restart a reshape that was ongoing.
3955 * We need to validate the request and possibly allocate
3956 * space if that might be an issue later.
3958 * Currently we reject any reshape of a 'far' mode array,
3959 * allow chunk size to change if new is generally acceptable,
3960 * allow raid_disks to increase, and allow
3961 * a switch between 'near' mode and 'offset' mode.
3963 struct r10conf *conf = mddev->private;
3966 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
3969 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
3970 /* mustn't change number of copies */
3972 if (geo.far_copies > 1 && !geo.far_offset)
3973 /* Cannot switch to 'far' mode */
3976 if (mddev->array_sectors & geo.chunk_mask)
3977 /* not factor of array size */
3980 if (!enough(conf, -1))
3983 kfree(conf->mirrors_new);
3984 conf->mirrors_new = NULL;
3985 if (mddev->delta_disks > 0) {
3986 /* allocate new 'mirrors' list */
3987 conf->mirrors_new = kzalloc(
3988 sizeof(struct raid10_info)
3989 *(mddev->raid_disks +
3990 mddev->delta_disks),
3992 if (!conf->mirrors_new)
3999 * Need to check if array has failed when deciding whether to:
4001 * - remove non-faulty devices
4004 * This determination is simple when no reshape is happening.
4005 * However if there is a reshape, we need to carefully check
4006 * both the before and after sections.
4007 * This is because some failed devices may only affect one
4008 * of the two sections, and some non-in_sync devices may
4009 * be insync in the section most affected by failed devices.
4011 static int calc_degraded(struct r10conf *conf)
4013 int degraded, degraded2;
4018 /* 'prev' section first */
4019 for (i = 0; i < conf->prev.raid_disks; i++) {
4020 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4021 if (!rdev || test_bit(Faulty, &rdev->flags))
4023 else if (!test_bit(In_sync, &rdev->flags))
4024 /* When we can reduce the number of devices in
4025 * an array, this might not contribute to
4026 * 'degraded'. It does now.
4031 if (conf->geo.raid_disks == conf->prev.raid_disks)
4035 for (i = 0; i < conf->geo.raid_disks; i++) {
4036 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4037 if (!rdev || test_bit(Faulty, &rdev->flags))
4039 else if (!test_bit(In_sync, &rdev->flags)) {
4040 /* If reshape is increasing the number of devices,
4041 * this section has already been recovered, so
4042 * it doesn't contribute to degraded.
4045 if (conf->geo.raid_disks <= conf->prev.raid_disks)
4050 if (degraded2 > degraded)
4055 static int raid10_start_reshape(struct mddev *mddev)
4057 /* A 'reshape' has been requested. This commits
4058 * the various 'new' fields and sets MD_RECOVERY_RESHAPE
4059 * This also checks if there are enough spares and adds them
4061 * We currently require enough spares to make the final
4062 * array non-degraded. We also require that the difference
4063 * between old and new data_offset - on each device - is
4064 * enough that we never risk over-writing.
4067 unsigned long before_length, after_length;
4068 sector_t min_offset_diff = 0;
4071 struct r10conf *conf = mddev->private;
4072 struct md_rdev *rdev;
4076 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4079 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4082 before_length = ((1 << conf->prev.chunk_shift) *
4083 conf->prev.far_copies);
4084 after_length = ((1 << conf->geo.chunk_shift) *
4085 conf->geo.far_copies);
4087 rdev_for_each(rdev, mddev) {
4088 if (!test_bit(In_sync, &rdev->flags)
4089 && !test_bit(Faulty, &rdev->flags))
4091 if (rdev->raid_disk >= 0) {
4092 long long diff = (rdev->new_data_offset
4093 - rdev->data_offset);
4094 if (!mddev->reshape_backwards)
4098 if (first || diff < min_offset_diff)
4099 min_offset_diff = diff;
4103 if (max(before_length, after_length) > min_offset_diff)
4106 if (spares < mddev->delta_disks)
4109 conf->offset_diff = min_offset_diff;
4110 spin_lock_irq(&conf->device_lock);
4111 if (conf->mirrors_new) {
4112 memcpy(conf->mirrors_new, conf->mirrors,
4113 sizeof(struct raid10_info)*conf->prev.raid_disks);
4115 kfree(conf->mirrors_old); /* FIXME and elsewhere */
4116 conf->mirrors_old = conf->mirrors;
4117 conf->mirrors = conf->mirrors_new;
4118 conf->mirrors_new = NULL;
4120 setup_geo(&conf->geo, mddev, geo_start);
4122 if (mddev->reshape_backwards) {
4123 sector_t size = raid10_size(mddev, 0, 0);
4124 if (size < mddev->array_sectors) {
4125 spin_unlock_irq(&conf->device_lock);
4126 printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n",
4130 mddev->resync_max_sectors = size;
4131 conf->reshape_progress = size;
4133 conf->reshape_progress = 0;
4134 spin_unlock_irq(&conf->device_lock);
4136 if (mddev->delta_disks && mddev->bitmap) {
4137 ret = bitmap_resize(mddev->bitmap,
4138 raid10_size(mddev, 0,
4139 conf->geo.raid_disks),
4144 if (mddev->delta_disks > 0) {
4145 rdev_for_each(rdev, mddev)
4146 if (rdev->raid_disk < 0 &&
4147 !test_bit(Faulty, &rdev->flags)) {
4148 if (raid10_add_disk(mddev, rdev) == 0) {
4149 if (rdev->raid_disk >=
4150 conf->prev.raid_disks)
4151 set_bit(In_sync, &rdev->flags);
4153 rdev->recovery_offset = 0;
4155 if (sysfs_link_rdev(mddev, rdev))
4156 /* Failure here is OK */;
4158 } else if (rdev->raid_disk >= conf->prev.raid_disks
4159 && !test_bit(Faulty, &rdev->flags)) {
4160 /* This is a spare that was manually added */
4161 set_bit(In_sync, &rdev->flags);
4164 /* When a reshape changes the number of devices,
4165 * ->degraded is measured against the larger of the
4166 * pre and post numbers.
4168 spin_lock_irq(&conf->device_lock);
4169 mddev->degraded = calc_degraded(conf);
4170 spin_unlock_irq(&conf->device_lock);
4171 mddev->raid_disks = conf->geo.raid_disks;
4172 mddev->reshape_position = conf->reshape_progress;
4173 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4175 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4176 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4177 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4178 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4180 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4182 if (!mddev->sync_thread) {
4186 conf->reshape_checkpoint = jiffies;
4187 md_wakeup_thread(mddev->sync_thread);
4188 md_new_event(mddev);
4192 mddev->recovery = 0;
4193 spin_lock_irq(&conf->device_lock);
4194 conf->geo = conf->prev;
4195 mddev->raid_disks = conf->geo.raid_disks;
4196 rdev_for_each(rdev, mddev)
4197 rdev->new_data_offset = rdev->data_offset;
4199 conf->reshape_progress = MaxSector;
4200 mddev->reshape_position = MaxSector;
4201 spin_unlock_irq(&conf->device_lock);
4205 /* Calculate the last device-address that could contain
4206 * any block from the chunk that includes the array-address 's'
4207 * and report the next address.
4208 * i.e. the address returned will be chunk-aligned and after
4209 * any data that is in the chunk containing 's'.
4211 static sector_t last_dev_address(sector_t s, struct geom *geo)
4213 s = (s | geo->chunk_mask) + 1;
4214 s >>= geo->chunk_shift;
4215 s *= geo->near_copies;
4216 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4217 s *= geo->far_copies;
4218 s <<= geo->chunk_shift;
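/*
 * Illustrative sketch, not called by the driver: last_dev_address() above
 * with plain division in place of DIV_ROUND_UP_SECTOR_T.  Worked example
 * with 64-sector chunks (mask 63, shift 6), near_copies = 2, far_copies = 1
 * and 4 disks: s = 100 rounds up to 128, i.e. 2 chunks; with 2 near copies
 * that is 4 chunk copies, ceil(4 / 4) = 1 chunk per device, so the result
 * is device address 64.
 */
static sector_t example_last_dev_address(sector_t s, sector_t chunk_mask,
					 int chunk_shift, int near_copies,
					 int far_copies, int raid_disks)
{
	s = (s | chunk_mask) + 1;		/* next chunk boundary */
	s >>= chunk_shift;			/* array chunks */
	s *= near_copies;			/* near copies of each chunk */
	s = (s + raid_disks - 1) / raid_disks;	/* chunks per device, rounded up */
	s *= far_copies;			/* far copies per device */
	return s << chunk_shift;		/* back to device sectors */
}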
4222 /* Calculate the first device-address that could contain
4223 * any block from the chunk that includes the array-address 's'.
4224 * This too will be the start of a chunk
4226 static sector_t first_dev_address(sector_t s, struct geom *geo)
4228 s >>= geo->chunk_shift;
4229 s *= geo->near_copies;
4230 sector_div(s, geo->raid_disks);
4231 s *= geo->far_copies;
4232 s <<= geo->chunk_shift;
4236 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4239 /* We simply copy at most one chunk (smallest of old and new)
4240 * at a time, possibly less if that exceeds RESYNC_PAGES,
4241 * or we hit a bad block or something.
4242 * This might mean we pause for normal IO in the middle of
4243 * a chunk, but that is not a problem as mddev->reshape_position
4244 * can record any location.
4246 * If we will want to write to a location that isn't
4247 * yet recorded as 'safe' (i.e. in metadata on disk) then
4248 * we need to flush all reshape requests and update the metadata.
4250 * When reshaping forwards (e.g. to more devices), we interpret
4251 * 'safe' as the earliest block which might not have been copied
4252 * down yet. We divide this by previous stripe size and multiply
4253 * by previous stripe length to get lowest device offset that we
4254 * cannot write to yet.
4255 * We interpret 'sector_nr' as an address that we want to write to.
4256 * From this we use last_dev_address() to find where we might
4257 * write to, and first_dev_address() on the 'safe' position.
4258 * If this 'next' write position is after the 'safe' position,
4259 * we must update the metadata to increase the 'safe' position.
4261 * When reshaping backwards, we round in the opposite direction
4262 * and perform the reverse test: next write position must not be
4263 * less than current safe position.
4265 * In all this the minimum difference in data offsets
4266 * (conf->offset_diff - always positive) allows a bit of slack,
4267 * so next can be after 'safe', but not by more than offset_diff
4269 * We need to prepare all the bios here before we start any IO
4270 * to ensure the size we choose is acceptable to all devices.
4271 * That means one for each copy for write-out and an extra one for
4273 * We store the read-in bio in ->master_bio and the others in
4274 * ->devs[x].bio and ->devs[x].repl_bio.
4276 struct r10conf *conf = mddev->private;
4277 struct r10bio *r10_bio;
4278 sector_t next, safe, last;
4282 struct md_rdev *rdev;
4285 struct bio *bio, *read_bio;
4286 int sectors_done = 0;
4288 if (sector_nr == 0) {
4289 /* If restarting in the middle, skip the initial sectors */
4290 if (mddev->reshape_backwards &&
4291 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4292 sector_nr = (raid10_size(mddev, 0, 0)
4293 - conf->reshape_progress);
4294 } else if (!mddev->reshape_backwards &&
4295 conf->reshape_progress > 0)
4296 sector_nr = conf->reshape_progress;
4298 mddev->curr_resync_completed = sector_nr;
4299 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4305 /* We don't use sector_nr to track where we are up to
4306 * as that doesn't work well for ->reshape_backwards.
4307 * So just use ->reshape_progress.
4309 if (mddev->reshape_backwards) {
4310 /* 'next' is the earliest device address that we might
4311 * write to for this chunk in the new layout
4313 next = first_dev_address(conf->reshape_progress - 1,
4316 /* 'safe' is the last device address that we might read from
4317 * in the old layout after a restart
4319 safe = last_dev_address(conf->reshape_safe - 1,
4322 if (next + conf->offset_diff < safe)
4325 last = conf->reshape_progress - 1;
4326 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4327 & conf->prev.chunk_mask);
4328 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4329 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4331 /* 'next' is after the last device address that we
4332 * might write to for this chunk in the new layout
4334 next = last_dev_address(conf->reshape_progress, &conf->geo);
4336 /* 'safe' is the earliest device address that we might
4337 * read from in the old layout after a restart
4339 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4341 /* Need to update metadata if 'next' might be beyond 'safe'
4342 * as that would possibly corrupt data
4344 if (next > safe + conf->offset_diff)
4347 sector_nr = conf->reshape_progress;
4348 last = sector_nr | (conf->geo.chunk_mask
4349 & conf->prev.chunk_mask);
4351 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4352 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4356 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4357 /* Need to update reshape_position in metadata */
4359 mddev->reshape_position = conf->reshape_progress;
4360 if (mddev->reshape_backwards)
4361 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4362 - conf->reshape_progress;
4364 mddev->curr_resync_completed = conf->reshape_progress;
4365 conf->reshape_checkpoint = jiffies;
4366 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4367 md_wakeup_thread(mddev->thread);
4368 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4369 kthread_should_stop());
4370 conf->reshape_safe = mddev->reshape_position;
4371 allow_barrier(conf);
4375 /* Now schedule reads for blocks from sector_nr to last */
4376 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
4377 raise_barrier(conf, sectors_done != 0);
4378 atomic_set(&r10_bio->remaining, 0);
4379 r10_bio->mddev = mddev;
4380 r10_bio->sector = sector_nr;
4381 set_bit(R10BIO_IsReshape, &r10_bio->state);
4382 r10_bio->sectors = last - sector_nr + 1;
4383 rdev = read_balance(conf, r10_bio, &max_sectors);
4384 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4387 /* Cannot read from here, so need to record bad blocks
4388 * on all the target devices.
4391 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4392 return sectors_done;
4395 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4397 read_bio->bi_bdev = rdev->bdev;
4398 read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4399 + rdev->data_offset);
4400 read_bio->bi_private = r10_bio;
4401 read_bio->bi_end_io = end_sync_read;
4402 read_bio->bi_rw = READ;
4403 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
4404 read_bio->bi_flags |= 1 << BIO_UPTODATE;
4405 read_bio->bi_vcnt = 0;
4406 read_bio->bi_size = 0;
4407 r10_bio->master_bio = read_bio;
4408 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4410 /* Now find the locations in the new layout */
4411 __raid10_find_phys(&conf->geo, r10_bio);
4414 read_bio->bi_next = NULL;
4416 for (s = 0; s < conf->copies*2; s++) {
4418 int d = r10_bio->devs[s/2].devnum;
4419 struct md_rdev *rdev2;
4421 rdev2 = conf->mirrors[d].replacement;
4422 b = r10_bio->devs[s/2].repl_bio;
4424 rdev2 = conf->mirrors[d].rdev;
4425 b = r10_bio->devs[s/2].bio;
4427 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4431 b->bi_bdev = rdev2->bdev;
4432 b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
4433 b->bi_private = r10_bio;
4434 b->bi_end_io = end_reshape_write;
4440 /* Now add as many pages as possible to all of these bios. */
4443 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4444 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
4445 int len = (max_sectors - s) << 9;
4446 if (len > PAGE_SIZE)
4447 len = PAGE_SIZE;
4448 for (bio = blist; bio ; bio = bio->bi_next) {
4449 struct bio *bio2;
4450 if (bio_add_page(bio, page, len, 0))
4451 continue;
4453 /* Didn't fit, must stop */
4454 for (bio2 = blist;
4455 bio2 && bio2 != bio;
4456 bio2 = bio2->bi_next) {
4457 /* Remove last page from this bio */
4458 bio2->bi_vcnt--;
4459 bio2->bi_size -= len;
4460 bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
4461 }
4462 goto bio_full;
4463 }
4464 sector_nr += len >> 9;
4465 nr_sectors += len >> 9;
4466 }
4467 bio_full:
4468 r10_bio->sectors = nr_sectors;
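/* Every bio on 'blist' - the read bio plus one write bio per copy - points
 * at the same pages, taken from r10_bio->devs[0].bio, so the data is read
 * once and written to all new-layout copies without extra copying.  When a
 * bio_add_page() fails, the page just added is stripped from the bios that
 * had already accepted it, keeping all bios describing an identical range
 * before submission.
 */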
4470 /* Now submit the read */
4471 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
4472 atomic_inc(&r10_bio->remaining);
4473 read_bio->bi_next = NULL;
4474 generic_make_request(read_bio);
4475 sector_nr += nr_sectors;
4476 sectors_done += nr_sectors;
4477 if (sector_nr <= last)
4478 goto read_more;
4480 /* Now that we have done the whole section we can
4481 * update reshape_progress
4482 */
4483 if (mddev->reshape_backwards)
4484 conf->reshape_progress -= sectors_done;
4485 else
4486 conf->reshape_progress += sectors_done;
4488 return sectors_done;
4489 }
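/* Rough shape of one reshape_request() pass (illustrative outline only):
 * pick a [sector_nr, last] range aligned to both the old and the new chunk
 * size; checkpoint reshape_position in the superblock first if the coming
 * new-layout writes could overlap old-layout data that has not been read
 * yet; read the range from one device in the old layout; the matching
 * writes to every copy in the new layout are issued later by
 * reshape_request_write() once the read completes.
 */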
4491 static void end_reshape_request(struct r10bio *r10_bio);
4492 static int handle_reshape_read_error(struct mddev *mddev,
4493 struct r10bio *r10_bio);
4494 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4495 {
4496 /* Reshape read completed. Hopefully we have a block
4497 * to write out.
4498 * If we got a read error then we do sync 1-page reads from
4499 * elsewhere until we find the data - or give up.
4500 */
4501 struct r10conf *conf = mddev->private;
4504 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4505 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4506 /* Reshape has been aborted */
4507 md_done_sync(mddev, r10_bio->sectors, 0);
4508 return;
4509 }
4511 /* We definitely have the data in the pages, schedule the
4512 * writes.
4513 */
4514 atomic_set(&r10_bio->remaining, 1);
4515 for (s = 0; s < conf->copies*2; s++) {
4516 struct bio *b;
4517 int d = r10_bio->devs[s/2].devnum;
4518 struct md_rdev *rdev;
4519 if (s&1) {
4520 rdev = conf->mirrors[d].replacement;
4521 b = r10_bio->devs[s/2].repl_bio;
4522 } else {
4523 rdev = conf->mirrors[d].rdev;
4524 b = r10_bio->devs[s/2].bio;
4525 }
4526 if (!rdev || test_bit(Faulty, &rdev->flags))
4527 continue;
4528 atomic_inc(&rdev->nr_pending);
4529 md_sync_acct(b->bi_bdev, r10_bio->sectors);
4530 atomic_inc(&r10_bio->remaining);
4532 generic_make_request(b);
4533 }
4534 end_reshape_request(r10_bio);
4535 }
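/* Note on the accounting in reshape_request_write() above:
 * r10_bio->remaining starts at 1 so the r10_bio cannot complete while
 * writes are still being queued; every submitted write holds an extra
 * reference that end_reshape_write() drops, and the final
 * end_reshape_request() call drops the initial one.
 */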
4537 static void end_reshape(struct r10conf *conf)
4538 {
4539 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4540 return;
4542 spin_lock_irq(&conf->device_lock);
4543 conf->prev = conf->geo;
4544 md_finish_reshape(conf->mddev);
4546 conf->reshape_progress = MaxSector;
4547 spin_unlock_irq(&conf->device_lock);
4549 /* read-ahead size must cover two whole stripes, which is
4550 * 2 * (datadisks = raid_disks / near_copies) * chunksize
4551 */
4552 if (conf->mddev->queue) {
4553 int stripe = conf->geo.raid_disks *
4554 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4555 stripe /= conf->geo.near_copies;
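/* Worked example with illustrative values: raid_disks = 4, 512KiB chunks
 * (chunk_sectors = 1024) and 4KiB pages give stripe = 4 * (524288 / 4096)
 * = 512 pages, and near_copies = 2 halves that to 256 pages of data per
 * stripe, so read-ahead would be raised to at least 512 pages (2MiB).
 */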
4556 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4557 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4558 }
4560 }
4563 static int handle_reshape_read_error(struct mddev *mddev,
4564 struct r10bio *r10_bio)
4565 {
4566 /* Use sync reads to get the blocks from somewhere else */
4567 int sectors = r10_bio->sectors;
4568 struct r10conf *conf = mddev->private;
4569 struct {
4570 struct r10bio r10_bio;
4571 struct r10dev devs[conf->copies];
4572 } on_stack;
4573 struct r10bio *r10b = &on_stack.r10_bio;
4574 int slot = 0;
4575 int idx = 0;
4576 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
4578 r10b->sector = r10_bio->sector;
4579 __raid10_find_phys(&conf->prev, r10b);
4581 while (sectors) {
4584 int first_slot = slot;
4586 if (s > (PAGE_SIZE >> 9))
4587 s = PAGE_SIZE >> 9;
4590 int d = r10b->devs[slot].devnum;
4591 struct md_rdev *rdev = conf->mirrors[d].rdev;
4593 if (rdev == NULL ||
4594 test_bit(Faulty, &rdev->flags) ||
4595 !test_bit(In_sync, &rdev->flags))
4598 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4599 success = sync_page_io(rdev,
4608 if (slot >= conf->copies)
4609 slot = 0;
4610 if (slot == first_slot)
4611 break;
4614 /* couldn't read this block, must give up */
4615 set_bit(MD_RECOVERY_INTR,
4616 &mddev->recovery);
4617 return -1;
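/* The retry loop above walks the copies of this block in the old layout,
 * wrapping past the last slot back to slot 0, and only reaches the give-up
 * path above once it has come back to the slot it started from without a
 * successful read.
 */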
4625 static void end_reshape_write(struct bio *bio, int error)
4626 {
4627 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4628 struct r10bio *r10_bio = bio->bi_private;
4629 struct mddev *mddev = r10_bio->mddev;
4630 struct r10conf *conf = mddev->private;
4631 int d;
4632 int slot;
4633 int repl;
4634 struct md_rdev *rdev = NULL;
4636 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4637 if (repl)
4638 rdev = conf->mirrors[d].replacement;
4639 if (!rdev) {
4641 rdev = conf->mirrors[d].rdev;
4642 }
4644 if (!uptodate) {
4645 /* FIXME should record badblock */
4646 md_error(mddev, rdev);
4647 }
4649 rdev_dec_pending(rdev, mddev);
4650 end_reshape_request(r10_bio);
4653 static void end_reshape_request(struct r10bio *r10_bio)
4654 {
4655 if (!atomic_dec_and_test(&r10_bio->remaining))
4656 return;
4657 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4658 bio_put(r10_bio->master_bio);
4662 static void raid10_finish_reshape(struct mddev *mddev)
4663 {
4664 struct r10conf *conf = mddev->private;
4666 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4667 return;
4669 if (mddev->delta_disks > 0) {
4670 sector_t size = raid10_size(mddev, 0, 0);
4671 md_set_array_sectors(mddev, size);
4672 if (mddev->recovery_cp > mddev->resync_max_sectors) {
4673 mddev->recovery_cp = mddev->resync_max_sectors;
4674 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4675 }
4676 mddev->resync_max_sectors = size;
4677 set_capacity(mddev->gendisk, mddev->array_sectors);
4678 revalidate_disk(mddev->gendisk);
4679 } else {
4680 int d;
4681 for (d = conf->geo.raid_disks ;
4682 d < conf->geo.raid_disks - mddev->delta_disks;
4683 d++) {
4684 struct md_rdev *rdev = conf->mirrors[d].rdev;
4685 if (rdev)
4686 clear_bit(In_sync, &rdev->flags);
4687 rdev = conf->mirrors[d].replacement;
4688 if (rdev)
4689 clear_bit(In_sync, &rdev->flags);
4690 }
4691 }
4692 mddev->layout = mddev->new_layout;
4693 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4694 mddev->reshape_position = MaxSector;
4695 mddev->delta_disks = 0;
4696 mddev->reshape_backwards = 0;
4697 }
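/* In summary: growing the array (delta_disks > 0) enlarges the reported
 * size and pulls recovery_cp back so the newly exposed region beyond the
 * old size gets resynced; shrinking just marks the devices that are no
 * longer part of the geometry !In_sync so they can be removed.
 */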
4699 static struct md_personality raid10_personality =
4700 {
4701 .name = "raid10",
4702 .level = 10,
4703 .owner = THIS_MODULE,
4704 .make_request = make_request,
4705 .run = run,
4706 .stop = stop,
4707 .status = status,
4708 .error_handler = error,
4709 .hot_add_disk = raid10_add_disk,
4710 .hot_remove_disk= raid10_remove_disk,
4711 .spare_active = raid10_spare_active,
4712 .sync_request = sync_request,
4713 .quiesce = raid10_quiesce,
4714 .size = raid10_size,
4715 .resize = raid10_resize,
4716 .takeover = raid10_takeover,
4717 .check_reshape = raid10_check_reshape,
4718 .start_reshape = raid10_start_reshape,
4719 .finish_reshape = raid10_finish_reshape,
4720 };
4722 static int __init raid_init(void)
4723 {
4724 return register_md_personality(&raid10_personality);
4725 }
4727 static void raid_exit(void)
4728 {
4729 unregister_md_personality(&raid10_personality);
4730 }
4732 module_init(raid_init);
4733 module_exit(raid_exit);
4734 MODULE_LICENSE("GPL");
4735 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
4736 MODULE_ALIAS("md-personality-9"); /* RAID10 */
4737 MODULE_ALIAS("md-raid10");
4738 MODULE_ALIAS("md-level-10");
4740 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
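/* With S_IRUGO|S_IWUSR this limit is normally adjustable at run time via
 * /sys/module/raid10/parameters/max_queued_requests (typical sysfs path;
 * given here only as an illustration).
 */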