2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 * The sequencing for updating the bitmap reliably is a little
25 * subtle (and I got it wrong the first time) so it deserves some
28 * We group bitmap updates into batches. Each batch has a number.
29 * We may write out several batches at once, but that isn't very important.
30 * conf->seq_write is the number of the last batch successfully written.
31 * conf->seq_flush is the number of the last batch that was closed to
33 * When we discover that we will need to write to any block in a stripe
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35 * the number of the batch it will be in. This is seq_flush+1.
36 * When we are ready to do a write, if that batch hasn't been written yet,
37 * we plug the array and queue the stripe for later.
38 * When an unplug happens, we increment conf->seq_flush, thus closing the current
40 * When we notice that seq_flush > seq_write, we write out all pending updates
41 * to the bitmap, and advance seq_write to where seq_flush was.
42 * This may occasionally write a bit out twice, but is sure never to
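/* Worked example of the batching above (illustration only, not from the
 * original source): suppose conf->seq_write == conf->seq_flush == 7.
 * A new write arrives and add_stripe_bio records sh->bm_seq = 8
 * (seq_flush + 1).  An unplug then bumps seq_flush to 8, closing that
 * batch.  Once the pending bitmap updates are written out, seq_write is
 * advanced to 8, and the test in do_release_stripe
 * (sh->bm_seq - conf->seq_write > 0) no longer delays the stripe.
 */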
46 #include <linux/blkdev.h>
47 #include <linux/kthread.h>
48 #include <linux/raid/pq.h>
49 #include <linux/async_tx.h>
50 #include <linux/module.h>
51 #include <linux/async.h>
52 #include <linux/seq_file.h>
53 #include <linux/cpu.h>
54 #include <linux/slab.h>
55 #include <linux/ratelimit.h>
65 #define NR_STRIPES 256
66 #define STRIPE_SIZE PAGE_SIZE
67 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
68 #define STRIPE_SECTORS (STRIPE_SIZE>>9)
69 #define IO_THRESHOLD 1
70 #define BYPASS_THRESHOLD 1
71 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
72 #define HASH_MASK (NR_HASH - 1)
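/* Worked numbers for the geometry above (illustrative; assumes a 4096-byte
 * PAGE_SIZE and 8-byte pointers): STRIPE_SIZE = 4096, STRIPE_SHIFT = 12 - 9
 * = 3, STRIPE_SECTORS = 4096 >> 9 = 8 sectors per stripe page,
 * NR_HASH = 4096 / 8 = 512 buckets, HASH_MASK = 511.  stripe_hash() then
 * computes (sect >> 3) & 511, so all 8 sectors of one stripe select the
 * same bucket.
 */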
74 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
76 int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
77 return &conf->stripe_hashtbl[hash];
80 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
81 * order without overlap. There may be several bio's per stripe+device, and
82 * a bio could span several devices.
83 * When walking this list for a particular stripe+device, we must never proceed
84 * beyond a bio that extends past this device, as the next bio might no longer
86 * This function is used to determine the 'next' bio in the list, given the sector
87 * of the current stripe+device
89 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
91 int sectors = bio->bi_size >> 9;
92 if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
99 * We maintain a biased count of active stripes in the bottom 16 bits of
100 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
102 static inline int raid5_bi_processed_stripes(struct bio *bio)
104 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
105 return (atomic_read(segments) >> 16) & 0xffff;
108 static inline int raid5_dec_bi_active_stripes(struct bio *bio)
110 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
111 return atomic_sub_return(1, segments) & 0xffff;
114 static inline void raid5_inc_bi_active_stripes(struct bio *bio)
116 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
117 atomic_inc(segments);
120 static inline void raid5_set_bi_processed_stripes(struct bio *bio,
123 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
127 old = atomic_read(segments);
128 new = (old & 0xffff) | (cnt << 16);
129 } while (atomic_cmpxchg(segments, old, new) != old);
132 static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
134 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
135 atomic_set(segments, cnt);
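/* Illustrative walk-through of the packing used by the helpers above
 * (not part of the original source): raid5_set_bi_stripes(bio, 1) stores
 * 0x00000001, i.e. one active stripe and zero processed stripes.  Two
 * calls to raid5_inc_bi_active_stripes() give 0x00000003.
 * raid5_set_bi_processed_stripes(bio, 5) cmpxchg-loops until it installs
 * 0x00050003, updating the upper half without disturbing the active
 * count.  Each raid5_dec_bi_active_stripes() returns the new low 16 bits,
 * so the caller that sees 0 knows it dropped the last reference.
 */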
138 /* Find first data disk in a raid6 stripe */
139 static inline int raid6_d0(struct stripe_head *sh)
142 /* ddf always starts from the first device */
144 /* md starts just after Q block */
145 if (sh->qd_idx == sh->disks - 1)
148 return sh->qd_idx + 1;
150 static inline int raid6_next_disk(int disk, int raid_disks)
153 return (disk < raid_disks) ? disk : 0;
156 /* When walking through the disks in a raid6, starting at raid6_d0,
157 * we need to map each disk to a 'slot', where the data disks are slot
158 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
159 * is raid_disks-1. This helper does that mapping.
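/* Example of the mapping (illustration only, md layout): with 5 devices,
 * syndrome_disks = 3.  Suppose pd_idx = 1 and qd_idx = 2; raid6_d0()
 * returns 3 (just after Q), so the walk visits devices 3, 4, 0, 1, 2.
 * The data devices 3, 4 and 0 land in slots 0, 1 and 2, the parity
 * device (1) in slot 3 (= syndrome_disks) and the Q device (2) in
 * slot 4 (= syndrome_disks + 1).
 */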
161 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
162 int *count, int syndrome_disks)
168 if (idx == sh->pd_idx)
169 return syndrome_disks;
170 if (idx == sh->qd_idx)
171 return syndrome_disks + 1;
177 static void return_io(struct bio *return_bi)
179 struct bio *bi = return_bi;
182 return_bi = bi->bi_next;
190 static void print_raid5_conf (struct r5conf *conf);
192 static int stripe_operations_active(struct stripe_head *sh)
194 return sh->check_state || sh->reconstruct_state ||
195 test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
196 test_bit(STRIPE_COMPUTE_RUN, &sh->state);
199 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
201 BUG_ON(!list_empty(&sh->lru));
202 BUG_ON(atomic_read(&conf->active_stripes)==0);
203 if (test_bit(STRIPE_HANDLE, &sh->state)) {
204 if (test_bit(STRIPE_DELAYED, &sh->state) &&
205 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
206 list_add_tail(&sh->lru, &conf->delayed_list);
207 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
208 sh->bm_seq - conf->seq_write > 0)
209 list_add_tail(&sh->lru, &conf->bitmap_list);
211 clear_bit(STRIPE_DELAYED, &sh->state);
212 clear_bit(STRIPE_BIT_DELAY, &sh->state);
213 list_add_tail(&sh->lru, &conf->handle_list);
215 md_wakeup_thread(conf->mddev->thread);
217 BUG_ON(stripe_operations_active(sh));
218 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
219 if (atomic_dec_return(&conf->preread_active_stripes)
221 md_wakeup_thread(conf->mddev->thread);
222 atomic_dec(&conf->active_stripes);
223 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
224 list_add_tail(&sh->lru, &conf->inactive_list);
225 wake_up(&conf->wait_for_stripe);
226 if (conf->retry_read_aligned)
227 md_wakeup_thread(conf->mddev->thread);
232 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
234 if (atomic_dec_and_test(&sh->count))
235 do_release_stripe(conf, sh);
238 static void release_stripe(struct stripe_head *sh)
240 struct r5conf *conf = sh->raid_conf;
243 local_irq_save(flags);
244 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
245 do_release_stripe(conf, sh);
246 spin_unlock(&conf->device_lock);
248 local_irq_restore(flags);
251 static inline void remove_hash(struct stripe_head *sh)
253 pr_debug("remove_hash(), stripe %llu\n",
254 (unsigned long long)sh->sector);
256 hlist_del_init(&sh->hash);
259 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
261 struct hlist_head *hp = stripe_hash(conf, sh->sector);
263 pr_debug("insert_hash(), stripe %llu\n",
264 (unsigned long long)sh->sector);
266 hlist_add_head(&sh->hash, hp);
270 /* find an idle stripe, make sure it is unhashed, and return it. */
271 static struct stripe_head *get_free_stripe(struct r5conf *conf)
273 struct stripe_head *sh = NULL;
274 struct list_head *first;
276 if (list_empty(&conf->inactive_list))
278 first = conf->inactive_list.next;
279 sh = list_entry(first, struct stripe_head, lru);
280 list_del_init(first);
282 atomic_inc(&conf->active_stripes);
287 static void shrink_buffers(struct stripe_head *sh)
291 int num = sh->raid_conf->pool_size;
293 for (i = 0; i < num ; i++) {
297 sh->dev[i].page = NULL;
302 static int grow_buffers(struct stripe_head *sh)
305 int num = sh->raid_conf->pool_size;
307 for (i = 0; i < num; i++) {
310 if (!(page = alloc_page(GFP_KERNEL))) {
313 sh->dev[i].page = page;
318 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
319 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
320 struct stripe_head *sh);
322 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
324 struct r5conf *conf = sh->raid_conf;
327 BUG_ON(atomic_read(&sh->count) != 0);
328 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
329 BUG_ON(stripe_operations_active(sh));
331 pr_debug("init_stripe called, stripe %llu\n",
332 (unsigned long long)sh->sector);
336 sh->generation = conf->generation - previous;
337 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
339 stripe_set_idx(sector, conf, previous, sh);
343 for (i = sh->disks; i--; ) {
344 struct r5dev *dev = &sh->dev[i];
346 if (dev->toread || dev->read || dev->towrite || dev->written ||
347 test_bit(R5_LOCKED, &dev->flags)) {
348 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
349 (unsigned long long)sh->sector, i, dev->toread,
350 dev->read, dev->towrite, dev->written,
351 test_bit(R5_LOCKED, &dev->flags));
355 raid5_build_block(sh, i, previous);
357 insert_hash(conf, sh);
360 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
363 struct stripe_head *sh;
364 struct hlist_node *hn;
366 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
367 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
368 if (sh->sector == sector && sh->generation == generation)
370 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
375 * Need to check if array has failed when deciding whether to:
377 * - remove non-faulty devices
380 * This determination is simple when no reshape is happening.
381 * However if there is a reshape, we need to carefully check
382 * both the before and after sections.
383 * This is because some failed devices may only affect one
384 * of the two sections, and some non-in_sync devices may
385 * be insync in the section most affected by failed devices.
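/* Illustration (not from the original source): while growing a 4-device
 * array to 5, the newly added device is typically not yet In_sync.  The
 * pass over the 'previous' 4 devices never sees it, and the pass over the
 * new 5-device geometry does not count it either because
 * raid_disks > previous_raid_disks (the reshape itself will populate it),
 * so the array is not reported as degraded.  A genuinely Faulty device,
 * by contrast, is counted in both passes.
 */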
387 static int calc_degraded(struct r5conf *conf)
389 int degraded, degraded2;
394 for (i = 0; i < conf->previous_raid_disks; i++) {
395 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
396 if (!rdev || test_bit(Faulty, &rdev->flags))
398 else if (test_bit(In_sync, &rdev->flags))
401 /* not in-sync or faulty.
402 * If the reshape increases the number of devices,
403 * this is being recovered by the reshape, so
404 * this 'previous' section is not in_sync.
405 * If the number of devices is being reduced however,
406 * the device can only be part of the array if
407 * we are reverting a reshape, so this section will
410 if (conf->raid_disks >= conf->previous_raid_disks)
414 if (conf->raid_disks == conf->previous_raid_disks)
418 for (i = 0; i < conf->raid_disks; i++) {
419 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
420 if (!rdev || test_bit(Faulty, &rdev->flags))
422 else if (test_bit(In_sync, &rdev->flags))
425 /* not in-sync or faulty.
426 * If reshape increases the number of devices, this
427 * section has already been recovered, else it
428 * almost certainly hasn't.
430 if (conf->raid_disks <= conf->previous_raid_disks)
434 if (degraded2 > degraded)
439 static int has_failed(struct r5conf *conf)
443 if (conf->mddev->reshape_position == MaxSector)
444 return conf->mddev->degraded > conf->max_degraded;
446 degraded = calc_degraded(conf);
447 if (degraded > conf->max_degraded)
452 static struct stripe_head *
453 get_active_stripe(struct r5conf *conf, sector_t sector,
454 int previous, int noblock, int noquiesce)
456 struct stripe_head *sh;
458 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
460 spin_lock_irq(&conf->device_lock);
463 wait_event_lock_irq(conf->wait_for_stripe,
464 conf->quiesce == 0 || noquiesce,
465 conf->device_lock, /* nothing */);
466 sh = __find_stripe(conf, sector, conf->generation - previous);
468 if (!conf->inactive_blocked)
469 sh = get_free_stripe(conf);
470 if (noblock && sh == NULL)
473 conf->inactive_blocked = 1;
474 wait_event_lock_irq(conf->wait_for_stripe,
475 !list_empty(&conf->inactive_list) &&
476 (atomic_read(&conf->active_stripes)
477 < (conf->max_nr_stripes *3/4)
478 || !conf->inactive_blocked),
481 conf->inactive_blocked = 0;
483 init_stripe(sh, sector, previous);
485 if (atomic_read(&sh->count)) {
486 BUG_ON(!list_empty(&sh->lru)
487 && !test_bit(STRIPE_EXPANDING, &sh->state));
489 if (!test_bit(STRIPE_HANDLE, &sh->state))
490 atomic_inc(&conf->active_stripes);
491 if (list_empty(&sh->lru) &&
492 !test_bit(STRIPE_EXPANDING, &sh->state))
494 list_del_init(&sh->lru);
497 } while (sh == NULL);
500 atomic_inc(&sh->count);
502 spin_unlock_irq(&conf->device_lock);
506 /* Determine if 'data_offset' or 'new_data_offset' should be used
507 * in this stripe_head.
509 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
511 sector_t progress = conf->reshape_progress;
512 /* Need a memory barrier to make sure we see the value
513 * of conf->generation, or ->data_offset that was set before
514 * reshape_progress was updated.
517 if (progress == MaxSector)
519 if (sh->generation == conf->generation - 1)
521 /* We are in a reshape, and this is a new-generation stripe,
522 * so use new_data_offset.
528 raid5_end_read_request(struct bio *bi, int error);
530 raid5_end_write_request(struct bio *bi, int error);
532 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
534 struct r5conf *conf = sh->raid_conf;
535 int i, disks = sh->disks;
539 for (i = disks; i--; ) {
541 int replace_only = 0;
542 struct bio *bi, *rbi;
543 struct md_rdev *rdev, *rrdev = NULL;
544 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
545 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
549 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
551 else if (test_and_clear_bit(R5_WantReplace,
552 &sh->dev[i].flags)) {
557 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
560 bi = &sh->dev[i].req;
561 rbi = &sh->dev[i].rreq; /* For writing to replacement */
566 bi->bi_end_io = raid5_end_write_request;
567 rbi->bi_end_io = raid5_end_write_request;
569 bi->bi_end_io = raid5_end_read_request;
572 rrdev = rcu_dereference(conf->disks[i].replacement);
573 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
574 rdev = rcu_dereference(conf->disks[i].rdev);
583 /* We raced and saw duplicates */
586 if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
591 if (rdev && test_bit(Faulty, &rdev->flags))
594 atomic_inc(&rdev->nr_pending);
595 if (rrdev && test_bit(Faulty, &rrdev->flags))
598 atomic_inc(&rrdev->nr_pending);
601 /* We have already checked bad blocks for reads. Now
602 * need to check for writes. We never accept write errors
603 * on the replacement, so we don't need to check rrdev.
605 while ((rw & WRITE) && rdev &&
606 test_bit(WriteErrorSeen, &rdev->flags)) {
609 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
610 &first_bad, &bad_sectors);
615 set_bit(BlockedBadBlocks, &rdev->flags);
616 if (!conf->mddev->external &&
617 conf->mddev->flags) {
618 /* It is very unlikely, but we might
619 * still need to write out the
620 * bad block log - better give it
622 md_check_recovery(conf->mddev);
625 * Because md_wait_for_blocked_rdev
626 * will dec nr_pending, we must
627 * increment it first.
629 atomic_inc(&rdev->nr_pending);
630 md_wait_for_blocked_rdev(rdev, conf->mddev);
632 /* Acknowledged bad block - skip the write */
633 rdev_dec_pending(rdev, conf->mddev);
639 if (s->syncing || s->expanding || s->expanded
641 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
643 set_bit(STRIPE_IO_STARTED, &sh->state);
645 bi->bi_bdev = rdev->bdev;
646 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
647 __func__, (unsigned long long)sh->sector,
649 atomic_inc(&sh->count);
650 if (use_new_offset(conf, sh))
651 bi->bi_sector = (sh->sector
652 + rdev->new_data_offset);
654 bi->bi_sector = (sh->sector
655 + rdev->data_offset);
656 bi->bi_flags = 1 << BIO_UPTODATE;
658 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
659 bi->bi_io_vec[0].bv_offset = 0;
660 bi->bi_size = STRIPE_SIZE;
663 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
664 generic_make_request(bi);
667 if (s->syncing || s->expanding || s->expanded
669 md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
671 set_bit(STRIPE_IO_STARTED, &sh->state);
673 rbi->bi_bdev = rrdev->bdev;
674 pr_debug("%s: for %llu schedule op %ld on "
675 "replacement disc %d\n",
676 __func__, (unsigned long long)sh->sector,
678 atomic_inc(&sh->count);
679 if (use_new_offset(conf, sh))
680 rbi->bi_sector = (sh->sector
681 + rrdev->new_data_offset);
683 rbi->bi_sector = (sh->sector
684 + rrdev->data_offset);
685 rbi->bi_flags = 1 << BIO_UPTODATE;
687 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
688 rbi->bi_io_vec[0].bv_offset = 0;
689 rbi->bi_size = STRIPE_SIZE;
691 generic_make_request(rbi);
693 if (!rdev && !rrdev) {
695 set_bit(STRIPE_DEGRADED, &sh->state);
696 pr_debug("skip op %ld on disc %d for sector %llu\n",
697 bi->bi_rw, i, (unsigned long long)sh->sector);
698 clear_bit(R5_LOCKED, &sh->dev[i].flags);
699 set_bit(STRIPE_HANDLE, &sh->state);
704 static struct dma_async_tx_descriptor *
705 async_copy_data(int frombio, struct bio *bio, struct page *page,
706 sector_t sector, struct dma_async_tx_descriptor *tx)
709 struct page *bio_page;
712 struct async_submit_ctl submit;
713 enum async_tx_flags flags = 0;
715 if (bio->bi_sector >= sector)
716 page_offset = (signed)(bio->bi_sector - sector) * 512;
718 page_offset = (signed)(sector - bio->bi_sector) * -512;
721 flags |= ASYNC_TX_FENCE;
722 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
724 bio_for_each_segment(bvl, bio, i) {
725 int len = bvl->bv_len;
729 if (page_offset < 0) {
730 b_offset = -page_offset;
731 page_offset += b_offset;
735 if (len > 0 && page_offset + len > STRIPE_SIZE)
736 clen = STRIPE_SIZE - page_offset;
741 b_offset += bvl->bv_offset;
742 bio_page = bvl->bv_page;
744 tx = async_memcpy(page, bio_page, page_offset,
745 b_offset, clen, &submit);
747 tx = async_memcpy(bio_page, page, b_offset,
748 page_offset, clen, &submit);
750 /* chain the operations */
751 submit.depend_tx = tx;
753 if (clen < len) /* hit end of page */
761 static void ops_complete_biofill(void *stripe_head_ref)
763 struct stripe_head *sh = stripe_head_ref;
764 struct bio *return_bi = NULL;
765 struct r5conf *conf = sh->raid_conf;
768 pr_debug("%s: stripe %llu\n", __func__,
769 (unsigned long long)sh->sector);
771 /* clear completed biofills */
772 spin_lock_irq(&conf->device_lock);
773 for (i = sh->disks; i--; ) {
774 struct r5dev *dev = &sh->dev[i];
776 /* acknowledge completion of a biofill operation */
777 /* and check if we need to reply to a read request,
778 * new R5_Wantfill requests are held off until
779 * !STRIPE_BIOFILL_RUN
781 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
782 struct bio *rbi, *rbi2;
787 while (rbi && rbi->bi_sector <
788 dev->sector + STRIPE_SECTORS) {
789 rbi2 = r5_next_bio(rbi, dev->sector);
790 if (!raid5_dec_bi_active_stripes(rbi)) {
791 rbi->bi_next = return_bi;
798 spin_unlock_irq(&conf->device_lock);
799 clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
801 return_io(return_bi);
803 set_bit(STRIPE_HANDLE, &sh->state);
807 static void ops_run_biofill(struct stripe_head *sh)
809 struct dma_async_tx_descriptor *tx = NULL;
810 struct r5conf *conf = sh->raid_conf;
811 struct async_submit_ctl submit;
814 pr_debug("%s: stripe %llu\n", __func__,
815 (unsigned long long)sh->sector);
817 for (i = sh->disks; i--; ) {
818 struct r5dev *dev = &sh->dev[i];
819 if (test_bit(R5_Wantfill, &dev->flags)) {
821 spin_lock_irq(&conf->device_lock);
822 dev->read = rbi = dev->toread;
824 spin_unlock_irq(&conf->device_lock);
825 while (rbi && rbi->bi_sector <
826 dev->sector + STRIPE_SECTORS) {
827 tx = async_copy_data(0, rbi, dev->page,
829 rbi = r5_next_bio(rbi, dev->sector);
834 atomic_inc(&sh->count);
835 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
836 async_trigger_callback(&submit);
839 static void mark_target_uptodate(struct stripe_head *sh, int target)
846 tgt = &sh->dev[target];
847 set_bit(R5_UPTODATE, &tgt->flags);
848 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
849 clear_bit(R5_Wantcompute, &tgt->flags);
852 static void ops_complete_compute(void *stripe_head_ref)
854 struct stripe_head *sh = stripe_head_ref;
856 pr_debug("%s: stripe %llu\n", __func__,
857 (unsigned long long)sh->sector);
859 /* mark the computed target(s) as uptodate */
860 mark_target_uptodate(sh, sh->ops.target);
861 mark_target_uptodate(sh, sh->ops.target2);
863 clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
864 if (sh->check_state == check_state_compute_run)
865 sh->check_state = check_state_compute_result;
866 set_bit(STRIPE_HANDLE, &sh->state);
870 /* return a pointer to the address conversion region of the scribble buffer */
871 static addr_conv_t *to_addr_conv(struct stripe_head *sh,
872 struct raid5_percpu *percpu)
874 return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
877 static struct dma_async_tx_descriptor *
878 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
880 int disks = sh->disks;
881 struct page **xor_srcs = percpu->scribble;
882 int target = sh->ops.target;
883 struct r5dev *tgt = &sh->dev[target];
884 struct page *xor_dest = tgt->page;
886 struct dma_async_tx_descriptor *tx;
887 struct async_submit_ctl submit;
890 pr_debug("%s: stripe %llu block: %d\n",
891 __func__, (unsigned long long)sh->sector, target);
892 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
894 for (i = disks; i--; )
896 xor_srcs[count++] = sh->dev[i].page;
898 atomic_inc(&sh->count);
900 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
901 ops_complete_compute, sh, to_addr_conv(sh, percpu));
902 if (unlikely(count == 1))
903 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
905 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
910 /* set_syndrome_sources - populate source buffers for gen_syndrome
911 * @srcs - (struct page *) array of size sh->disks
912 * @sh - stripe_head to parse
914 * Populates srcs in proper layout order for the stripe and returns the
915 * 'count' of sources to be used in a call to async_gen_syndrome. The P
916 * destination buffer is recorded in srcs[count] and the Q destination
917 * is recorded in srcs[count+1].
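/* Example of the resulting layout (illustrative, md layout only): for a
 * 6-device RAID-6, syndrome_disks = 4 and the function returns count = 4.
 * srcs[0..3] hold the data pages in raid6_d0() order, srcs[4] is the P
 * page and srcs[5] is the Q page, so callers hand count + 2 blocks to
 * async_gen_syndrome().
 */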
919 static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
921 int disks = sh->disks;
922 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
923 int d0_idx = raid6_d0(sh);
927 for (i = 0; i < disks; i++)
933 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
935 srcs[slot] = sh->dev[i].page;
936 i = raid6_next_disk(i, disks);
937 } while (i != d0_idx);
939 return syndrome_disks;
942 static struct dma_async_tx_descriptor *
943 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
945 int disks = sh->disks;
946 struct page **blocks = percpu->scribble;
948 int qd_idx = sh->qd_idx;
949 struct dma_async_tx_descriptor *tx;
950 struct async_submit_ctl submit;
956 if (sh->ops.target < 0)
957 target = sh->ops.target2;
958 else if (sh->ops.target2 < 0)
959 target = sh->ops.target;
961 /* we should only have one valid target */
964 pr_debug("%s: stripe %llu block: %d\n",
965 __func__, (unsigned long long)sh->sector, target);
967 tgt = &sh->dev[target];
968 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
971 atomic_inc(&sh->count);
973 if (target == qd_idx) {
974 count = set_syndrome_sources(blocks, sh);
975 blocks[count] = NULL; /* regenerating p is not necessary */
976 BUG_ON(blocks[count+1] != dest); /* q should already be set */
977 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
978 ops_complete_compute, sh,
979 to_addr_conv(sh, percpu));
980 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
982 /* Compute any data- or p-drive using XOR */
984 for (i = disks; i-- ; ) {
985 if (i == target || i == qd_idx)
987 blocks[count++] = sh->dev[i].page;
990 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
991 NULL, ops_complete_compute, sh,
992 to_addr_conv(sh, percpu));
993 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
999 static struct dma_async_tx_descriptor *
1000 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
1002 int i, count, disks = sh->disks;
1003 int syndrome_disks = sh->ddf_layout ? disks : disks-2;
1004 int d0_idx = raid6_d0(sh);
1005 int faila = -1, failb = -1;
1006 int target = sh->ops.target;
1007 int target2 = sh->ops.target2;
1008 struct r5dev *tgt = &sh->dev[target];
1009 struct r5dev *tgt2 = &sh->dev[target2];
1010 struct dma_async_tx_descriptor *tx;
1011 struct page **blocks = percpu->scribble;
1012 struct async_submit_ctl submit;
1014 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1015 __func__, (unsigned long long)sh->sector, target, target2);
1016 BUG_ON(target < 0 || target2 < 0);
1017 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1018 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
1020 /* we need to open-code set_syndrome_sources to handle the
1021 * slot number conversion for 'faila' and 'failb'
1023 for (i = 0; i < disks ; i++)
1028 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1030 blocks[slot] = sh->dev[i].page;
1036 i = raid6_next_disk(i, disks);
1037 } while (i != d0_idx);
1039 BUG_ON(faila == failb);
1042 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1043 __func__, (unsigned long long)sh->sector, faila, failb);
1045 atomic_inc(&sh->count);
1047 if (failb == syndrome_disks+1) {
1048 /* Q disk is one of the missing disks */
1049 if (faila == syndrome_disks) {
1050 /* Missing P+Q, just recompute */
1051 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1052 ops_complete_compute, sh,
1053 to_addr_conv(sh, percpu));
1054 return async_gen_syndrome(blocks, 0, syndrome_disks+2,
1055 STRIPE_SIZE, &submit);
1059 int qd_idx = sh->qd_idx;
1061 /* Missing D+Q: recompute D from P, then recompute Q */
1062 if (target == qd_idx)
1063 data_target = target2;
1065 data_target = target;
1068 for (i = disks; i-- ; ) {
1069 if (i == data_target || i == qd_idx)
1071 blocks[count++] = sh->dev[i].page;
1073 dest = sh->dev[data_target].page;
1074 init_async_submit(&submit,
1075 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1077 to_addr_conv(sh, percpu));
1078 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
1081 count = set_syndrome_sources(blocks, sh);
1082 init_async_submit(&submit, ASYNC_TX_FENCE, tx,
1083 ops_complete_compute, sh,
1084 to_addr_conv(sh, percpu));
1085 return async_gen_syndrome(blocks, 0, count+2,
1086 STRIPE_SIZE, &submit);
1089 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1090 ops_complete_compute, sh,
1091 to_addr_conv(sh, percpu));
1092 if (failb == syndrome_disks) {
1093 /* We're missing D+P. */
1094 return async_raid6_datap_recov(syndrome_disks+2,
1098 /* We're missing D+D. */
1099 return async_raid6_2data_recov(syndrome_disks+2,
1100 STRIPE_SIZE, faila, failb,
1107 static void ops_complete_prexor(void *stripe_head_ref)
1109 struct stripe_head *sh = stripe_head_ref;
1111 pr_debug("%s: stripe %llu\n", __func__,
1112 (unsigned long long)sh->sector);
1115 static struct dma_async_tx_descriptor *
1116 ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
1117 struct dma_async_tx_descriptor *tx)
1119 int disks = sh->disks;
1120 struct page **xor_srcs = percpu->scribble;
1121 int count = 0, pd_idx = sh->pd_idx, i;
1122 struct async_submit_ctl submit;
1124 /* existing parity data subtracted */
1125 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1127 pr_debug("%s: stripe %llu\n", __func__,
1128 (unsigned long long)sh->sector);
1130 for (i = disks; i--; ) {
1131 struct r5dev *dev = &sh->dev[i];
1132 /* Only process blocks that are known to be uptodate */
1133 if (test_bit(R5_Wantdrain, &dev->flags))
1134 xor_srcs[count++] = dev->page;
1137 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1138 ops_complete_prexor, sh, to_addr_conv(sh, percpu));
1139 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1144 static struct dma_async_tx_descriptor *
1145 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1147 int disks = sh->disks;
1150 pr_debug("%s: stripe %llu\n", __func__,
1151 (unsigned long long)sh->sector);
1153 for (i = disks; i--; ) {
1154 struct r5dev *dev = &sh->dev[i];
1157 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
1160 spin_lock_irq(&sh->raid_conf->device_lock);
1161 chosen = dev->towrite;
1162 dev->towrite = NULL;
1163 BUG_ON(dev->written);
1164 wbi = dev->written = chosen;
1165 spin_unlock_irq(&sh->raid_conf->device_lock);
1167 while (wbi && wbi->bi_sector <
1168 dev->sector + STRIPE_SECTORS) {
1169 if (wbi->bi_rw & REQ_FUA)
1170 set_bit(R5_WantFUA, &dev->flags);
1171 if (wbi->bi_rw & REQ_SYNC)
1172 set_bit(R5_SyncIO, &dev->flags);
1173 tx = async_copy_data(1, wbi, dev->page,
1175 wbi = r5_next_bio(wbi, dev->sector);
1183 static void ops_complete_reconstruct(void *stripe_head_ref)
1185 struct stripe_head *sh = stripe_head_ref;
1186 int disks = sh->disks;
1187 int pd_idx = sh->pd_idx;
1188 int qd_idx = sh->qd_idx;
1190 bool fua = false, sync = false;
1192 pr_debug("%s: stripe %llu\n", __func__,
1193 (unsigned long long)sh->sector);
1195 for (i = disks; i--; ) {
1196 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1197 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1200 for (i = disks; i--; ) {
1201 struct r5dev *dev = &sh->dev[i];
1203 if (dev->written || i == pd_idx || i == qd_idx) {
1204 set_bit(R5_UPTODATE, &dev->flags);
1206 set_bit(R5_WantFUA, &dev->flags);
1208 set_bit(R5_SyncIO, &dev->flags);
1212 if (sh->reconstruct_state == reconstruct_state_drain_run)
1213 sh->reconstruct_state = reconstruct_state_drain_result;
1214 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1215 sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1217 BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1218 sh->reconstruct_state = reconstruct_state_result;
1221 set_bit(STRIPE_HANDLE, &sh->state);
1226 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1227 struct dma_async_tx_descriptor *tx)
1229 int disks = sh->disks;
1230 struct page **xor_srcs = percpu->scribble;
1231 struct async_submit_ctl submit;
1232 int count = 0, pd_idx = sh->pd_idx, i;
1233 struct page *xor_dest;
1235 unsigned long flags;
1237 pr_debug("%s: stripe %llu\n", __func__,
1238 (unsigned long long)sh->sector);
1240 /* check if prexor is active which means only process blocks
1241 * that are part of a read-modify-write (written)
1243 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1245 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1246 for (i = disks; i--; ) {
1247 struct r5dev *dev = &sh->dev[i];
1249 xor_srcs[count++] = dev->page;
1252 xor_dest = sh->dev[pd_idx].page;
1253 for (i = disks; i--; ) {
1254 struct r5dev *dev = &sh->dev[i];
1256 xor_srcs[count++] = dev->page;
1260 /* 1/ if we prexor'd then the dest is reused as a source
1261 * 2/ if we did not prexor then we are redoing the parity
1262 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1263 * for the synchronous xor case
1265 flags = ASYNC_TX_ACK |
1266 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1268 atomic_inc(&sh->count);
1270 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
1271 to_addr_conv(sh, percpu));
1272 if (unlikely(count == 1))
1273 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1275 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1279 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1280 struct dma_async_tx_descriptor *tx)
1282 struct async_submit_ctl submit;
1283 struct page **blocks = percpu->scribble;
1286 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1288 count = set_syndrome_sources(blocks, sh);
1290 atomic_inc(&sh->count);
1292 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
1293 sh, to_addr_conv(sh, percpu));
1294 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1297 static void ops_complete_check(void *stripe_head_ref)
1299 struct stripe_head *sh = stripe_head_ref;
1301 pr_debug("%s: stripe %llu\n", __func__,
1302 (unsigned long long)sh->sector);
1304 sh->check_state = check_state_check_result;
1305 set_bit(STRIPE_HANDLE, &sh->state);
1309 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1311 int disks = sh->disks;
1312 int pd_idx = sh->pd_idx;
1313 int qd_idx = sh->qd_idx;
1314 struct page *xor_dest;
1315 struct page **xor_srcs = percpu->scribble;
1316 struct dma_async_tx_descriptor *tx;
1317 struct async_submit_ctl submit;
1321 pr_debug("%s: stripe %llu\n", __func__,
1322 (unsigned long long)sh->sector);
1325 xor_dest = sh->dev[pd_idx].page;
1326 xor_srcs[count++] = xor_dest;
1327 for (i = disks; i--; ) {
1328 if (i == pd_idx || i == qd_idx)
1330 xor_srcs[count++] = sh->dev[i].page;
1333 init_async_submit(&submit, 0, NULL, NULL, NULL,
1334 to_addr_conv(sh, percpu));
1335 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1336 &sh->ops.zero_sum_result, &submit);
1338 atomic_inc(&sh->count);
1339 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1340 tx = async_trigger_callback(&submit);
1343 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1345 struct page **srcs = percpu->scribble;
1346 struct async_submit_ctl submit;
1349 pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1350 (unsigned long long)sh->sector, checkp);
1352 count = set_syndrome_sources(srcs, sh);
1356 atomic_inc(&sh->count);
1357 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1358 sh, to_addr_conv(sh, percpu));
1359 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1360 &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1363 static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1365 int overlap_clear = 0, i, disks = sh->disks;
1366 struct dma_async_tx_descriptor *tx = NULL;
1367 struct r5conf *conf = sh->raid_conf;
1368 int level = conf->level;
1369 struct raid5_percpu *percpu;
1373 percpu = per_cpu_ptr(conf->percpu, cpu);
1374 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1375 ops_run_biofill(sh);
1379 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1381 tx = ops_run_compute5(sh, percpu);
1383 if (sh->ops.target2 < 0 || sh->ops.target < 0)
1384 tx = ops_run_compute6_1(sh, percpu);
1386 tx = ops_run_compute6_2(sh, percpu);
1388 /* terminate the chain if reconstruct is not set to be run */
1389 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1393 if (test_bit(STRIPE_OP_PREXOR, &ops_request))
1394 tx = ops_run_prexor(sh, percpu, tx);
1396 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1397 tx = ops_run_biodrain(sh, tx);
1401 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1403 ops_run_reconstruct5(sh, percpu, tx);
1405 ops_run_reconstruct6(sh, percpu, tx);
1408 if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1409 if (sh->check_state == check_state_run)
1410 ops_run_check_p(sh, percpu);
1411 else if (sh->check_state == check_state_run_q)
1412 ops_run_check_pq(sh, percpu, 0);
1413 else if (sh->check_state == check_state_run_pq)
1414 ops_run_check_pq(sh, percpu, 1);
1420 for (i = disks; i--; ) {
1421 struct r5dev *dev = &sh->dev[i];
1422 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1423 wake_up(&sh->raid_conf->wait_for_overlap);
1428 #ifdef CONFIG_MULTICORE_RAID456
1429 static void async_run_ops(void *param, async_cookie_t cookie)
1431 struct stripe_head *sh = param;
1432 unsigned long ops_request = sh->ops.request;
1434 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1435 wake_up(&sh->ops.wait_for_ops);
1437 __raid_run_ops(sh, ops_request);
1441 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1443 /* since handle_stripe can be called outside of raid5d context
1444 * we need to ensure sh->ops.request is de-staged before another
1447 wait_event(sh->ops.wait_for_ops,
1448 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1449 sh->ops.request = ops_request;
1451 atomic_inc(&sh->count);
1452 async_schedule(async_run_ops, sh);
1455 #define raid_run_ops __raid_run_ops
1458 static int grow_one_stripe(struct r5conf *conf)
1460 struct stripe_head *sh;
1461 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
1465 sh->raid_conf = conf;
1466 #ifdef CONFIG_MULTICORE_RAID456
1467 init_waitqueue_head(&sh->ops.wait_for_ops);
1470 if (grow_buffers(sh)) {
1472 kmem_cache_free(conf->slab_cache, sh);
1475 /* we just created an active stripe so... */
1476 atomic_set(&sh->count, 1);
1477 atomic_inc(&conf->active_stripes);
1478 INIT_LIST_HEAD(&sh->lru);
1483 static int grow_stripes(struct r5conf *conf, int num)
1485 struct kmem_cache *sc;
1486 int devs = max(conf->raid_disks, conf->previous_raid_disks);
1488 if (conf->mddev->gendisk)
1489 sprintf(conf->cache_name[0],
1490 "raid%d-%s", conf->level, mdname(conf->mddev));
1492 sprintf(conf->cache_name[0],
1493 "raid%d-%p", conf->level, conf->mddev);
1494 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
1496 conf->active_name = 0;
1497 sc = kmem_cache_create(conf->cache_name[conf->active_name],
1498 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
1502 conf->slab_cache = sc;
1503 conf->pool_size = devs;
1505 if (!grow_one_stripe(conf))
1511 * scribble_len - return the required size of the scribble region
1512 * @num - total number of disks in the array
1514 * The size must be enough to contain:
1515 * 1/ a struct page pointer for each device in the array +2
1516 * 2/ room to convert each entry in (1) to its corresponding dma
1517 * (dma_map_page()) or page (page_address()) address.
1519 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1520 * calculate over all devices (not just the data blocks), using zeros in place
1521 * of the P and Q blocks.
1523 static size_t scribble_len(int num)
1527 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
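/* Worked size (illustrative; assumes 8-byte pointers and a pointer-sized
 * addr_conv_t): for a 6-device array, len = 8 * (6 + 2) + 8 * (6 + 2)
 * = 128 bytes per CPU.  The first (num + 2) entries are the page-pointer
 * list handed to the async_tx routines; to_addr_conv() returns the address
 * just past them, which is the conversion area for the same (num + 2)
 * entries.
 */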
1532 static int resize_stripes(struct r5conf *conf, int newsize)
1534 /* Make all the stripes able to hold 'newsize' devices.
1535 * New slots in each stripe get 'page' set to a new page.
1537 * This happens in stages:
1538 * 1/ create a new kmem_cache and allocate the required number of
1540 * 2/ gather all the old stripe_heads and transfer the pages across
1541 * to the new stripe_heads. This will have the side effect of
1542 * freezing the array as once all stripe_heads have been collected,
1543 * no IO will be possible. Old stripe heads are freed once their
1544 * pages have been transferred over, and the old kmem_cache is
1545 * freed when all stripes are done.
1546 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
1547 * we simply return a failure status - no need to clean anything up.
1548 * 4/ allocate new pages for the new slots in the new stripe_heads.
1549 * If this fails, we don't bother trying to shrink the
1550 * stripe_heads down again, we just leave them as they are.
1551 * As each stripe_head is processed the new one is released into
1554 * Once step2 is started, we cannot afford to wait for a write,
1555 * so we use GFP_NOIO allocations.
1557 struct stripe_head *osh, *nsh;
1558 LIST_HEAD(newstripes);
1559 struct disk_info *ndisks;
1562 struct kmem_cache *sc;
1565 if (newsize <= conf->pool_size)
1566 return 0; /* never bother to shrink */
1568 err = md_allow_write(conf->mddev);
1573 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1574 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1579 for (i = conf->max_nr_stripes; i; i--) {
1580 nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
1584 nsh->raid_conf = conf;
1585 #ifdef CONFIG_MULTICORE_RAID456
1586 init_waitqueue_head(&nsh->ops.wait_for_ops);
1589 list_add(&nsh->lru, &newstripes);
1592 /* didn't get enough, give up */
1593 while (!list_empty(&newstripes)) {
1594 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1595 list_del(&nsh->lru);
1596 kmem_cache_free(sc, nsh);
1598 kmem_cache_destroy(sc);
1601 /* Step 2 - Must use GFP_NOIO now.
1602 * OK, we have enough stripes, start collecting inactive
1603 * stripes and copying them over
1605 list_for_each_entry(nsh, &newstripes, lru) {
1606 spin_lock_irq(&conf->device_lock);
1607 wait_event_lock_irq(conf->wait_for_stripe,
1608 !list_empty(&conf->inactive_list),
1611 osh = get_free_stripe(conf);
1612 spin_unlock_irq(&conf->device_lock);
1613 atomic_set(&nsh->count, 1);
1614 for(i=0; i<conf->pool_size; i++)
1615 nsh->dev[i].page = osh->dev[i].page;
1616 for( ; i<newsize; i++)
1617 nsh->dev[i].page = NULL;
1618 kmem_cache_free(conf->slab_cache, osh);
1620 kmem_cache_destroy(conf->slab_cache);
1623 * At this point, we are holding all the stripes so the array
1624 * is completely stalled, so now is a good time to resize
1625 * conf->disks and the scribble region
1627 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1629 for (i=0; i<conf->raid_disks; i++)
1630 ndisks[i] = conf->disks[i];
1632 conf->disks = ndisks;
1637 conf->scribble_len = scribble_len(newsize);
1638 for_each_present_cpu(cpu) {
1639 struct raid5_percpu *percpu;
1642 percpu = per_cpu_ptr(conf->percpu, cpu);
1643 scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1646 kfree(percpu->scribble);
1647 percpu->scribble = scribble;
1655 /* Step 4, return new stripes to service */
1656 while(!list_empty(&newstripes)) {
1657 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1658 list_del_init(&nsh->lru);
1660 for (i=conf->raid_disks; i < newsize; i++)
1661 if (nsh->dev[i].page == NULL) {
1662 struct page *p = alloc_page(GFP_NOIO);
1663 nsh->dev[i].page = p;
1667 release_stripe(nsh);
1669 /* critical section passed, GFP_NOIO no longer needed */
1671 conf->slab_cache = sc;
1672 conf->active_name = 1-conf->active_name;
1673 conf->pool_size = newsize;
1677 static int drop_one_stripe(struct r5conf *conf)
1679 struct stripe_head *sh;
1681 spin_lock_irq(&conf->device_lock);
1682 sh = get_free_stripe(conf);
1683 spin_unlock_irq(&conf->device_lock);
1686 BUG_ON(atomic_read(&sh->count));
1688 kmem_cache_free(conf->slab_cache, sh);
1689 atomic_dec(&conf->active_stripes);
1693 static void shrink_stripes(struct r5conf *conf)
1695 while (drop_one_stripe(conf))
1698 if (conf->slab_cache)
1699 kmem_cache_destroy(conf->slab_cache);
1700 conf->slab_cache = NULL;
1703 static void raid5_end_read_request(struct bio * bi, int error)
1705 struct stripe_head *sh = bi->bi_private;
1706 struct r5conf *conf = sh->raid_conf;
1707 int disks = sh->disks, i;
1708 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1709 char b[BDEVNAME_SIZE];
1710 struct md_rdev *rdev = NULL;
1713 for (i=0 ; i<disks; i++)
1714 if (bi == &sh->dev[i].req)
1717 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1718 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1724 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
1725 /* If replacement finished while this request was outstanding,
1726 * 'replacement' might be NULL already.
1727 * In that case it moved down to 'rdev'.
1728 * rdev is not removed until all requests are finished.
1730 rdev = conf->disks[i].replacement;
1732 rdev = conf->disks[i].rdev;
1734 if (use_new_offset(conf, sh))
1735 s = sh->sector + rdev->new_data_offset;
1737 s = sh->sector + rdev->data_offset;
1739 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1740 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1741 /* Note that this cannot happen on a
1742 * replacement device. We just fail those on
1747 "md/raid:%s: read error corrected"
1748 " (%lu sectors at %llu on %s)\n",
1749 mdname(conf->mddev), STRIPE_SECTORS,
1750 (unsigned long long)s,
1751 bdevname(rdev->bdev, b));
1752 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1753 clear_bit(R5_ReadError, &sh->dev[i].flags);
1754 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1756 if (atomic_read(&rdev->read_errors))
1757 atomic_set(&rdev->read_errors, 0);
1759 const char *bdn = bdevname(rdev->bdev, b);
1763 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1764 atomic_inc(&rdev->read_errors);
1765 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
1768 "md/raid:%s: read error on replacement device "
1769 "(sector %llu on %s).\n",
1770 mdname(conf->mddev),
1771 (unsigned long long)s,
1773 else if (conf->mddev->degraded >= conf->max_degraded) {
1777 "md/raid:%s: read error not correctable "
1778 "(sector %llu on %s).\n",
1779 mdname(conf->mddev),
1780 (unsigned long long)s,
1782 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
1787 "md/raid:%s: read error NOT corrected!! "
1788 "(sector %llu on %s).\n",
1789 mdname(conf->mddev),
1790 (unsigned long long)s,
1792 } else if (atomic_read(&rdev->read_errors)
1793 > conf->max_nr_stripes)
1795 "md/raid:%s: Too many read errors, failing device %s.\n",
1796 mdname(conf->mddev), bdn);
1800 set_bit(R5_ReadError, &sh->dev[i].flags);
1802 clear_bit(R5_ReadError, &sh->dev[i].flags);
1803 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1805 && test_bit(In_sync, &rdev->flags)
1806 && rdev_set_badblocks(
1807 rdev, sh->sector, STRIPE_SECTORS, 0)))
1808 md_error(conf->mddev, rdev);
1811 rdev_dec_pending(rdev, conf->mddev);
1812 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1813 set_bit(STRIPE_HANDLE, &sh->state);
1817 static void raid5_end_write_request(struct bio *bi, int error)
1819 struct stripe_head *sh = bi->bi_private;
1820 struct r5conf *conf = sh->raid_conf;
1821 int disks = sh->disks, i;
1822 struct md_rdev *uninitialized_var(rdev);
1823 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1826 int replacement = 0;
1828 for (i = 0 ; i < disks; i++) {
1829 if (bi == &sh->dev[i].req) {
1830 rdev = conf->disks[i].rdev;
1833 if (bi == &sh->dev[i].rreq) {
1834 rdev = conf->disks[i].replacement;
1838 /* rdev was removed and 'replacement'
1839 * replaced it. rdev is not removed
1840 * until all requests are finished.
1842 rdev = conf->disks[i].rdev;
1846 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1847 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1856 md_error(conf->mddev, rdev);
1857 else if (is_badblock(rdev, sh->sector,
1859 &first_bad, &bad_sectors))
1860 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
1863 set_bit(WriteErrorSeen, &rdev->flags);
1864 set_bit(R5_WriteError, &sh->dev[i].flags);
1865 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1866 set_bit(MD_RECOVERY_NEEDED,
1867 &rdev->mddev->recovery);
1868 } else if (is_badblock(rdev, sh->sector,
1870 &first_bad, &bad_sectors))
1871 set_bit(R5_MadeGood, &sh->dev[i].flags);
1873 rdev_dec_pending(rdev, conf->mddev);
1875 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
1876 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1877 set_bit(STRIPE_HANDLE, &sh->state);
1881 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1883 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1885 struct r5dev *dev = &sh->dev[i];
1887 bio_init(&dev->req);
1888 dev->req.bi_io_vec = &dev->vec;
1890 dev->req.bi_max_vecs++;
1891 dev->req.bi_private = sh;
1892 dev->vec.bv_page = dev->page;
1894 bio_init(&dev->rreq);
1895 dev->rreq.bi_io_vec = &dev->rvec;
1896 dev->rreq.bi_vcnt++;
1897 dev->rreq.bi_max_vecs++;
1898 dev->rreq.bi_private = sh;
1899 dev->rvec.bv_page = dev->page;
1902 dev->sector = compute_blocknr(sh, i, previous);
1905 static void error(struct mddev *mddev, struct md_rdev *rdev)
1907 char b[BDEVNAME_SIZE];
1908 struct r5conf *conf = mddev->private;
1909 unsigned long flags;
1910 pr_debug("raid456: error called\n");
1912 spin_lock_irqsave(&conf->device_lock, flags);
1913 clear_bit(In_sync, &rdev->flags);
1914 mddev->degraded = calc_degraded(conf);
1915 spin_unlock_irqrestore(&conf->device_lock, flags);
1916 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1918 set_bit(Blocked, &rdev->flags);
1919 set_bit(Faulty, &rdev->flags);
1920 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1922 "md/raid:%s: Disk failure on %s, disabling device.\n"
1923 "md/raid:%s: Operation continuing on %d devices.\n",
1925 bdevname(rdev->bdev, b),
1927 conf->raid_disks - mddev->degraded);
1931 * Input: a 'big' sector number,
1932 * Output: index of the data and parity disk, and the sector # in them.
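/* Worked example (illustration only; assumes a 4-device RAID-5 with the
 * default left-symmetric layout and 512 KiB chunks, i.e. 1024 sectors per
 * chunk, so data_disks = 3): for r_sector = 3072, chunk_offset = 0 and
 * chunk_number = 3, giving stripe = 1 and dd_idx = 0.  pd_idx then becomes
 * 3 - (1 % 4) = 2, and the left-symmetric rotation maps dd_idx to
 * (2 + 1 + 0) % 4 = 3.  So array sector 3072 lives on device 3 at device
 * sector 1 * 1024 + 0 = 1024, with the parity block for that stripe on
 * device 2.
 */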
1934 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
1935 int previous, int *dd_idx,
1936 struct stripe_head *sh)
1938 sector_t stripe, stripe2;
1939 sector_t chunk_number;
1940 unsigned int chunk_offset;
1943 sector_t new_sector;
1944 int algorithm = previous ? conf->prev_algo
1946 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1947 : conf->chunk_sectors;
1948 int raid_disks = previous ? conf->previous_raid_disks
1950 int data_disks = raid_disks - conf->max_degraded;
1952 /* First compute the information on this sector */
1955 * Compute the chunk number and the sector offset inside the chunk
1957 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1958 chunk_number = r_sector;
1961 * Compute the stripe number
1963 stripe = chunk_number;
1964 *dd_idx = sector_div(stripe, data_disks);
1967 * Select the parity disk based on the user selected algorithm.
1969 pd_idx = qd_idx = -1;
1970 switch(conf->level) {
1972 pd_idx = data_disks;
1975 switch (algorithm) {
1976 case ALGORITHM_LEFT_ASYMMETRIC:
1977 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1978 if (*dd_idx >= pd_idx)
1981 case ALGORITHM_RIGHT_ASYMMETRIC:
1982 pd_idx = sector_div(stripe2, raid_disks);
1983 if (*dd_idx >= pd_idx)
1986 case ALGORITHM_LEFT_SYMMETRIC:
1987 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1988 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1990 case ALGORITHM_RIGHT_SYMMETRIC:
1991 pd_idx = sector_div(stripe2, raid_disks);
1992 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1994 case ALGORITHM_PARITY_0:
1998 case ALGORITHM_PARITY_N:
1999 pd_idx = data_disks;
2007 switch (algorithm) {
2008 case ALGORITHM_LEFT_ASYMMETRIC:
2009 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2010 qd_idx = pd_idx + 1;
2011 if (pd_idx == raid_disks-1) {
2012 (*dd_idx)++; /* Q D D D P */
2014 } else if (*dd_idx >= pd_idx)
2015 (*dd_idx) += 2; /* D D P Q D */
2017 case ALGORITHM_RIGHT_ASYMMETRIC:
2018 pd_idx = sector_div(stripe2, raid_disks);
2019 qd_idx = pd_idx + 1;
2020 if (pd_idx == raid_disks-1) {
2021 (*dd_idx)++; /* Q D D D P */
2023 } else if (*dd_idx >= pd_idx)
2024 (*dd_idx) += 2; /* D D P Q D */
2026 case ALGORITHM_LEFT_SYMMETRIC:
2027 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2028 qd_idx = (pd_idx + 1) % raid_disks;
2029 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2031 case ALGORITHM_RIGHT_SYMMETRIC:
2032 pd_idx = sector_div(stripe2, raid_disks);
2033 qd_idx = (pd_idx + 1) % raid_disks;
2034 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2037 case ALGORITHM_PARITY_0:
2042 case ALGORITHM_PARITY_N:
2043 pd_idx = data_disks;
2044 qd_idx = data_disks + 1;
2047 case ALGORITHM_ROTATING_ZERO_RESTART:
2048 /* Exactly the same as RIGHT_ASYMMETRIC, but the order
2049 * of blocks for computing Q is different.
2051 pd_idx = sector_div(stripe2, raid_disks);
2052 qd_idx = pd_idx + 1;
2053 if (pd_idx == raid_disks-1) {
2054 (*dd_idx)++; /* Q D D D P */
2056 } else if (*dd_idx >= pd_idx)
2057 (*dd_idx) += 2; /* D D P Q D */
2061 case ALGORITHM_ROTATING_N_RESTART:
2062 /* Same as left_asymmetric, but the first stripe is
2063 * D D D P Q rather than
2067 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2068 qd_idx = pd_idx + 1;
2069 if (pd_idx == raid_disks-1) {
2070 (*dd_idx)++; /* Q D D D P */
2072 } else if (*dd_idx >= pd_idx)
2073 (*dd_idx) += 2; /* D D P Q D */
2077 case ALGORITHM_ROTATING_N_CONTINUE:
2078 /* Same as left_symmetric but Q is before P */
2079 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2080 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
2081 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2085 case ALGORITHM_LEFT_ASYMMETRIC_6:
2086 /* RAID5 left_asymmetric, with Q on last device */
2087 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2088 if (*dd_idx >= pd_idx)
2090 qd_idx = raid_disks - 1;
2093 case ALGORITHM_RIGHT_ASYMMETRIC_6:
2094 pd_idx = sector_div(stripe2, raid_disks-1);
2095 if (*dd_idx >= pd_idx)
2097 qd_idx = raid_disks - 1;
2100 case ALGORITHM_LEFT_SYMMETRIC_6:
2101 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2102 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2103 qd_idx = raid_disks - 1;
2106 case ALGORITHM_RIGHT_SYMMETRIC_6:
2107 pd_idx = sector_div(stripe2, raid_disks-1);
2108 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2109 qd_idx = raid_disks - 1;
2112 case ALGORITHM_PARITY_0_6:
2115 qd_idx = raid_disks - 1;
2125 sh->pd_idx = pd_idx;
2126 sh->qd_idx = qd_idx;
2127 sh->ddf_layout = ddf_layout;
2130 * Finally, compute the new sector number
2132 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
2137 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
2139 struct r5conf *conf = sh->raid_conf;
2140 int raid_disks = sh->disks;
2141 int data_disks = raid_disks - conf->max_degraded;
2142 sector_t new_sector = sh->sector, check;
2143 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2144 : conf->chunk_sectors;
2145 int algorithm = previous ? conf->prev_algo
2149 sector_t chunk_number;
2150 int dummy1, dd_idx = i;
2152 struct stripe_head sh2;
2155 chunk_offset = sector_div(new_sector, sectors_per_chunk);
2156 stripe = new_sector;
2158 if (i == sh->pd_idx)
2160 switch(conf->level) {
2163 switch (algorithm) {
2164 case ALGORITHM_LEFT_ASYMMETRIC:
2165 case ALGORITHM_RIGHT_ASYMMETRIC:
2169 case ALGORITHM_LEFT_SYMMETRIC:
2170 case ALGORITHM_RIGHT_SYMMETRIC:
2173 i -= (sh->pd_idx + 1);
2175 case ALGORITHM_PARITY_0:
2178 case ALGORITHM_PARITY_N:
2185 if (i == sh->qd_idx)
2186 return 0; /* It is the Q disk */
2187 switch (algorithm) {
2188 case ALGORITHM_LEFT_ASYMMETRIC:
2189 case ALGORITHM_RIGHT_ASYMMETRIC:
2190 case ALGORITHM_ROTATING_ZERO_RESTART:
2191 case ALGORITHM_ROTATING_N_RESTART:
2192 if (sh->pd_idx == raid_disks-1)
2193 i--; /* Q D D D P */
2194 else if (i > sh->pd_idx)
2195 i -= 2; /* D D P Q D */
2197 case ALGORITHM_LEFT_SYMMETRIC:
2198 case ALGORITHM_RIGHT_SYMMETRIC:
2199 if (sh->pd_idx == raid_disks-1)
2200 i--; /* Q D D D P */
2205 i -= (sh->pd_idx + 2);
2208 case ALGORITHM_PARITY_0:
2211 case ALGORITHM_PARITY_N:
2213 case ALGORITHM_ROTATING_N_CONTINUE:
2214 /* Like left_symmetric, but P is before Q */
2215 if (sh->pd_idx == 0)
2216 i--; /* P D D D Q */
2221 i -= (sh->pd_idx + 1);
2224 case ALGORITHM_LEFT_ASYMMETRIC_6:
2225 case ALGORITHM_RIGHT_ASYMMETRIC_6:
2229 case ALGORITHM_LEFT_SYMMETRIC_6:
2230 case ALGORITHM_RIGHT_SYMMETRIC_6:
2232 i += data_disks + 1;
2233 i -= (sh->pd_idx + 1);
2235 case ALGORITHM_PARITY_0_6:
2244 chunk_number = stripe * data_disks + i;
2245 r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2247 check = raid5_compute_sector(conf, r_sector,
2248 previous, &dummy1, &sh2);
2249 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2250 || sh2.qd_idx != sh->qd_idx) {
2251 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2252 mdname(conf->mddev));
2260 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2261 int rcw, int expand)
2263 int i, pd_idx = sh->pd_idx, disks = sh->disks;
2264 struct r5conf *conf = sh->raid_conf;
2265 int level = conf->level;
2268 /* if we are not expanding this is a proper write request, and
2269 * there will be bios with new data to be drained into the
2273 sh->reconstruct_state = reconstruct_state_drain_run;
2274 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2276 sh->reconstruct_state = reconstruct_state_run;
2278 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2280 for (i = disks; i--; ) {
2281 struct r5dev *dev = &sh->dev[i];
2284 set_bit(R5_LOCKED, &dev->flags);
2285 set_bit(R5_Wantdrain, &dev->flags);
2287 clear_bit(R5_UPTODATE, &dev->flags);
2291 if (s->locked + conf->max_degraded == disks)
2292 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2293 atomic_inc(&conf->pending_full_writes);
2296 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2297 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2299 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2300 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2301 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2302 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2304 for (i = disks; i--; ) {
2305 struct r5dev *dev = &sh->dev[i];
2310 (test_bit(R5_UPTODATE, &dev->flags) ||
2311 test_bit(R5_Wantcompute, &dev->flags))) {
2312 set_bit(R5_Wantdrain, &dev->flags);
2313 set_bit(R5_LOCKED, &dev->flags);
2314 clear_bit(R5_UPTODATE, &dev->flags);
2320 /* keep the parity disk(s) locked while asynchronous operations are in flight */
2323 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2324 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2328 int qd_idx = sh->qd_idx;
2329 struct r5dev *dev = &sh->dev[qd_idx];
2331 set_bit(R5_LOCKED, &dev->flags);
2332 clear_bit(R5_UPTODATE, &dev->flags);
2336 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2337 __func__, (unsigned long long)sh->sector,
2338 s->locked, s->ops_request);
2342 * Each stripe/dev can have one or more bion attached.
2343 * toread/towrite point to the first in a chain.
2344 * The bi_next chain must be in order.
2346 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2349 struct r5conf *conf = sh->raid_conf;
2352 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2353 (unsigned long long)bi->bi_sector,
2354 (unsigned long long)sh->sector);
2357 spin_lock_irq(&conf->device_lock);
2359 bip = &sh->dev[dd_idx].towrite;
2360 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2363 bip = &sh->dev[dd_idx].toread;
2364 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2365 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2367 bip = & (*bip)->bi_next;
2369 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2372 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2376 raid5_inc_bi_active_stripes(bi);
2379 /* check if page is covered */
2380 sector_t sector = sh->dev[dd_idx].sector;
2381 for (bi=sh->dev[dd_idx].towrite;
2382 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2383 bi && bi->bi_sector <= sector;
2384 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2385 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2386 sector = bi->bi_sector + (bi->bi_size>>9);
2388 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2389 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2391 spin_unlock_irq(&conf->device_lock);
2393 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2394 (unsigned long long)(*bip)->bi_sector,
2395 (unsigned long long)sh->sector, dd_idx);
2397 if (conf->mddev->bitmap && firstwrite) {
2398 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2400 sh->bm_seq = conf->seq_flush+1;
2401 set_bit(STRIPE_BIT_DELAY, &sh->state);
2406 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2407 spin_unlock_irq(&conf->device_lock);
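/*
 * Illustrative sketch, not part of the driver: the sorted, non-overlapping
 * chain that add_stripe_bio() maintains on toread/towrite, reduced to a
 * plain singly linked list of sector ranges.  All names here are
 * hypothetical; the real code walks struct bio via bi_next.
 */
#if 0
struct example_seg {
	unsigned long long start;	/* first sector */
	unsigned int sectors;		/* length in sectors */
	struct example_seg *next;
};

/* Insert 'new' keeping the list sorted by start sector; refuse overlaps. */
static int example_add_seg(struct example_seg **head, struct example_seg *new)
{
	struct example_seg **pos = head;

	while (*pos && (*pos)->start < new->start) {
		if ((*pos)->start + (*pos)->sectors > new->start)
			return 0;		/* overlaps a predecessor */
		pos = &(*pos)->next;
	}
	if (*pos && (*pos)->start < new->start + new->sectors)
		return 0;			/* overlaps the successor */
	new->next = *pos;
	*pos = new;
	return 1;
}
#endif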
2411 static void end_reshape(struct r5conf *conf);
2413 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
2414 struct stripe_head *sh)
2416 int sectors_per_chunk =
2417 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2419 int chunk_offset = sector_div(stripe, sectors_per_chunk);
2420 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2422 raid5_compute_sector(conf,
2423 stripe * (disks - conf->max_degraded)
2424 *sectors_per_chunk + chunk_offset,
2430 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2431 struct stripe_head_state *s, int disks,
2432 struct bio **return_bi)
2435 for (i = disks; i--; ) {
2439 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2440 struct md_rdev *rdev;
2442 rdev = rcu_dereference(conf->disks[i].rdev);
2443 if (rdev && test_bit(In_sync, &rdev->flags))
2444 atomic_inc(&rdev->nr_pending);
2449 if (!rdev_set_badblocks(
2453 md_error(conf->mddev, rdev);
2454 rdev_dec_pending(rdev, conf->mddev);
2457 spin_lock_irq(&conf->device_lock);
2458 /* fail all writes first */
2459 bi = sh->dev[i].towrite;
2460 sh->dev[i].towrite = NULL;
2466 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2467 wake_up(&conf->wait_for_overlap);
2469 while (bi && bi->bi_sector <
2470 sh->dev[i].sector + STRIPE_SECTORS) {
2471 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2472 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2473 if (!raid5_dec_bi_active_stripes(bi)) {
2474 md_write_end(conf->mddev);
2475 bi->bi_next = *return_bi;
2480 /* and fail all 'written' */
2481 bi = sh->dev[i].written;
2482 sh->dev[i].written = NULL;
2483 if (bi) bitmap_end = 1;
2484 while (bi && bi->bi_sector <
2485 sh->dev[i].sector + STRIPE_SECTORS) {
2486 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2487 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2488 if (!raid5_dec_bi_active_stripes(bi)) {
2489 md_write_end(conf->mddev);
2490 bi->bi_next = *return_bi;
2496 /* fail any reads if this device is non-operational and
2497 * the data has not reached the cache yet.
2499 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2500 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2501 test_bit(R5_ReadError, &sh->dev[i].flags))) {
2502 bi = sh->dev[i].toread;
2503 sh->dev[i].toread = NULL;
2504 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2505 wake_up(&conf->wait_for_overlap);
2506 if (bi) s->to_read--;
2507 while (bi && bi->bi_sector <
2508 sh->dev[i].sector + STRIPE_SECTORS) {
2509 struct bio *nextbi =
2510 r5_next_bio(bi, sh->dev[i].sector);
2511 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2512 if (!raid5_dec_bi_active_stripes(bi)) {
2513 bi->bi_next = *return_bi;
2519 spin_unlock_irq(&conf->device_lock);
2521 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2522 STRIPE_SECTORS, 0, 0);
2523 /* If we were in the middle of a write the parity block might
2524 * still be locked - so just clear all R5_LOCKED flags
2526 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2529 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2530 if (atomic_dec_and_test(&conf->pending_full_writes))
2531 md_wakeup_thread(conf->mddev->thread);
2535 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
2536 struct stripe_head_state *s)
2541 clear_bit(STRIPE_SYNCING, &sh->state);
2544 /* There is nothing more to do for sync/check/repair.
2545 * Don't even need to abort as that is handled elsewhere
2546 * if needed, and not always wanted e.g. if there is a known bad block here.
2548 * For recover/replace we need to record a bad block on all
2549 * non-sync devices, or abort the recovery
2551 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
2552 /* During recovery devices cannot be removed, so
2553 * locking and refcounting of rdevs is not needed
2555 for (i = 0; i < conf->raid_disks; i++) {
2556 struct md_rdev *rdev = conf->disks[i].rdev;
2558 && !test_bit(Faulty, &rdev->flags)
2559 && !test_bit(In_sync, &rdev->flags)
2560 && !rdev_set_badblocks(rdev, sh->sector,
2563 rdev = conf->disks[i].replacement;
2565 && !test_bit(Faulty, &rdev->flags)
2566 && !test_bit(In_sync, &rdev->flags)
2567 && !rdev_set_badblocks(rdev, sh->sector,
2572 conf->recovery_disabled =
2573 conf->mddev->recovery_disabled;
2575 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
2578 static int want_replace(struct stripe_head *sh, int disk_idx)
2580 struct md_rdev *rdev;
2582 /* Doing recovery so rcu locking not required */
2583 rdev = sh->raid_conf->disks[disk_idx].replacement;
2585 && !test_bit(Faulty, &rdev->flags)
2586 && !test_bit(In_sync, &rdev->flags)
2587 && (rdev->recovery_offset <= sh->sector
2588 || rdev->mddev->recovery_cp <= sh->sector))
2594 /* fetch_block - checks the given member device to see if its data needs
2595 * to be read or computed to satisfy a request.
2597 * Returns 1 when no more member devices need to be checked, otherwise returns
2598 * 0 to tell the loop in handle_stripe_fill to continue
2600 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2601 int disk_idx, int disks)
2603 struct r5dev *dev = &sh->dev[disk_idx];
2604 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
2605 &sh->dev[s->failed_num[1]] };
2607 /* is the data in this block needed, and can we get it? */
2608 if (!test_bit(R5_LOCKED, &dev->flags) &&
2609 !test_bit(R5_UPTODATE, &dev->flags) &&
2611 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2612 s->syncing || s->expanding ||
2613 (s->replacing && want_replace(sh, disk_idx)) ||
2614 (s->failed >= 1 && fdev[0]->toread) ||
2615 (s->failed >= 2 && fdev[1]->toread) ||
2616 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2617 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2618 (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2619 /* we would like to get this block, possibly by computing it,
2620 * otherwise read it if the backing disk is insync
2622 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2623 BUG_ON(test_bit(R5_Wantread, &dev->flags));
2624 if ((s->uptodate == disks - 1) &&
2625 (s->failed && (disk_idx == s->failed_num[0] ||
2626 disk_idx == s->failed_num[1]))) {
2627 /* have disk failed, and we're requested to fetch it; so compute it */
2630 pr_debug("Computing stripe %llu block %d\n",
2631 (unsigned long long)sh->sector, disk_idx);
2632 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2633 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2634 set_bit(R5_Wantcompute, &dev->flags);
2635 sh->ops.target = disk_idx;
2636 sh->ops.target2 = -1; /* no 2nd target */
2638 /* Careful: from this point on 'uptodate' is in the eye
2639 * of raid_run_ops which services 'compute' operations
2640 * before writes. R5_Wantcompute flags a block that will
2641 * be R5_UPTODATE by the time it is needed for a
2642 * subsequent operation.
2646 } else if (s->uptodate == disks-2 && s->failed >= 2) {
2647 /* Computing 2-failure is *very* expensive; only
2648 * do it if failed >= 2
2651 for (other = disks; other--; ) {
2652 if (other == disk_idx)
2654 if (!test_bit(R5_UPTODATE,
2655 &sh->dev[other].flags))
2659 pr_debug("Computing stripe %llu blocks %d,%d\n",
2660 (unsigned long long)sh->sector,
2662 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2663 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2664 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2665 set_bit(R5_Wantcompute, &sh->dev[other].flags);
2666 sh->ops.target = disk_idx;
2667 sh->ops.target2 = other;
2671 } else if (test_bit(R5_Insync, &dev->flags)) {
2672 set_bit(R5_LOCKED, &dev->flags);
2673 set_bit(R5_Wantread, &dev->flags);
2675 pr_debug("Reading block %d (sync=%d)\n",
2676 disk_idx, s->syncing);
2684 * handle_stripe_fill - read or compute data to satisfy pending requests.
2686 static void handle_stripe_fill(struct stripe_head *sh,
2687 struct stripe_head_state *s,
2692 /* look for blocks to read/compute, skip this if a compute
2693 * is already in flight, or if the stripe contents are in the
2694 * midst of changing due to a write
2696 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2697 !sh->reconstruct_state)
2698 for (i = disks; i--; )
2699 if (fetch_block(sh, s, i, disks))
2701 set_bit(STRIPE_HANDLE, &sh->state);
2705 /* handle_stripe_clean_event
2706 * any written block on an uptodate or failed drive can be returned.
2707 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2708 * never LOCKED, so we don't need to test 'failed' directly.
2710 static void handle_stripe_clean_event(struct r5conf *conf,
2711 struct stripe_head *sh, int disks, struct bio **return_bi)
2716 for (i = disks; i--; )
2717 if (sh->dev[i].written) {
2719 if (!test_bit(R5_LOCKED, &dev->flags) &&
2720 test_bit(R5_UPTODATE, &dev->flags)) {
2721 /* We can return any write requests */
2722 struct bio *wbi, *wbi2;
2724 pr_debug("Return write for disc %d\n", i);
2725 spin_lock_irq(&conf->device_lock);
2727 dev->written = NULL;
2728 while (wbi && wbi->bi_sector <
2729 dev->sector + STRIPE_SECTORS) {
2730 wbi2 = r5_next_bio(wbi, dev->sector);
2731 if (!raid5_dec_bi_active_stripes(wbi)) {
2732 md_write_end(conf->mddev);
2733 wbi->bi_next = *return_bi;
2738 if (dev->towrite == NULL)
2740 spin_unlock_irq(&conf->device_lock);
2742 bitmap_endwrite(conf->mddev->bitmap,
2745 !test_bit(STRIPE_DEGRADED, &sh->state),
2750 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2751 if (atomic_dec_and_test(&conf->pending_full_writes))
2752 md_wakeup_thread(conf->mddev->thread);
2755 static void handle_stripe_dirtying(struct r5conf *conf,
2756 struct stripe_head *sh,
2757 struct stripe_head_state *s,
2760 int rmw = 0, rcw = 0, i;
2761 if (conf->max_degraded == 2) {
2762 /* RAID6 requires 'rcw' in current implementation
2763 * Calculate the real rcw later - for now fake it
2764 * to make it look like rcw is cheaper
2767 } else for (i = disks; i--; ) {
2768 /* would I have to read this buffer for read_modify_write */
2769 struct r5dev *dev = &sh->dev[i];
2770 if ((dev->towrite || i == sh->pd_idx) &&
2771 !test_bit(R5_LOCKED, &dev->flags) &&
2772 !(test_bit(R5_UPTODATE, &dev->flags) ||
2773 test_bit(R5_Wantcompute, &dev->flags))) {
2774 if (test_bit(R5_Insync, &dev->flags))
2777 rmw += 2*disks; /* cannot read it */
2779 /* Would I have to read this buffer for reconstruct_write */
2780 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2781 !test_bit(R5_LOCKED, &dev->flags) &&
2782 !(test_bit(R5_UPTODATE, &dev->flags) ||
2783 test_bit(R5_Wantcompute, &dev->flags))) {
2784 if (test_bit(R5_Insync, &dev->flags)) rcw++;
2789 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2790 (unsigned long long)sh->sector, rmw, rcw);
2791 set_bit(STRIPE_HANDLE, &sh->state);
2792 if (rmw < rcw && rmw > 0)
2793 /* prefer read-modify-write, but need to get some data */
2794 for (i = disks; i--; ) {
2795 struct r5dev *dev = &sh->dev[i];
2796 if ((dev->towrite || i == sh->pd_idx) &&
2797 !test_bit(R5_LOCKED, &dev->flags) &&
2798 !(test_bit(R5_UPTODATE, &dev->flags) ||
2799 test_bit(R5_Wantcompute, &dev->flags)) &&
2800 test_bit(R5_Insync, &dev->flags)) {
2802 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2803 pr_debug("Read_old block "
2804 "%d for r-m-w\n", i);
2805 set_bit(R5_LOCKED, &dev->flags);
2806 set_bit(R5_Wantread, &dev->flags);
2809 set_bit(STRIPE_DELAYED, &sh->state);
2810 set_bit(STRIPE_HANDLE, &sh->state);
2814 if (rcw <= rmw && rcw > 0) {
2815 /* want reconstruct write, but need to get some data */
2817 for (i = disks; i--; ) {
2818 struct r5dev *dev = &sh->dev[i];
2819 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2820 i != sh->pd_idx && i != sh->qd_idx &&
2821 !test_bit(R5_LOCKED, &dev->flags) &&
2822 !(test_bit(R5_UPTODATE, &dev->flags) ||
2823 test_bit(R5_Wantcompute, &dev->flags))) {
2825 if (!test_bit(R5_Insync, &dev->flags))
2826 continue; /* it's a failed drive */
2828 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2829 pr_debug("Read_old block "
2830 "%d for Reconstruct\n", i);
2831 set_bit(R5_LOCKED, &dev->flags);
2832 set_bit(R5_Wantread, &dev->flags);
2835 set_bit(STRIPE_DELAYED, &sh->state);
2836 set_bit(STRIPE_HANDLE, &sh->state);
2841 /* now if nothing is locked, and if we have enough data,
2842 * we can start a write request
2844 /* since handle_stripe can be called at any time we need to handle the
2845 * case where a compute block operation has been submitted and then a
2846 * subsequent call wants to start a write request. raid_run_ops only
2847 * handles the case where compute block and reconstruct are requested
2848 * simultaneously. If this is not the case then new writes need to be
2849 * held off until the compute completes.
2851 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2852 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2853 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2854 schedule_reconstruction(sh, s, rcw == 0, 0);
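/*
 * Illustrative sketch, not part of the driver: the read-modify-write vs
 * reconstruct-write choice made above, reduced to counting the reads each
 * strategy would need on an otherwise idle RAID-5 stripe.  It ignores
 * cached (UPTODATE), locked and failed devices; all names are hypothetical.
 */
#if 0
/* to_write[i] is non-zero if data disk i will be fully overwritten. */
static int example_prefer_rmw(const int *to_write, int data_disks)
{
	int i, rmw = 1 /* old parity */, rcw = 0;

	for (i = 0; i < data_disks; i++) {
		if (to_write[i])
			rmw++;		/* need the old data of this block */
		else
			rcw++;		/* need this untouched block */
	}
	/*
	 * e.g. 4 data disks, 1 block written: rmw = 2 reads, rcw = 3 reads,
	 * so read-modify-write wins; with 3 blocks written: rmw = 4, rcw = 1,
	 * so reconstruct-write wins.
	 */
	return rmw < rcw;
}
#endif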
2857 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
2858 struct stripe_head_state *s, int disks)
2860 struct r5dev *dev = NULL;
2862 set_bit(STRIPE_HANDLE, &sh->state);
2864 switch (sh->check_state) {
2865 case check_state_idle:
2866 /* start a new check operation if there are no failures */
2867 if (s->failed == 0) {
2868 BUG_ON(s->uptodate != disks);
2869 sh->check_state = check_state_run;
2870 set_bit(STRIPE_OP_CHECK, &s->ops_request);
2871 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2875 dev = &sh->dev[s->failed_num[0]];
2877 case check_state_compute_result:
2878 sh->check_state = check_state_idle;
2880 dev = &sh->dev[sh->pd_idx];
2882 /* check that a write has not made the stripe insync */
2883 if (test_bit(STRIPE_INSYNC, &sh->state))
2886 /* either failed parity check, or recovery is happening */
2887 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2888 BUG_ON(s->uptodate != disks);
2890 set_bit(R5_LOCKED, &dev->flags);
2892 set_bit(R5_Wantwrite, &dev->flags);
2894 clear_bit(STRIPE_DEGRADED, &sh->state);
2895 set_bit(STRIPE_INSYNC, &sh->state);
2897 case check_state_run:
2898 break; /* we will be called again upon completion */
2899 case check_state_check_result:
2900 sh->check_state = check_state_idle;
2902 /* if a failure occurred during the check operation, leave
2903 * STRIPE_INSYNC not set and let the stripe be handled again
2908 /* handle a successful check operation, if parity is correct
2909 * we are done. Otherwise update the mismatch count and repair
2910 * parity if !MD_RECOVERY_CHECK
2912 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2913 /* parity is correct (on disc,
2914 * not in buffer any more)
2916 set_bit(STRIPE_INSYNC, &sh->state);
2918 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2919 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2920 /* don't try to repair!! */
2921 set_bit(STRIPE_INSYNC, &sh->state);
2923 sh->check_state = check_state_compute_run;
2924 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2925 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2926 set_bit(R5_Wantcompute,
2927 &sh->dev[sh->pd_idx].flags);
2928 sh->ops.target = sh->pd_idx;
2929 sh->ops.target2 = -1;
2934 case check_state_compute_run:
2937 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2938 __func__, sh->check_state,
2939 (unsigned long long) sh->sector);
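/*
 * Illustrative sketch, not part of the driver: the P-parity "zero sum"
 * check that the code above acts on, for a single byte position.  If the
 * XOR of all data blocks and the parity block is non-zero, the stripe has
 * a parity mismatch (the SUM_CHECK_P_RESULT case).
 */
#if 0
static int example_p_mismatch(const unsigned char *blocks, int nr_blocks)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < nr_blocks; i++)	/* data blocks plus P */
		sum ^= blocks[i];
	return sum != 0;		/* non-zero -> parity is wrong */
}
#endif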
2945 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
2946 struct stripe_head_state *s,
2949 int pd_idx = sh->pd_idx;
2950 int qd_idx = sh->qd_idx;
2953 set_bit(STRIPE_HANDLE, &sh->state);
2955 BUG_ON(s->failed > 2);
2957 /* Want to check and possibly repair P and Q.
2958 * However there could be one 'failed' device, in which
2959 * case we can only check one of them, possibly using the
2960 * other to generate missing data
2963 switch (sh->check_state) {
2964 case check_state_idle:
2965 /* start a new check operation if there are < 2 failures */
2966 if (s->failed == s->q_failed) {
2967 /* The only possible failed device holds Q, so it
2968 * makes sense to check P (If anything else were failed,
2969 * we would have used P to recreate it).
2971 sh->check_state = check_state_run;
2973 if (!s->q_failed && s->failed < 2) {
2974 /* Q is not failed, and we didn't use it to generate
2975 * anything, so it makes sense to check it
2977 if (sh->check_state == check_state_run)
2978 sh->check_state = check_state_run_pq;
2980 sh->check_state = check_state_run_q;
2983 /* discard potentially stale zero_sum_result */
2984 sh->ops.zero_sum_result = 0;
2986 if (sh->check_state == check_state_run) {
2987 /* async_xor_zero_sum destroys the contents of P */
2988 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2991 if (sh->check_state >= check_state_run &&
2992 sh->check_state <= check_state_run_pq) {
2993 /* async_syndrome_zero_sum preserves P and Q, so
2994 * no need to mark them !uptodate here
2996 set_bit(STRIPE_OP_CHECK, &s->ops_request);
3000 /* we have 2-disk failure */
3001 BUG_ON(s->failed != 2);
3003 case check_state_compute_result:
3004 sh->check_state = check_state_idle;
3006 /* check that a write has not made the stripe insync */
3007 if (test_bit(STRIPE_INSYNC, &sh->state))
3010 /* now write out any block on a failed drive,
3011 * or P or Q if they were recomputed
3013 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
3014 if (s->failed == 2) {
3015 dev = &sh->dev[s->failed_num[1]];
3017 set_bit(R5_LOCKED, &dev->flags);
3018 set_bit(R5_Wantwrite, &dev->flags);
3020 if (s->failed >= 1) {
3021 dev = &sh->dev[s->failed_num[0]];
3023 set_bit(R5_LOCKED, &dev->flags);
3024 set_bit(R5_Wantwrite, &dev->flags);
3026 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3027 dev = &sh->dev[pd_idx];
3029 set_bit(R5_LOCKED, &dev->flags);
3030 set_bit(R5_Wantwrite, &dev->flags);
3032 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3033 dev = &sh->dev[qd_idx];
3035 set_bit(R5_LOCKED, &dev->flags);
3036 set_bit(R5_Wantwrite, &dev->flags);
3038 clear_bit(STRIPE_DEGRADED, &sh->state);
3040 set_bit(STRIPE_INSYNC, &sh->state);
3042 case check_state_run:
3043 case check_state_run_q:
3044 case check_state_run_pq:
3045 break; /* we will be called again upon completion */
3046 case check_state_check_result:
3047 sh->check_state = check_state_idle;
3049 /* handle a successful check operation, if parity is correct
3050 * we are done. Otherwise update the mismatch count and repair
3051 * parity if !MD_RECOVERY_CHECK
3053 if (sh->ops.zero_sum_result == 0) {
3054 /* both parities are correct */
3056 set_bit(STRIPE_INSYNC, &sh->state);
3058 /* in contrast to the raid5 case we can validate
3059 * parity, but still have a failure to write
3062 sh->check_state = check_state_compute_result;
3063 /* Returning at this point means that we may go
3064 * off and bring p and/or q uptodate again so
3065 * we make sure to check zero_sum_result again
3066 * to verify if p or q need writeback
3070 conf->mddev->resync_mismatches += STRIPE_SECTORS;
3071 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3072 /* don't try to repair!! */
3073 set_bit(STRIPE_INSYNC, &sh->state);
3075 int *target = &sh->ops.target;
3077 sh->ops.target = -1;
3078 sh->ops.target2 = -1;
3079 sh->check_state = check_state_compute_run;
3080 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3081 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3082 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3083 set_bit(R5_Wantcompute,
3084 &sh->dev[pd_idx].flags);
3086 target = &sh->ops.target2;
3089 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3090 set_bit(R5_Wantcompute,
3091 &sh->dev[qd_idx].flags);
3098 case check_state_compute_run:
3101 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3102 __func__, sh->check_state,
3103 (unsigned long long) sh->sector);
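/*
 * Illustrative sketch, not part of the driver: what the P/Q syndrome check
 * above verifies, for a single byte position.  P is the XOR of the data
 * bytes; Q is the RAID-6 syndrome sum(g^i * d[i]) over GF(2^8) with
 * generator g = {02} and polynomial x^8+x^4+x^3+x^2+1.  A mismatch in the
 * recomputed P or Q corresponds to SUM_CHECK_P_RESULT / SUM_CHECK_Q_RESULT.
 */
#if 0
static unsigned char example_gf_mul2(unsigned char a)
{
	return (a & 0x80) ? (unsigned char)((a << 1) ^ 0x1d)
			  : (unsigned char)(a << 1);
}

static void example_pq(const unsigned char *d, int n,
		       unsigned char *p, unsigned char *q)
{
	int i;

	*p = 0;
	*q = 0;
	for (i = n - 1; i >= 0; i--) {	/* Horner evaluation of Q */
		*p ^= d[i];
		*q = example_gf_mul2(*q) ^ d[i];
	}
}

static int example_pq_mismatch(const unsigned char *d, int n,
			       unsigned char stored_p, unsigned char stored_q)
{
	unsigned char p, q;

	example_pq(d, n, &p, &q);
	/* bit 0: P mismatch, bit 1: Q mismatch */
	return ((p ^ stored_p) != 0) | (((q ^ stored_q) != 0) << 1);
}
#endif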
3108 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
3112 /* We have read all the blocks in this stripe and now we need to
3113 * copy some of them into a target stripe for expand.
3115 struct dma_async_tx_descriptor *tx = NULL;
3116 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3117 for (i = 0; i < sh->disks; i++)
3118 if (i != sh->pd_idx && i != sh->qd_idx) {
3120 struct stripe_head *sh2;
3121 struct async_submit_ctl submit;
3123 sector_t bn = compute_blocknr(sh, i, 1);
3124 sector_t s = raid5_compute_sector(conf, bn, 0,
3126 sh2 = get_active_stripe(conf, s, 0, 1, 1);
3128 /* so far only the early blocks of this stripe
3129 * have been requested. When later blocks
3130 * get requested, we will try again
3133 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
3134 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
3135 /* must have already done this block */
3136 release_stripe(sh2);
3140 /* place all the copies on one channel */
3141 init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
3142 tx = async_memcpy(sh2->dev[dd_idx].page,
3143 sh->dev[i].page, 0, 0, STRIPE_SIZE,
3146 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
3147 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
3148 for (j = 0; j < conf->raid_disks; j++)
3149 if (j != sh2->pd_idx &&
3151 !test_bit(R5_Expanded, &sh2->dev[j].flags))
3153 if (j == conf->raid_disks) {
3154 set_bit(STRIPE_EXPAND_READY, &sh2->state);
3155 set_bit(STRIPE_HANDLE, &sh2->state);
3157 release_stripe(sh2);
3160 /* done submitting copies, wait for them to complete */
3163 dma_wait_for_async_tx(tx);
3168 * handle_stripe - do things to a stripe.
3170 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
3171 * state of various bits to see what needs to be done. Possible results:
3173 * return some read requests which now have data
3174 * return some write requests which are safely on storage
3175 * schedule a read on some buffers
3176 * schedule a write of some buffers
3177 * return confirmation of parity correctness
3181 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3183 struct r5conf *conf = sh->raid_conf;
3184 int disks = sh->disks;
3187 int do_recovery = 0;
3189 memset(s, 0, sizeof(*s));
3191 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3192 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3193 s->failed_num[0] = -1;
3194 s->failed_num[1] = -1;
3196 /* Now to look around and see what can be done */
3198 spin_lock_irq(&conf->device_lock);
3199 for (i=disks; i--; ) {
3200 struct md_rdev *rdev;
3207 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3209 dev->toread, dev->towrite, dev->written);
3210 /* maybe we can reply to a read
3212 * new wantfill requests are only permitted while
3213 * ops_complete_biofill is guaranteed to be inactive
3215 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3216 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3217 set_bit(R5_Wantfill, &dev->flags);
3219 /* now count some things */
3220 if (test_bit(R5_LOCKED, &dev->flags))
3222 if (test_bit(R5_UPTODATE, &dev->flags))
3224 if (test_bit(R5_Wantcompute, &dev->flags)) {
3226 BUG_ON(s->compute > 2);
3229 if (test_bit(R5_Wantfill, &dev->flags))
3231 else if (dev->toread)
3235 if (!test_bit(R5_OVERWRITE, &dev->flags))
3240 /* Prefer to use the replacement for reads, but only
3241 * if it is recovered enough and has no bad blocks.
3243 rdev = rcu_dereference(conf->disks[i].replacement);
3244 if (rdev && !test_bit(Faulty, &rdev->flags) &&
3245 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
3246 !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3247 &first_bad, &bad_sectors))
3248 set_bit(R5_ReadRepl, &dev->flags);
3251 set_bit(R5_NeedReplace, &dev->flags);
3252 rdev = rcu_dereference(conf->disks[i].rdev);
3253 clear_bit(R5_ReadRepl, &dev->flags);
3255 if (rdev && test_bit(Faulty, &rdev->flags))
3258 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3259 &first_bad, &bad_sectors);
3260 if (s->blocked_rdev == NULL
3261 && (test_bit(Blocked, &rdev->flags)
3264 set_bit(BlockedBadBlocks,
3266 s->blocked_rdev = rdev;
3267 atomic_inc(&rdev->nr_pending);
3270 clear_bit(R5_Insync, &dev->flags);
3274 /* also not in-sync */
3275 if (!test_bit(WriteErrorSeen, &rdev->flags) &&
3276 test_bit(R5_UPTODATE, &dev->flags)) {
3277 /* treat as in-sync, but with a read error
3278 * which we can now try to correct
3280 set_bit(R5_Insync, &dev->flags);
3281 set_bit(R5_ReadError, &dev->flags);
3283 } else if (test_bit(In_sync, &rdev->flags))
3284 set_bit(R5_Insync, &dev->flags);
3285 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3286 /* in sync if before recovery_offset */
3287 set_bit(R5_Insync, &dev->flags);
3288 else if (test_bit(R5_UPTODATE, &dev->flags) &&
3289 test_bit(R5_Expanded, &dev->flags))
3290 /* If we've reshaped into here, we assume it is Insync.
3291 * We will shortly update recovery_offset to make
3294 set_bit(R5_Insync, &dev->flags);
3296 if (rdev && test_bit(R5_WriteError, &dev->flags)) {
3297 /* This flag does not apply to '.replacement'
3298 * only to .rdev, so make sure to check that */
3299 struct md_rdev *rdev2 = rcu_dereference(
3300 conf->disks[i].rdev);
3302 clear_bit(R5_Insync, &dev->flags);
3303 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3304 s->handle_bad_blocks = 1;
3305 atomic_inc(&rdev2->nr_pending);
3307 clear_bit(R5_WriteError, &dev->flags);
3309 if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
3310 /* This flag does not apply to '.replacement'
3311 * only to .rdev, so make sure to check that */
3312 struct md_rdev *rdev2 = rcu_dereference(
3313 conf->disks[i].rdev);
3314 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3315 s->handle_bad_blocks = 1;
3316 atomic_inc(&rdev2->nr_pending);
3318 clear_bit(R5_MadeGood, &dev->flags);
3320 if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
3321 struct md_rdev *rdev2 = rcu_dereference(
3322 conf->disks[i].replacement);
3323 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3324 s->handle_bad_blocks = 1;
3325 atomic_inc(&rdev2->nr_pending);
3327 clear_bit(R5_MadeGoodRepl, &dev->flags);
3329 if (!test_bit(R5_Insync, &dev->flags)) {
3330 /* The ReadError flag will just be confusing now */
3331 clear_bit(R5_ReadError, &dev->flags);
3332 clear_bit(R5_ReWrite, &dev->flags);
3334 if (test_bit(R5_ReadError, &dev->flags))
3335 clear_bit(R5_Insync, &dev->flags);
3336 if (!test_bit(R5_Insync, &dev->flags)) {
3338 s->failed_num[s->failed] = i;
3340 if (rdev && !test_bit(Faulty, &rdev->flags))
3344 spin_unlock_irq(&conf->device_lock);
3345 if (test_bit(STRIPE_SYNCING, &sh->state)) {
3346 /* If there is a failed device being replaced,
3347 * we must be recovering.
3348 * else if we are after recovery_cp, we must be syncing
3349 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
3350 * else we can only be replacing
3351 * sync and recovery both need to read all devices, and so
3352 * use the same flag.
3355 sh->sector >= conf->mddev->recovery_cp ||
3356 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
3364 static void handle_stripe(struct stripe_head *sh)
3366 struct stripe_head_state s;
3367 struct r5conf *conf = sh->raid_conf;
3370 int disks = sh->disks;
3371 struct r5dev *pdev, *qdev;
3373 clear_bit(STRIPE_HANDLE, &sh->state);
3374 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
3375 /* already being handled, ensure it gets handled
3376 * again when current action finishes */
3377 set_bit(STRIPE_HANDLE, &sh->state);
3381 if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3382 set_bit(STRIPE_SYNCING, &sh->state);
3383 clear_bit(STRIPE_INSYNC, &sh->state);
3385 clear_bit(STRIPE_DELAYED, &sh->state);
3387 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3388 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3389 (unsigned long long)sh->sector, sh->state,
3390 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3391 sh->check_state, sh->reconstruct_state);
3393 analyse_stripe(sh, &s);
3395 if (s.handle_bad_blocks) {
3396 set_bit(STRIPE_HANDLE, &sh->state);
3400 if (unlikely(s.blocked_rdev)) {
3401 if (s.syncing || s.expanding || s.expanded ||
3402 s.replacing || s.to_write || s.written) {
3403 set_bit(STRIPE_HANDLE, &sh->state);
3406 /* There is nothing for the blocked_rdev to block */
3407 rdev_dec_pending(s.blocked_rdev, conf->mddev);
3408 s.blocked_rdev = NULL;
3411 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3412 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3413 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3416 pr_debug("locked=%d uptodate=%d to_read=%d"
3417 " to_write=%d failed=%d failed_num=%d,%d\n",
3418 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3419 s.failed_num[0], s.failed_num[1]);
3420 /* check if the array has lost more than max_degraded devices and,
3421 * if so, some requests might need to be failed.
3423 if (s.failed > conf->max_degraded) {
3424 sh->check_state = 0;
3425 sh->reconstruct_state = 0;
3426 if (s.to_read+s.to_write+s.written)
3427 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3428 if (s.syncing + s.replacing)
3429 handle_failed_sync(conf, sh, &s);
3433 * might be able to return some write requests if the parity blocks
3434 * are safe, or on a failed drive
3436 pdev = &sh->dev[sh->pd_idx];
3437 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3438 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
3439 qdev = &sh->dev[sh->qd_idx];
3440 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3441 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3445 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3446 && !test_bit(R5_LOCKED, &pdev->flags)
3447 && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3448 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3449 && !test_bit(R5_LOCKED, &qdev->flags)
3450 && test_bit(R5_UPTODATE, &qdev->flags)))))
3451 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3453 /* Now we might consider reading some blocks, either to check/generate
3454 * parity, or to satisfy requests
3455 * or to load a block that is being partially written.
3457 if (s.to_read || s.non_overwrite
3458 || (conf->level == 6 && s.to_write && s.failed)
3459 || (s.syncing && (s.uptodate + s.compute < disks))
3462 handle_stripe_fill(sh, &s, disks);
3464 /* Now we check to see if any write operations have recently
3468 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3470 if (sh->reconstruct_state == reconstruct_state_drain_result ||
3471 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3472 sh->reconstruct_state = reconstruct_state_idle;
3474 /* All the 'written' buffers and the parity block are ready to
3475 * be written back to disk
3477 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3478 BUG_ON(sh->qd_idx >= 0 &&
3479 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
3480 for (i = disks; i--; ) {
3481 struct r5dev *dev = &sh->dev[i];
3482 if (test_bit(R5_LOCKED, &dev->flags) &&
3483 (i == sh->pd_idx || i == sh->qd_idx ||
3485 pr_debug("Writing block %d\n", i);
3486 set_bit(R5_Wantwrite, &dev->flags);
3489 if (!test_bit(R5_Insync, &dev->flags) ||
3490 ((i == sh->pd_idx || i == sh->qd_idx) &&
3492 set_bit(STRIPE_INSYNC, &sh->state);
3495 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3496 s.dec_preread_active = 1;
3499 /* Now to consider new write requests and what else, if anything
3500 * should be read. We do not handle new writes when:
3501 * 1/ A 'write' operation (copy+xor) is already in flight.
3502 * 2/ A 'check' operation is in flight, as it may clobber the parity
3505 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3506 handle_stripe_dirtying(conf, sh, &s, disks);
3508 /* maybe we need to check and possibly fix the parity for this stripe
3509 * Any reads will already have been scheduled, so we just see if enough
3510 * data is available. The parity check is held off while parity
3511 * dependent operations are in flight.
3513 if (sh->check_state ||
3514 (s.syncing && s.locked == 0 &&
3515 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3516 !test_bit(STRIPE_INSYNC, &sh->state))) {
3517 if (conf->level == 6)
3518 handle_parity_checks6(conf, sh, &s, disks);
3520 handle_parity_checks5(conf, sh, &s, disks);
3523 if (s.replacing && s.locked == 0
3524 && !test_bit(STRIPE_INSYNC, &sh->state)) {
3525 /* Write out to replacement devices where possible */
3526 for (i = 0; i < conf->raid_disks; i++)
3527 if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
3528 test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
3529 set_bit(R5_WantReplace, &sh->dev[i].flags);
3530 set_bit(R5_LOCKED, &sh->dev[i].flags);
3533 set_bit(STRIPE_INSYNC, &sh->state);
3535 if ((s.syncing || s.replacing) && s.locked == 0 &&
3536 test_bit(STRIPE_INSYNC, &sh->state)) {
3537 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3538 clear_bit(STRIPE_SYNCING, &sh->state);
3541 /* If the failed drives are just a ReadError, then we might need
3542 * to progress the repair/check process
3544 if (s.failed <= conf->max_degraded && !conf->mddev->ro)
3545 for (i = 0; i < s.failed; i++) {
3546 struct r5dev *dev = &sh->dev[s.failed_num[i]];
3547 if (test_bit(R5_ReadError, &dev->flags)
3548 && !test_bit(R5_LOCKED, &dev->flags)
3549 && test_bit(R5_UPTODATE, &dev->flags)
3551 if (!test_bit(R5_ReWrite, &dev->flags)) {
3552 set_bit(R5_Wantwrite, &dev->flags);
3553 set_bit(R5_ReWrite, &dev->flags);
3554 set_bit(R5_LOCKED, &dev->flags);
3557 /* let's read it back */
3558 set_bit(R5_Wantread, &dev->flags);
3559 set_bit(R5_LOCKED, &dev->flags);
3566 /* Finish reconstruct operations initiated by the expansion process */
3567 if (sh->reconstruct_state == reconstruct_state_result) {
3568 struct stripe_head *sh_src
3569 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3570 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3571 /* sh cannot be written until sh_src has been read.
3572 * so arrange for sh to be delayed a little
3574 set_bit(STRIPE_DELAYED, &sh->state);
3575 set_bit(STRIPE_HANDLE, &sh->state);
3576 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3578 atomic_inc(&conf->preread_active_stripes);
3579 release_stripe(sh_src);
3583 release_stripe(sh_src);
3585 sh->reconstruct_state = reconstruct_state_idle;
3586 clear_bit(STRIPE_EXPANDING, &sh->state);
3587 for (i = conf->raid_disks; i--; ) {
3588 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3589 set_bit(R5_LOCKED, &sh->dev[i].flags);
3594 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3595 !sh->reconstruct_state) {
3596 /* Need to write out all blocks after computing parity */
3597 sh->disks = conf->raid_disks;
3598 stripe_set_idx(sh->sector, conf, 0, sh);
3599 schedule_reconstruction(sh, &s, 1, 1);
3600 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3601 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3602 atomic_dec(&conf->reshape_stripes);
3603 wake_up(&conf->wait_for_overlap);
3604 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3607 if (s.expanding && s.locked == 0 &&
3608 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3609 handle_stripe_expansion(conf, sh);
3612 /* wait for this device to become unblocked */
3613 if (unlikely(s.blocked_rdev)) {
3614 if (conf->mddev->external)
3615 md_wait_for_blocked_rdev(s.blocked_rdev,
3618 /* Internal metadata will immediately
3619 * be written by raid5d, so we don't
3620 * need to wait here.
3622 rdev_dec_pending(s.blocked_rdev,
3626 if (s.handle_bad_blocks)
3627 for (i = disks; i--; ) {
3628 struct md_rdev *rdev;
3629 struct r5dev *dev = &sh->dev[i];
3630 if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
3631 /* We own a safe reference to the rdev */
3632 rdev = conf->disks[i].rdev;
3633 if (!rdev_set_badblocks(rdev, sh->sector,
3635 md_error(conf->mddev, rdev);
3636 rdev_dec_pending(rdev, conf->mddev);
3638 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
3639 rdev = conf->disks[i].rdev;
3640 rdev_clear_badblocks(rdev, sh->sector,
3642 rdev_dec_pending(rdev, conf->mddev);
3644 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
3645 rdev = conf->disks[i].replacement;
3647 /* rdev has been moved down */
3648 rdev = conf->disks[i].rdev;
3649 rdev_clear_badblocks(rdev, sh->sector,
3651 rdev_dec_pending(rdev, conf->mddev);
3656 raid_run_ops(sh, s.ops_request);
3660 if (s.dec_preread_active) {
3661 /* We delay this until after ops_run_io so that if make_request
3662 * is waiting on a flush, it won't continue until the writes
3663 * have actually been submitted.
3665 atomic_dec(&conf->preread_active_stripes);
3666 if (atomic_read(&conf->preread_active_stripes) <
3668 md_wakeup_thread(conf->mddev->thread);
3671 return_io(s.return_bi);
3673 clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
3676 static void raid5_activate_delayed(struct r5conf *conf)
3678 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3679 while (!list_empty(&conf->delayed_list)) {
3680 struct list_head *l = conf->delayed_list.next;
3681 struct stripe_head *sh;
3682 sh = list_entry(l, struct stripe_head, lru);
3684 clear_bit(STRIPE_DELAYED, &sh->state);
3685 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3686 atomic_inc(&conf->preread_active_stripes);
3687 list_add_tail(&sh->lru, &conf->hold_list);
3692 static void activate_bit_delay(struct r5conf *conf)
3694 /* device_lock is held */
3695 struct list_head head;
3696 list_add(&head, &conf->bitmap_list);
3697 list_del_init(&conf->bitmap_list);
3698 while (!list_empty(&head)) {
3699 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3700 list_del_init(&sh->lru);
3701 atomic_inc(&sh->count);
3702 __release_stripe(conf, sh);
3706 int md_raid5_congested(struct mddev *mddev, int bits)
3708 struct r5conf *conf = mddev->private;
3710 /* No difference between reads and writes. Just check
3711 * how busy the stripe_cache is
3714 if (conf->inactive_blocked)
3718 if (list_empty_careful(&conf->inactive_list))
3723 EXPORT_SYMBOL_GPL(md_raid5_congested);
3725 static int raid5_congested(void *data, int bits)
3727 struct mddev *mddev = data;
3729 return mddev_congested(mddev, bits) ||
3730 md_raid5_congested(mddev, bits);
3733 /* We want read requests to align with chunks where possible,
3734 * but write requests don't need to.
3736 static int raid5_mergeable_bvec(struct request_queue *q,
3737 struct bvec_merge_data *bvm,
3738 struct bio_vec *biovec)
3740 struct mddev *mddev = q->queuedata;
3741 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3743 unsigned int chunk_sectors = mddev->chunk_sectors;
3744 unsigned int bio_sectors = bvm->bi_size >> 9;
3746 if ((bvm->bi_rw & 1) == WRITE)
3747 return biovec->bv_len; /* always allow writes to be mergeable */
3749 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3750 chunk_sectors = mddev->new_chunk_sectors;
3751 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3752 if (max < 0) max = 0;
3753 if (max <= biovec->bv_len && bio_sectors == 0)
3754 return biovec->bv_len;
3760 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
3762 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3763 unsigned int chunk_sectors = mddev->chunk_sectors;
3764 unsigned int bio_sectors = bio->bi_size >> 9;
3766 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3767 chunk_sectors = mddev->new_chunk_sectors;
3768 return chunk_sectors >=
3769 ((sector & (chunk_sectors - 1)) + bio_sectors);
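/*
 * Illustrative sketch, not part of the driver: the chunk-boundary test used
 * by in_chunk_boundary() and raid5_mergeable_bvec(), assuming a power-of-two
 * chunk size.  E.g. with 128-sector chunks, a 32-sector read starting at
 * sector 120 covers sectors 120..151, crosses the boundary at sector 128,
 * and therefore cannot be serviced as a single aligned read.
 */
#if 0
static int example_fits_in_chunk(unsigned long long sector,
				 unsigned int bio_sectors,
				 unsigned int chunk_sectors)
{
	unsigned int offset = (unsigned int)(sector & (chunk_sectors - 1));

	return chunk_sectors >= offset + bio_sectors;
}
#endif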
3773 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
3774 * later sampled by raid5d.
3776 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
3778 unsigned long flags;
3780 spin_lock_irqsave(&conf->device_lock, flags);
3782 bi->bi_next = conf->retry_read_aligned_list;
3783 conf->retry_read_aligned_list = bi;
3785 spin_unlock_irqrestore(&conf->device_lock, flags);
3786 md_wakeup_thread(conf->mddev->thread);
3790 static struct bio *remove_bio_from_retry(struct r5conf *conf)
3794 bi = conf->retry_read_aligned;
3796 conf->retry_read_aligned = NULL;
3799 bi = conf->retry_read_aligned_list;
3801 conf->retry_read_aligned_list = bi->bi_next;
3804 * this sets the active stripe count to 1 and the processed
3805 * stripe count to zero (upper 16 bits)
3807 raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
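/*
 * Illustrative sketch, not part of the driver: the two counters mentioned
 * above live packed in one 32-bit word - the active-stripe count in the low
 * 16 bits and the processed-stripe count in the high 16 bits.  A plain,
 * non-atomic model of that packing (the real code manipulates
 * bi_phys_segments through atomic operations):
 */
#if 0
static unsigned int example_pack_counts(unsigned int active,
					unsigned int processed)
{
	return ((processed & 0xffff) << 16) | (active & 0xffff);
}

static unsigned int example_active_stripes(unsigned int packed)
{
	return packed & 0xffff;
}

static unsigned int example_processed_stripes(unsigned int packed)
{
	return (packed >> 16) & 0xffff;
}
#endif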
3815 * The "raid5_align_endio" should check if the read succeeded and if it
3816 * did, call bio_endio on the original bio (having bio_put the new bio
3818 * If the read failed..
3820 static void raid5_align_endio(struct bio *bi, int error)
3822 struct bio* raid_bi = bi->bi_private;
3823 struct mddev *mddev;
3824 struct r5conf *conf;
3825 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3826 struct md_rdev *rdev;
3830 rdev = (void*)raid_bi->bi_next;
3831 raid_bi->bi_next = NULL;
3832 mddev = rdev->mddev;
3833 conf = mddev->private;
3835 rdev_dec_pending(rdev, conf->mddev);
3837 if (!error && uptodate) {
3838 bio_endio(raid_bi, 0);
3839 if (atomic_dec_and_test(&conf->active_aligned_reads))
3840 wake_up(&conf->wait_for_stripe);
3845 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3847 add_bio_to_retry(raid_bi, conf);
3850 static int bio_fits_rdev(struct bio *bi)
3852 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3854 if ((bi->bi_size>>9) > queue_max_sectors(q))
3856 blk_recount_segments(q, bi);
3857 if (bi->bi_phys_segments > queue_max_segments(q))
3860 if (q->merge_bvec_fn)
3861 /* it's too hard to apply the merge_bvec_fn at this stage,
3870 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
3872 struct r5conf *conf = mddev->private;
3874 struct bio* align_bi;
3875 struct md_rdev *rdev;
3876 sector_t end_sector;
3878 if (!in_chunk_boundary(mddev, raid_bio)) {
3879 pr_debug("chunk_aligned_read : non aligned\n");
3883 * use bio_clone_mddev to make a copy of the bio
3885 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
3889 * set bi_end_io to a new function, and set bi_private to the
3892 align_bi->bi_end_io = raid5_align_endio;
3893 align_bi->bi_private = raid_bio;
3897 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
3901 end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
3903 rdev = rcu_dereference(conf->disks[dd_idx].replacement);
3904 if (!rdev || test_bit(Faulty, &rdev->flags) ||
3905 rdev->recovery_offset < end_sector) {
3906 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3908 (test_bit(Faulty, &rdev->flags) ||
3909 !(test_bit(In_sync, &rdev->flags) ||
3910 rdev->recovery_offset >= end_sector)))
3917 atomic_inc(&rdev->nr_pending);
3919 raid_bio->bi_next = (void*)rdev;
3920 align_bi->bi_bdev = rdev->bdev;
3921 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3923 if (!bio_fits_rdev(align_bi) ||
3924 is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
3925 &first_bad, &bad_sectors)) {
3926 /* too big in some way, or has a known bad block */
3928 rdev_dec_pending(rdev, mddev);
3932 /* No reshape active, so we can trust rdev->data_offset */
3933 align_bi->bi_sector += rdev->data_offset;
3935 spin_lock_irq(&conf->device_lock);
3936 wait_event_lock_irq(conf->wait_for_stripe,
3938 conf->device_lock, /* nothing */);
3939 atomic_inc(&conf->active_aligned_reads);
3940 spin_unlock_irq(&conf->device_lock);
3942 generic_make_request(align_bi);
3951 /* __get_priority_stripe - get the next stripe to process
3953 * Full stripe writes are allowed to pass preread active stripes up until
3954 * the bypass_threshold is exceeded. In general the bypass_count
3955 * increments when the handle_list is handled before the hold_list; however, it
3956 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
3957 * stripe with in flight i/o. The bypass_count will be reset when the
3958 * head of the hold_list has changed, i.e. the head was promoted to the handle_list.
3961 static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
3963 struct stripe_head *sh;
3965 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3967 list_empty(&conf->handle_list) ? "empty" : "busy",
3968 list_empty(&conf->hold_list) ? "empty" : "busy",
3969 atomic_read(&conf->pending_full_writes), conf->bypass_count);
3971 if (!list_empty(&conf->handle_list)) {
3972 sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3974 if (list_empty(&conf->hold_list))
3975 conf->bypass_count = 0;
3976 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3977 if (conf->hold_list.next == conf->last_hold)
3978 conf->bypass_count++;
3980 conf->last_hold = conf->hold_list.next;
3981 conf->bypass_count -= conf->bypass_threshold;
3982 if (conf->bypass_count < 0)
3983 conf->bypass_count = 0;
3986 } else if (!list_empty(&conf->hold_list) &&
3987 ((conf->bypass_threshold &&
3988 conf->bypass_count > conf->bypass_threshold) ||
3989 atomic_read(&conf->pending_full_writes) == 0)) {
3990 sh = list_entry(conf->hold_list.next,
3992 conf->bypass_count -= conf->bypass_threshold;
3993 if (conf->bypass_count < 0)
3994 conf->bypass_count = 0;
3998 list_del_init(&sh->lru);
3999 atomic_inc(&sh->count);
4000 BUG_ON(atomic_read(&sh->count) != 1);
4004 static void make_request(struct mddev *mddev, struct bio * bi)
4006 struct r5conf *conf = mddev->private;
4008 sector_t new_sector;
4009 sector_t logical_sector, last_sector;
4010 struct stripe_head *sh;
4011 const int rw = bio_data_dir(bi);
4014 if (unlikely(bi->bi_rw & REQ_FLUSH)) {
4015 md_flush_request(mddev, bi);
4019 md_write_start(mddev, bi);
4022 mddev->reshape_position == MaxSector &&
4023 chunk_aligned_read(mddev,bi))
4026 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4027 last_sector = bi->bi_sector + (bi->bi_size>>9);
4029 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
4031 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
4037 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
4038 if (unlikely(conf->reshape_progress != MaxSector)) {
4039 /* spinlock is needed as reshape_progress may be
4040 * 64bit on a 32bit platform, and so it might be
4041 * possible to see a half-updated value
4042 * Of course reshape_progress could change after
4043 * the lock is dropped, so once we get a reference
4044 * to the stripe that we think it is, we will have to check again.
4047 spin_lock_irq(&conf->device_lock);
4048 if (mddev->reshape_backwards
4049 ? logical_sector < conf->reshape_progress
4050 : logical_sector >= conf->reshape_progress) {
4053 if (mddev->reshape_backwards
4054 ? logical_sector < conf->reshape_safe
4055 : logical_sector >= conf->reshape_safe) {
4056 spin_unlock_irq(&conf->device_lock);
4061 spin_unlock_irq(&conf->device_lock);
4064 new_sector = raid5_compute_sector(conf, logical_sector,
4067 pr_debug("raid456: make_request, sector %llu logical %llu\n",
4068 (unsigned long long)new_sector,
4069 (unsigned long long)logical_sector);
4071 sh = get_active_stripe(conf, new_sector, previous,
4072 (bi->bi_rw&RWA_MASK), 0);
4074 if (unlikely(previous)) {
4075 /* expansion might have moved on while waiting for a
4076 * stripe, so we must do the range check again.
4077 * Expansion could still move past after this
4078 * test, but as we are holding a reference to
4079 * 'sh', we know that if that happens,
4080 * STRIPE_EXPANDING will get set and the expansion
4081 * won't proceed until we finish with the stripe.
4084 spin_lock_irq(&conf->device_lock);
4085 if (mddev->reshape_backwards
4086 ? logical_sector >= conf->reshape_progress
4087 : logical_sector < conf->reshape_progress)
4088 /* mismatch, need to try again */
4090 spin_unlock_irq(&conf->device_lock);
4099 logical_sector >= mddev->suspend_lo &&
4100 logical_sector < mddev->suspend_hi) {
4102 /* As the suspend_* range is controlled by
4103 * userspace, we want an interruptible wait.
4106 flush_signals(current);
4107 prepare_to_wait(&conf->wait_for_overlap,
4108 &w, TASK_INTERRUPTIBLE);
4109 if (logical_sector >= mddev->suspend_lo &&
4110 logical_sector < mddev->suspend_hi)
4115 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
4116 !add_stripe_bio(sh, bi, dd_idx, rw)) {
4117 /* Stripe is busy expanding or
4118 * add failed due to overlap. Flush everything
4121 md_wakeup_thread(mddev->thread);
4126 finish_wait(&conf->wait_for_overlap, &w);
4127 set_bit(STRIPE_HANDLE, &sh->state);
4128 clear_bit(STRIPE_DELAYED, &sh->state);
4129 if ((bi->bi_rw & REQ_SYNC) &&
4130 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4131 atomic_inc(&conf->preread_active_stripes);
4132 mddev_check_plugged(mddev);
4135 /* cannot get stripe for read-ahead, just give-up */
4136 clear_bit(BIO_UPTODATE, &bi->bi_flags);
4137 finish_wait(&conf->wait_for_overlap, &w);
4142 remaining = raid5_dec_bi_active_stripes(bi);
4143 if (remaining == 0) {
4146 md_write_end(mddev);
4152 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
4154 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
4156 /* reshaping is quite different from recovery/resync so it is
4157 * handled quite separately ... here.
4159 * On each call to sync_request, we gather one chunk worth of
4160 * destination stripes and flag them as expanding.
4161 * Then we find all the source stripes and request reads.
4162 * As the reads complete, handle_stripe will copy the data
4163 * into the destination stripe and release that stripe.
4165 struct r5conf *conf = mddev->private;
4166 struct stripe_head *sh;
4167 sector_t first_sector, last_sector;
4168 int raid_disks = conf->previous_raid_disks;
4169 int data_disks = raid_disks - conf->max_degraded;
4170 int new_data_disks = conf->raid_disks - conf->max_degraded;
4173 sector_t writepos, readpos, safepos;
4174 sector_t stripe_addr;
4175 int reshape_sectors;
4176 struct list_head stripes;
4178 if (sector_nr == 0) {
4179 /* If restarting in the middle, skip the initial sectors */
4180 if (mddev->reshape_backwards &&
4181 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
4182 sector_nr = raid5_size(mddev, 0, 0)
4183 - conf->reshape_progress;
4184 } else if (!mddev->reshape_backwards &&
4185 conf->reshape_progress > 0)
4186 sector_nr = conf->reshape_progress;
4187 sector_div(sector_nr, new_data_disks);
4189 mddev->curr_resync_completed = sector_nr;
4190 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4196 /* We need to process a full chunk at a time.
4197 * If old and new chunk sizes differ, we need to process the
4200 if (mddev->new_chunk_sectors > mddev->chunk_sectors)
4201 reshape_sectors = mddev->new_chunk_sectors;
4203 reshape_sectors = mddev->chunk_sectors;
4205 /* We update the metadata at least every 10 seconds, or when
4206 * the data about to be copied would over-write the source of
4207 * the data at the front of the range. i.e. one new_stripe
4208 * along from reshape_progress new_maps to after where
4209 * reshape_safe old_maps to
4211 writepos = conf->reshape_progress;
4212 sector_div(writepos, new_data_disks);
4213 readpos = conf->reshape_progress;
4214 sector_div(readpos, data_disks);
4215 safepos = conf->reshape_safe;
4216 sector_div(safepos, data_disks);
4217 if (mddev->reshape_backwards) {
4218 writepos -= min_t(sector_t, reshape_sectors, writepos);
4219 readpos += reshape_sectors;
4220 safepos += reshape_sectors;
4222 writepos += reshape_sectors;
4223 readpos -= min_t(sector_t, reshape_sectors, readpos);
4224 safepos -= min_t(sector_t, reshape_sectors, safepos);
4227 /* Having calculated the 'writepos' possibly use it
4228 * to set 'stripe_addr' which is where we will write to.
4230 if (mddev->reshape_backwards) {
4231 BUG_ON(conf->reshape_progress == 0);
4232 stripe_addr = writepos;
4233 BUG_ON((mddev->dev_sectors &
4234 ~((sector_t)reshape_sectors - 1))
4235 - reshape_sectors - stripe_addr
4238 BUG_ON(writepos != sector_nr + reshape_sectors);
4239 stripe_addr = sector_nr;
4242 /* 'writepos' is the most advanced device address we might write.
4243 * 'readpos' is the least advanced device address we might read.
4244 * 'safepos' is the least address recorded in the metadata as having
4246 * If there is a min_offset_diff, these are adjusted either by
4247 * increasing the safepos/readpos if diff is negative, or
4248 * increasing writepos if diff is positive.
4249 * If 'readpos' is then behind 'writepos', there is no way that we can
4250 * ensure safety in the face of a crash - that must be done by userspace
4251 * making a backup of the data. So in that case there is no particular
4252 * rush to update metadata.
4253 * Otherwise if 'safepos' is behind 'writepos', then we really need to
4254 * update the metadata to advance 'safepos' to match 'readpos' so that
4255 * we can be safe in the event of a crash.
4256 * So we insist on updating metadata if safepos is behind writepos and
4257 * readpos is beyond writepos.
4258 * In any case, update the metadata every 10 seconds.
4259 * Maybe that number should be configurable, but I'm not sure it is
4260 * worth it.... maybe it could be a multiple of safemode_delay???
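/*
 * Illustrative sketch, not part of the driver: the checkpoint decision
 * described above, with a worked example.  Assume a forward reshape growing
 * from 4 to 5 data disks, reshape_sectors = 1024, min_offset_diff = 0,
 * conf->reshape_progress = 409600 and conf->reshape_safe = 204800.  Then
 *	writepos = 409600 / 5 + 1024 = 82944
 *	readpos  = 409600 / 4 - 1024 = 101376
 *	safepos  = 204800 / 4 - 1024 = 50176
 * safepos < writepos and readpos > writepos, so the superblock must be
 * updated (and reshape_safe advanced) before any of this chunk is written.
 */
#if 0
static int example_need_checkpoint(unsigned long long writepos,
				   unsigned long long readpos,
				   unsigned long long safepos,
				   int reshape_backwards)
{
	return reshape_backwards
		? (safepos > writepos && readpos < writepos)
		: (safepos < writepos && readpos > writepos);
}
#endif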
4262 if (conf->min_offset_diff < 0) {
4263 safepos += -conf->min_offset_diff;
4264 readpos += -conf->min_offset_diff;
4266 writepos += conf->min_offset_diff;
4268 if ((mddev->reshape_backwards
4269 ? (safepos > writepos && readpos < writepos)
4270 : (safepos < writepos && readpos > writepos)) ||
4271 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4272 /* Cannot proceed until we've updated the superblock... */
4273 wait_event(conf->wait_for_overlap,
4274 atomic_read(&conf->reshape_stripes)==0);
4275 mddev->reshape_position = conf->reshape_progress;
4276 mddev->curr_resync_completed = sector_nr;
4277 conf->reshape_checkpoint = jiffies;
4278 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4279 md_wakeup_thread(mddev->thread);
4280 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4281 kthread_should_stop());
4282 spin_lock_irq(&conf->device_lock);
4283 conf->reshape_safe = mddev->reshape_position;
4284 spin_unlock_irq(&conf->device_lock);
4285 wake_up(&conf->wait_for_overlap);
4286 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4289 INIT_LIST_HEAD(&stripes);
4290 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
4292 int skipped_disk = 0;
4293 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
4294 set_bit(STRIPE_EXPANDING, &sh->state);
4295 atomic_inc(&conf->reshape_stripes);
4296 /* If any of this stripe is beyond the end of the old
4297 * array, then we need to zero those blocks
4299 for (j=sh->disks; j--;) {
4301 if (j == sh->pd_idx)
4303 if (conf->level == 6 &&
4306 s = compute_blocknr(sh, j, 0);
4307 if (s < raid5_size(mddev, 0, 0)) {
4311 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4312 set_bit(R5_Expanded, &sh->dev[j].flags);
4313 set_bit(R5_UPTODATE, &sh->dev[j].flags);
4315 if (!skipped_disk) {
4316 set_bit(STRIPE_EXPAND_READY, &sh->state);
4317 set_bit(STRIPE_HANDLE, &sh->state);
4319 list_add(&sh->lru, &stripes);
4321 spin_lock_irq(&conf->device_lock);
4322 if (mddev->reshape_backwards)
4323 conf->reshape_progress -= reshape_sectors * new_data_disks;
4325 conf->reshape_progress += reshape_sectors * new_data_disks;
4326 spin_unlock_irq(&conf->device_lock);
4327 /* Ok, those stripes are ready. We can start scheduling
4328 * reads on the source stripes.
4329 * The source stripes are determined by mapping the first and last
4330 * block on the destination stripes.
4333 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4336 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4337 * new_data_disks - 1),
4339 if (last_sector >= mddev->dev_sectors)
4340 last_sector = mddev->dev_sectors - 1;
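/*
 * For illustration (assumed numbers): with new_data_disks = 4 and
 * reshape_sectors = 1024, a destination window starting at
 * stripe_addr = 2048 covers array addresses 2048*4 = 8192 through
 * (2048+1024)*4 - 1 = 12287.  Those two end points are passed through
 * raid5_compute_sector() using the old geometry, giving the range of
 * source device sectors that must be read before this window can be
 * rewritten in the new layout.
 */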
4341 while (first_sector <= last_sector) {
4342 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4343 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4344 set_bit(STRIPE_HANDLE, &sh->state);
4346 first_sector += STRIPE_SECTORS;
4348 /* Now that the sources are clearly marked, we can release
4349 * the destination stripes
4351 while (!list_empty(&stripes)) {
4352 sh = list_entry(stripes.next, struct stripe_head, lru);
4353 list_del_init(&sh->lru);
4356 /* If this takes us to the resync_max point where we have to pause,
4357 * then we need to write out the superblock.
4359 sector_nr += reshape_sectors;
4360 if ((sector_nr - mddev->curr_resync_completed) * 2
4361 >= mddev->resync_max - mddev->curr_resync_completed) {
4362 /* Cannot proceed until we've updated the superblock... */
4363 wait_event(conf->wait_for_overlap,
4364 atomic_read(&conf->reshape_stripes) == 0);
4365 mddev->reshape_position = conf->reshape_progress;
4366 mddev->curr_resync_completed = sector_nr;
4367 conf->reshape_checkpoint = jiffies;
4368 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4369 md_wakeup_thread(mddev->thread);
4370 wait_event(mddev->sb_wait,
4371 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4372 || kthread_should_stop());
4373 spin_lock_irq(&conf->device_lock);
4374 conf->reshape_safe = mddev->reshape_position;
4375 spin_unlock_irq(&conf->device_lock);
4376 wake_up(&conf->wait_for_overlap);
4377 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4379 return reshape_sectors;
4382 /* FIXME go_faster isn't used */
4383 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
4385 struct r5conf *conf = mddev->private;
4386 struct stripe_head *sh;
4387 sector_t max_sector = mddev->dev_sectors;
4388 sector_t sync_blocks;
4389 int still_degraded = 0;
4392 if (sector_nr >= max_sector) {
4393 /* just being told to finish up .. nothing much to do */
4395 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4400 if (mddev->curr_resync < max_sector) /* aborted */
4401 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4403 else /* completed sync */
4405 bitmap_close_sync(mddev->bitmap);
4410 /* Allow raid5_quiesce to complete */
4411 wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4413 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4414 return reshape_request(mddev, sector_nr, skipped);
4416 /* No need to check resync_max as we never do more than one
4417 * stripe, and as resync_max will always be on a chunk boundary,
4418 * if the check in md_do_sync didn't fire, there is no chance
4419 * of overstepping resync_max here
4422 /* if there are too many failed drives and we are trying
4423 * to resync, then assert that we are finished, because there is
4424 * nothing we can do.
4426 if (mddev->degraded >= conf->max_degraded &&
4427 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4428 sector_t rv = mddev->dev_sectors - sector_nr;
4432 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4433 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4434 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4435 /* we can skip this block, and probably more */
4436 sync_blocks /= STRIPE_SECTORS;
4438 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
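/*
 * Example (illustrative numbers): if the bitmap reports sync_blocks = 1003
 * already-in-sync sectors and STRIPE_SECTORS = 8, we skip 1003/8 = 125
 * whole stripes and report 1000 sectors as done, leaving the ragged tail
 * for a later pass.
 */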
4441 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4443 sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4445 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4446 /* make sure we don't swamp the stripe cache if someone else
4447 * is trying to get access
4449 schedule_timeout_uninterruptible(1);
4451 /* Need to check if array will still be degraded after recovery/resync
4452 * We don't need to check the 'failed' flag as when that gets set, recovery aborts.
4455 for (i = 0; i < conf->raid_disks; i++)
4456 if (conf->disks[i].rdev == NULL)
4459 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4461 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
4466 return STRIPE_SECTORS;
4469 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
4471 /* We may not be able to submit a whole bio at once as there
4472 * may not be enough stripe_heads available.
4473 * We cannot pre-allocate enough stripe_heads as we may need
4474 * more than exist in the cache (if we allow ever larger chunks).
4475 * So we do one stripe head at a time and record in
4476 * the top 16 bits of ->bi_phys_segments how many have been done.
4478 * We *know* that this entire raid_bio is in one chunk, so
4479 * there is only one 'dd_idx' and we need only one call to raid5_compute_sector.
4481 struct stripe_head *sh;
4483 sector_t sector, logical_sector, last_sector;
4488 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4489 sector = raid5_compute_sector(conf, logical_sector,
4491 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4493 for (; logical_sector < last_sector;
4494 logical_sector += STRIPE_SECTORS,
4495 sector += STRIPE_SECTORS,
4498 if (scnt < raid5_bi_processed_stripes(raid_bio))
4499 /* already done this stripe */
4502 sh = get_active_stripe(conf, sector, 0, 1, 0);
4505 /* failed to get a stripe - must wait */
4506 raid5_set_bi_processed_stripes(raid_bio, scnt);
4507 conf->retry_read_aligned = raid_bio;
4511 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4513 raid5_set_bi_processed_stripes(raid_bio, scnt);
4514 conf->retry_read_aligned = raid_bio;
4522 remaining = raid5_dec_bi_active_stripes(raid_bio);
4524 bio_endio(raid_bio, 0);
4525 if (atomic_dec_and_test(&conf->active_aligned_reads))
4526 wake_up(&conf->wait_for_stripe);
4532 * This is our raid5 kernel thread.
4534 * We scan the hash table for stripes which can be handled now.
4535 * During the scan, completed stripes are saved for us by the interrupt
4536 * handler, so that they will not have to wait for our next wakeup.
4538 static void raid5d(struct mddev *mddev)
4540 struct stripe_head *sh;
4541 struct r5conf *conf = mddev->private;
4543 struct blk_plug plug;
4545 pr_debug("+++ raid5d active\n");
4547 md_check_recovery(mddev);
4549 blk_start_plug(&plug);
4551 spin_lock_irq(&conf->device_lock);
4555 if (atomic_read(&mddev->plug_cnt) == 0 &&
4556 !list_empty(&conf->bitmap_list)) {
4557 /* Now is a good time to flush some bitmap updates */
4559 spin_unlock_irq(&conf->device_lock);
4560 bitmap_unplug(mddev->bitmap);
4561 spin_lock_irq(&conf->device_lock);
4562 conf->seq_write = conf->seq_flush;
4563 activate_bit_delay(conf);
4565 if (atomic_read(&mddev->plug_cnt) == 0)
4566 raid5_activate_delayed(conf);
4568 while ((bio = remove_bio_from_retry(conf))) {
4570 spin_unlock_irq(&conf->device_lock);
4571 ok = retry_aligned_read(conf, bio);
4572 spin_lock_irq(&conf->device_lock);
4578 sh = __get_priority_stripe(conf);
4582 spin_unlock_irq(&conf->device_lock);
4589 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
4590 md_check_recovery(mddev);
4592 spin_lock_irq(&conf->device_lock);
4594 pr_debug("%d stripes handled\n", handled);
4596 spin_unlock_irq(&conf->device_lock);
4598 async_tx_issue_pending_all();
4599 blk_finish_plug(&plug);
4601 pr_debug("--- raid5d inactive\n");
4605 raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
4607 struct r5conf *conf = mddev->private;
4609 return sprintf(page, "%d\n", conf->max_nr_stripes);
4615 raid5_set_cache_size(struct mddev *mddev, int size)
4617 struct r5conf *conf = mddev->private;
4620 if (size <= 16 || size > 32768)
4622 while (size < conf->max_nr_stripes) {
4623 if (drop_one_stripe(conf))
4624 conf->max_nr_stripes--;
4628 err = md_allow_write(mddev);
4631 while (size > conf->max_nr_stripes) {
4632 if (grow_one_stripe(conf))
4633 conf->max_nr_stripes++;
4638 EXPORT_SYMBOL(raid5_set_cache_size);
4641 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
4643 struct r5conf *conf = mddev->private;
4647 if (len >= PAGE_SIZE)
4652 if (strict_strtoul(page, 10, &new))
4654 err = raid5_set_cache_size(mddev, new);
4660 static struct md_sysfs_entry
4661 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4662 raid5_show_stripe_cache_size,
4663 raid5_store_stripe_cache_size);
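/*
 * The attribute defined above is exposed per array through sysfs
 * (typically /sys/block/mdX/md/stripe_cache_size).  A sketch of how an
 * administrator would inspect and enlarge the cache, with an assumed
 * device name:
 *
 *	cat /sys/block/md0/md/stripe_cache_size
 *	echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * Values must be above 16 and at most 32768 (see raid5_set_cache_size),
 * and each stripe_head pins roughly one page per member device.
 */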
4666 raid5_show_preread_threshold(struct mddev *mddev, char *page)
4668 struct r5conf *conf = mddev->private;
4670 return sprintf(page, "%d\n", conf->bypass_threshold);
4676 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
4678 struct r5conf *conf = mddev->private;
4680 if (len >= PAGE_SIZE)
4685 if (strict_strtoul(page, 10, &new))
4687 if (new > conf->max_nr_stripes)
4689 conf->bypass_threshold = new;
4693 static struct md_sysfs_entry
4694 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4696 raid5_show_preread_threshold,
4697 raid5_store_preread_threshold);
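/*
 * Usage sketch for the attribute above (device name assumed): the bypass
 * threshold lives in the same sysfs directory, e.g.
 *
 *	echo 2 > /sys/block/md0/md/preread_bypass_threshold
 *
 * The store routine rejects values larger than the current
 * stripe_cache_size (conf->max_nr_stripes).
 */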
4700 stripe_cache_active_show(struct mddev *mddev, char *page)
4702 struct r5conf *conf = mddev->private;
4704 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4709 static struct md_sysfs_entry
4710 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4712 static struct attribute *raid5_attrs[] = {
4713 &raid5_stripecache_size.attr,
4714 &raid5_stripecache_active.attr,
4715 &raid5_preread_bypass_threshold.attr,
4718 static struct attribute_group raid5_attrs_group = {
4720 .attrs = raid5_attrs,
4724 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
4726 struct r5conf *conf = mddev->private;
4729 sectors = mddev->dev_sectors;
4731 /* size is defined by the smallest of previous and new size */
4732 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4734 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4735 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4736 return sectors * (raid_disks - conf->max_degraded);
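/*
 * Worked example (illustrative numbers): a 4-device RAID5 with
 * max_degraded = 1, a 512 KiB chunk (1024 sectors) and
 * dev_sectors = 1953525168.  The per-device size is first rounded down to
 * a whole number of chunks (1953524736 sectors) and the exported capacity
 * is then 1953524736 * (4 - 1) = 5860574208 sectors, roughly 2.7 TiB.
 */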
4739 static void raid5_free_percpu(struct r5conf *conf)
4741 struct raid5_percpu *percpu;
4748 for_each_possible_cpu(cpu) {
4749 percpu = per_cpu_ptr(conf->percpu, cpu);
4750 safe_put_page(percpu->spare_page);
4751 kfree(percpu->scribble);
4753 #ifdef CONFIG_HOTPLUG_CPU
4754 unregister_cpu_notifier(&conf->cpu_notify);
4758 free_percpu(conf->percpu);
4761 static void free_conf(struct r5conf *conf)
4763 shrink_stripes(conf);
4764 raid5_free_percpu(conf);
4766 kfree(conf->stripe_hashtbl);
4770 #ifdef CONFIG_HOTPLUG_CPU
4771 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4774 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
4775 long cpu = (long)hcpu;
4776 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4779 case CPU_UP_PREPARE:
4780 case CPU_UP_PREPARE_FROZEN:
4781 if (conf->level == 6 && !percpu->spare_page)
4782 percpu->spare_page = alloc_page(GFP_KERNEL);
4783 if (!percpu->scribble)
4784 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4786 if (!percpu->scribble ||
4787 (conf->level == 6 && !percpu->spare_page)) {
4788 safe_put_page(percpu->spare_page);
4789 kfree(percpu->scribble);
4790 pr_err("%s: failed memory allocation for cpu%ld\n",
4792 return notifier_from_errno(-ENOMEM);
4796 case CPU_DEAD_FROZEN:
4797 safe_put_page(percpu->spare_page);
4798 kfree(percpu->scribble);
4799 percpu->spare_page = NULL;
4800 percpu->scribble = NULL;
4809 static int raid5_alloc_percpu(struct r5conf *conf)
4812 struct page *spare_page;
4813 struct raid5_percpu __percpu *allcpus;
4817 allcpus = alloc_percpu(struct raid5_percpu);
4820 conf->percpu = allcpus;
4824 for_each_present_cpu(cpu) {
4825 if (conf->level == 6) {
4826 spare_page = alloc_page(GFP_KERNEL);
4831 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4833 scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4838 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4840 #ifdef CONFIG_HOTPLUG_CPU
4841 conf->cpu_notify.notifier_call = raid456_cpu_notify;
4842 conf->cpu_notify.priority = 0;
4844 err = register_cpu_notifier(&conf->cpu_notify);
4851 static struct r5conf *setup_conf(struct mddev *mddev)
4853 struct r5conf *conf;
4854 int raid_disk, memory, max_disks;
4855 struct md_rdev *rdev;
4856 struct disk_info *disk;
4859 if (mddev->new_level != 5
4860 && mddev->new_level != 4
4861 && mddev->new_level != 6) {
4862 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
4863 mdname(mddev), mddev->new_level);
4864 return ERR_PTR(-EIO);
4866 if ((mddev->new_level == 5
4867 && !algorithm_valid_raid5(mddev->new_layout)) ||
4868 (mddev->new_level == 6
4869 && !algorithm_valid_raid6(mddev->new_layout))) {
4870 printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
4871 mdname(mddev), mddev->new_layout);
4872 return ERR_PTR(-EIO);
4874 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4875 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
4876 mdname(mddev), mddev->raid_disks);
4877 return ERR_PTR(-EINVAL);
4880 if (!mddev->new_chunk_sectors ||
4881 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4882 !is_power_of_2(mddev->new_chunk_sectors)) {
4883 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
4884 mdname(mddev), mddev->new_chunk_sectors << 9);
4885 return ERR_PTR(-EINVAL);
4888 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
4891 spin_lock_init(&conf->device_lock);
4892 init_waitqueue_head(&conf->wait_for_stripe);
4893 init_waitqueue_head(&conf->wait_for_overlap);
4894 INIT_LIST_HEAD(&conf->handle_list);
4895 INIT_LIST_HEAD(&conf->hold_list);
4896 INIT_LIST_HEAD(&conf->delayed_list);
4897 INIT_LIST_HEAD(&conf->bitmap_list);
4898 INIT_LIST_HEAD(&conf->inactive_list);
4899 atomic_set(&conf->active_stripes, 0);
4900 atomic_set(&conf->preread_active_stripes, 0);
4901 atomic_set(&conf->active_aligned_reads, 0);
4902 conf->bypass_threshold = BYPASS_THRESHOLD;
4903 conf->recovery_disabled = mddev->recovery_disabled - 1;
4905 conf->raid_disks = mddev->raid_disks;
4906 if (mddev->reshape_position == MaxSector)
4907 conf->previous_raid_disks = mddev->raid_disks;
4909 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4910 max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4911 conf->scribble_len = scribble_len(max_disks);
4913 conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4918 conf->mddev = mddev;
4920 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4923 conf->level = mddev->new_level;
4924 if (raid5_alloc_percpu(conf) != 0)
4927 pr_debug("raid456: run(%s) called.\n", mdname(mddev));
4929 rdev_for_each(rdev, mddev) {
4930 raid_disk = rdev->raid_disk;
4931 if (raid_disk >= max_disks
4934 disk = conf->disks + raid_disk;
4936 if (test_bit(Replacement, &rdev->flags)) {
4937 if (disk->replacement)
4939 disk->replacement = rdev;
4946 if (test_bit(In_sync, &rdev->flags)) {
4947 char b[BDEVNAME_SIZE];
4948 printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4950 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4951 } else if (rdev->saved_raid_disk != raid_disk)
4952 /* Cannot rely on bitmap to complete recovery */
4956 conf->chunk_sectors = mddev->new_chunk_sectors;
4957 conf->level = mddev->new_level;
4958 if (conf->level == 6)
4959 conf->max_degraded = 2;
4961 conf->max_degraded = 1;
4962 conf->algorithm = mddev->new_layout;
4963 conf->max_nr_stripes = NR_STRIPES;
4964 conf->reshape_progress = mddev->reshape_position;
4965 if (conf->reshape_progress != MaxSector) {
4966 conf->prev_chunk_sectors = mddev->chunk_sectors;
4967 conf->prev_algo = mddev->layout;
4970 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4971 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4972 if (grow_stripes(conf, conf->max_nr_stripes)) {
4974 "md/raid:%s: couldn't allocate %dkB for buffers\n",
4975 mdname(mddev), memory);
4978 printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
4979 mdname(mddev), memory);
4981 sprintf(pers_name, "raid%d", mddev->new_level);
4982 conf->thread = md_register_thread(raid5d, mddev, pers_name);
4983 if (!conf->thread) {
4985 "md/raid:%s: couldn't allocate thread.\n",
4995 return ERR_PTR(-EIO);
4997 return ERR_PTR(-ENOMEM);
5001 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
5004 case ALGORITHM_PARITY_0:
5005 if (raid_disk < max_degraded)
5008 case ALGORITHM_PARITY_N:
5009 if (raid_disk >= raid_disks - max_degraded)
5012 case ALGORITHM_PARITY_0_6:
5013 if (raid_disk == 0 ||
5014 raid_disk == raid_disks - 1)
5017 case ALGORITHM_LEFT_ASYMMETRIC_6:
5018 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5019 case ALGORITHM_LEFT_SYMMETRIC_6:
5020 case ALGORITHM_RIGHT_SYMMETRIC_6:
5021 if (raid_disk == raid_disks - 1)
5027 static int run(struct mddev *mddev)
5029 struct r5conf *conf;
5030 int working_disks = 0;
5031 int dirty_parity_disks = 0;
5032 struct md_rdev *rdev;
5033 sector_t reshape_offset = 0;
5035 long long min_offset_diff = 0;
5038 if (mddev->recovery_cp != MaxSector)
5039 printk(KERN_NOTICE "md/raid:%s: not clean"
5040 " -- starting background reconstruction\n",
5043 rdev_for_each(rdev, mddev) {
5045 if (rdev->raid_disk < 0)
5047 diff = (rdev->new_data_offset - rdev->data_offset);
5049 min_offset_diff = diff;
5051 } else if (mddev->reshape_backwards &&
5052 diff < min_offset_diff)
5053 min_offset_diff = diff;
5054 else if (!mddev->reshape_backwards &&
5055 diff > min_offset_diff)
5056 min_offset_diff = diff;
5059 if (mddev->reshape_position != MaxSector) {
5060 /* Check that we can continue the reshape.
5061 * Difficulties arise if the stripe we would write to
5062 * next is at or after the stripe we would read from next.
5063 * For a reshape that changes the number of devices, this
5064 * is only possible for a very short time, and mdadm makes
5065 * sure that time appears to have past before assembling
5066 * the array. So we fail if that time hasn't passed.
5067 * For a reshape that keeps the number of devices the same
5068 * mdadm must be monitoring the reshape and keeping the
5069 * critical areas read-only and backed up. It will start
5070 * the array in read-only mode, so we check for that.
5072 sector_t here_new, here_old;
5074 int max_degraded = (mddev->level == 6 ? 2 : 1);
5076 if (mddev->new_level != mddev->level) {
5077 printk(KERN_ERR "md/raid:%s: unsupported reshape "
5078 "required - aborting.\n",
5082 old_disks = mddev->raid_disks - mddev->delta_disks;
5083 /* reshape_position must be on a new-stripe boundary, and one
5084 * further up in new geometry must map after here in old geometry.
5087 here_new = mddev->reshape_position;
5088 if (sector_div(here_new, mddev->new_chunk_sectors *
5089 (mddev->raid_disks - max_degraded))) {
5090 printk(KERN_ERR "md/raid:%s: reshape_position not "
5091 "on a stripe boundary\n", mdname(mddev));
5094 reshape_offset = here_new * mddev->new_chunk_sectors;
5095 /* here_new is the stripe we will write to */
5096 here_old = mddev->reshape_position;
5097 sector_div(here_old, mddev->chunk_sectors *
5098 (old_disks-max_degraded));
5099 /* here_old is the first stripe that we might need to read from */
5101 if (mddev->delta_disks == 0) {
5102 if ((here_new * mddev->new_chunk_sectors !=
5103 here_old * mddev->chunk_sectors)) {
5104 printk(KERN_ERR "md/raid:%s: reshape position is"
5105 " confused - aborting\n", mdname(mddev));
5108 /* We cannot be sure it is safe to start an in-place
5109 * reshape. It is only safe if user-space is monitoring
5110 * and taking constant backups.
5111 * mdadm always starts a situation like this in
5112 * readonly mode so it can take control before
5113 * allowing any writes. So just check for that.
5115 if (abs(min_offset_diff) >= mddev->chunk_sectors &&
5116 abs(min_offset_diff) >= mddev->new_chunk_sectors)
5117 /* not really in-place - so OK */;
5118 else if (mddev->ro == 0) {
5119 printk(KERN_ERR "md/raid:%s: in-place reshape "
5120 "must be started in read-only mode "
5125 } else if (mddev->reshape_backwards
5126 ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
5127 here_old * mddev->chunk_sectors)
5128 : (here_new * mddev->new_chunk_sectors >=
5129 here_old * mddev->chunk_sectors + (-min_offset_diff))) {
5130 /* Reading from the same stripe as writing to - bad */
5131 printk(KERN_ERR "md/raid:%s: reshape_position too early for "
5132 "auto-recovery - aborting.\n",
5136 printk(KERN_INFO "md/raid:%s: reshape will continue\n",
5138 /* OK, we should be able to continue; */
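/*
 * Worked example of the check above (illustrative numbers): growing a
 * RAID5 from 4 to 5 devices with a 1024-sector chunk in both geometries
 * and reshape_position = 409600.  here_new = 409600 / (1024 * 4) = 100
 * (an exact division, so the stripe-boundary test passes) and
 * here_old = 409600 / (1024 * 3) = 133.  The next write lands at device
 * offset 100 * 1024 = 102400 while the next read is at 133 * 1024 = 136192,
 * so writing stays safely behind reading and the reshape may continue
 * without user-space assistance.
 */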
5140 BUG_ON(mddev->level != mddev->new_level);
5141 BUG_ON(mddev->layout != mddev->new_layout);
5142 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
5143 BUG_ON(mddev->delta_disks != 0);
5146 if (mddev->private == NULL)
5147 conf = setup_conf(mddev);
5149 conf = mddev->private;
5152 return PTR_ERR(conf);
5154 conf->min_offset_diff = min_offset_diff;
5155 mddev->thread = conf->thread;
5156 conf->thread = NULL;
5157 mddev->private = conf;
5159 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
5161 rdev = conf->disks[i].rdev;
5162 if (!rdev && conf->disks[i].replacement) {
5163 /* The replacement is all we have yet */
5164 rdev = conf->disks[i].replacement;
5165 conf->disks[i].replacement = NULL;
5166 clear_bit(Replacement, &rdev->flags);
5167 conf->disks[i].rdev = rdev;
5171 if (conf->disks[i].replacement &&
5172 conf->reshape_progress != MaxSector) {
5173 /* replacements and reshape simply do not mix. */
5174 printk(KERN_ERR "md: cannot handle concurrent "
5175 "replacement and reshape.\n");
5178 if (test_bit(In_sync, &rdev->flags)) {
5182 /* This disc is not fully in-sync. However if it
5183 * just stored parity (beyond the recovery_offset),
5184 * then we don't need to be concerned about the
5185 * array being dirty.
5186 * When reshape goes 'backwards', we never have
5187 * partially completed devices, so we only need
5188 * to worry about reshape going forwards.
5190 /* Hack because v0.91 doesn't store recovery_offset properly. */
5191 if (mddev->major_version == 0 &&
5192 mddev->minor_version > 90)
5193 rdev->recovery_offset = reshape_offset;
5195 if (rdev->recovery_offset < reshape_offset) {
5196 /* We need to check old and new layout */
5197 if (!only_parity(rdev->raid_disk,
5200 conf->max_degraded))
5203 if (!only_parity(rdev->raid_disk,
5205 conf->previous_raid_disks,
5206 conf->max_degraded))
5208 dirty_parity_disks++;
5212 * 0 for a fully functional array, 1 or 2 for a degraded array.
5214 mddev->degraded = calc_degraded(conf);
5216 if (has_failed(conf)) {
5217 printk(KERN_ERR "md/raid:%s: not enough operational devices"
5218 " (%d/%d failed)\n",
5219 mdname(mddev), mddev->degraded, conf->raid_disks);
5223 /* device size must be a multiple of chunk size */
5224 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
5225 mddev->resync_max_sectors = mddev->dev_sectors;
5227 if (mddev->degraded > dirty_parity_disks &&
5228 mddev->recovery_cp != MaxSector) {
5229 if (mddev->ok_start_degraded)
5231 "md/raid:%s: starting dirty degraded array"
5232 " - data corruption possible.\n",
5236 "md/raid:%s: cannot start dirty degraded array.\n",
5242 if (mddev->degraded == 0)
5243 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
5244 " devices, algorithm %d\n", mdname(mddev), conf->level,
5245 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
5248 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
5249 " out of %d devices, algorithm %d\n",
5250 mdname(mddev), conf->level,
5251 mddev->raid_disks - mddev->degraded,
5252 mddev->raid_disks, mddev->new_layout);
5254 print_raid5_conf(conf);
5256 if (conf->reshape_progress != MaxSector) {
5257 conf->reshape_safe = conf->reshape_progress;
5258 atomic_set(&conf->reshape_stripes, 0);
5259 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5260 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5261 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5262 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5263 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5268 /* Ok, everything is just fine now */
5269 if (mddev->to_remove == &raid5_attrs_group)
5270 mddev->to_remove = NULL;
5271 else if (mddev->kobj.sd &&
5272 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
5274 "raid5: failed to create sysfs attributes for %s\n",
5276 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5280 /* read-ahead size must cover two whole stripes, which
5281 * is 2 * (datadisks) * chunksize, where 'datadisks' is the
5282 * number of data (non-parity) devices
5284 int data_disks = conf->previous_raid_disks - conf->max_degraded;
5285 int stripe = data_disks *
5286 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
5287 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5288 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
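/*
 * Example with assumed numbers: 4 devices (previous_raid_disks = 4),
 * max_degraded = 1 and a 512 KiB chunk give data_disks = 3 and
 * stripe = 3 * (512 KiB / 4 KiB) = 384 pages, so ra_pages is raised to at
 * least 768 pages (3 MiB of readahead) - enough to cover two full stripes.
 */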
5290 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
5292 mddev->queue->backing_dev_info.congested_data = mddev;
5293 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5295 chunk_size = mddev->chunk_sectors << 9;
5296 blk_queue_io_min(mddev->queue, chunk_size);
5297 blk_queue_io_opt(mddev->queue, chunk_size *
5298 (conf->raid_disks - conf->max_degraded));
5300 rdev_for_each(rdev, mddev) {
5301 disk_stack_limits(mddev->gendisk, rdev->bdev,
5302 rdev->data_offset << 9);
5303 disk_stack_limits(mddev->gendisk, rdev->bdev,
5304 rdev->new_data_offset << 9);
5310 md_unregister_thread(&mddev->thread);
5311 print_raid5_conf(conf);
5313 mddev->private = NULL;
5314 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
5318 static int stop(struct mddev *mddev)
5320 struct r5conf *conf = mddev->private;
5322 md_unregister_thread(&mddev->thread);
5324 mddev->queue->backing_dev_info.congested_fn = NULL;
5326 mddev->private = NULL;
5327 mddev->to_remove = &raid5_attrs_group;
5331 static void status(struct seq_file *seq, struct mddev *mddev)
5333 struct r5conf *conf = mddev->private;
5336 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
5337 mddev->chunk_sectors / 2, mddev->layout);
5338 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
5339 for (i = 0; i < conf->raid_disks; i++)
5340 seq_printf (seq, "%s",
5341 conf->disks[i].rdev &&
5342 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
5343 seq_printf (seq, "]");
5346 static void print_raid5_conf (struct r5conf *conf)
5349 struct disk_info *tmp;
5351 printk(KERN_DEBUG "RAID conf printout:\n");
5353 printk("(conf==NULL)\n");
5356 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
5358 conf->raid_disks - conf->mddev->degraded);
5360 for (i = 0; i < conf->raid_disks; i++) {
5361 char b[BDEVNAME_SIZE];
5362 tmp = conf->disks + i;
5364 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
5365 i, !test_bit(Faulty, &tmp->rdev->flags),
5366 bdevname(tmp->rdev->bdev, b));
5370 static int raid5_spare_active(struct mddev *mddev)
5373 struct r5conf *conf = mddev->private;
5374 struct disk_info *tmp;
5376 unsigned long flags;
5378 for (i = 0; i < conf->raid_disks; i++) {
5379 tmp = conf->disks + i;
5380 if (tmp->replacement
5381 && tmp->replacement->recovery_offset == MaxSector
5382 && !test_bit(Faulty, &tmp->replacement->flags)
5383 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
5384 /* Replacement has just become active. */
5386 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
5389 /* Replaced device not technically faulty,
5390 * but we need to be sure it gets removed
5391 * and never re-added.
5393 set_bit(Faulty, &tmp->rdev->flags);
5394 sysfs_notify_dirent_safe(
5395 tmp->rdev->sysfs_state);
5397 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
5398 } else if (tmp->rdev
5399 && tmp->rdev->recovery_offset == MaxSector
5400 && !test_bit(Faulty, &tmp->rdev->flags)
5401 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5403 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
5406 spin_lock_irqsave(&conf->device_lock, flags);
5407 mddev->degraded = calc_degraded(conf);
5408 spin_unlock_irqrestore(&conf->device_lock, flags);
5409 print_raid5_conf(conf);
5413 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
5415 struct r5conf *conf = mddev->private;
5417 int number = rdev->raid_disk;
5418 struct md_rdev **rdevp;
5419 struct disk_info *p = conf->disks + number;
5421 print_raid5_conf(conf);
5422 if (rdev == p->rdev)
5424 else if (rdev == p->replacement)
5425 rdevp = &p->replacement;
5429 if (number >= conf->raid_disks &&
5430 conf->reshape_progress == MaxSector)
5431 clear_bit(In_sync, &rdev->flags);
5433 if (test_bit(In_sync, &rdev->flags) ||
5434 atomic_read(&rdev->nr_pending)) {
5438 /* Only remove non-faulty devices if recovery is not possible. */
5441 if (!test_bit(Faulty, &rdev->flags) &&
5442 mddev->recovery_disabled != conf->recovery_disabled &&
5443 !has_failed(conf) &&
5444 (!p->replacement || p->replacement == rdev) &&
5445 number < conf->raid_disks) {
5451 if (atomic_read(&rdev->nr_pending)) {
5452 /* lost the race, try later */
5455 } else if (p->replacement) {
5456 /* We must have just cleared 'rdev' */
5457 p->rdev = p->replacement;
5458 clear_bit(Replacement, &p->replacement->flags);
5459 smp_mb(); /* Make sure other CPUs may see both as identical
5460 * but will never see neither - if they are careful
5462 p->replacement = NULL;
5463 clear_bit(WantReplacement, &rdev->flags);
5465 /* We might have just removed the Replacement as faulty -
5466 * clear the bit just in case
5468 clear_bit(WantReplacement, &rdev->flags);
5471 print_raid5_conf(conf);
5475 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
5477 struct r5conf *conf = mddev->private;
5480 struct disk_info *p;
5482 int last = conf->raid_disks - 1;
5484 if (mddev->recovery_disabled == conf->recovery_disabled)
5487 if (rdev->saved_raid_disk < 0 && has_failed(conf))
5488 /* no point adding a device */
5491 if (rdev->raid_disk >= 0)
5492 first = last = rdev->raid_disk;
5495 * find the disk ... but prefer rdev->saved_raid_disk
5498 if (rdev->saved_raid_disk >= 0 &&
5499 rdev->saved_raid_disk >= first &&
5500 conf->disks[rdev->saved_raid_disk].rdev == NULL)
5501 first = rdev->saved_raid_disk;
5503 for (disk = first; disk <= last; disk++) {
5504 p = conf->disks + disk;
5505 if (p->rdev == NULL) {
5506 clear_bit(In_sync, &rdev->flags);
5507 rdev->raid_disk = disk;
5509 if (rdev->saved_raid_disk != disk)
5511 rcu_assign_pointer(p->rdev, rdev);
5515 for (disk = first; disk <= last; disk++) {
5516 p = conf->disks + disk;
5517 if (test_bit(WantReplacement, &p->rdev->flags) &&
5518 p->replacement == NULL) {
5519 clear_bit(In_sync, &rdev->flags);
5520 set_bit(Replacement, &rdev->flags);
5521 rdev->raid_disk = disk;
5524 rcu_assign_pointer(p->replacement, rdev);
5529 print_raid5_conf(conf);
5533 static int raid5_resize(struct mddev *mddev, sector_t sectors)
5535 /* no resync is happening, and there is enough space
5536 * on all devices, so we can resize.
5537 * We need to make sure resync covers any new space.
5538 * If the array is shrinking we should possibly wait until
5539 * any io in the removed space completes, but it hardly seems worth it.
5543 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5544 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
5545 if (mddev->external_size &&
5546 mddev->array_sectors > newsize)
5548 if (mddev->bitmap) {
5549 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
5553 md_set_array_sectors(mddev, newsize);
5554 set_capacity(mddev->gendisk, mddev->array_sectors);
5555 revalidate_disk(mddev->gendisk);
5556 if (sectors > mddev->dev_sectors &&
5557 mddev->recovery_cp > mddev->dev_sectors) {
5558 mddev->recovery_cp = mddev->dev_sectors;
5559 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5561 mddev->dev_sectors = sectors;
5562 mddev->resync_max_sectors = sectors;
5566 static int check_stripe_cache(struct mddev *mddev)
5568 /* Can only proceed if there are plenty of stripe_heads.
5569 * We need a minimum of one full stripe, and for sensible progress
5570 * it is best to have about 4 times that.
5571 * If we require 4 times, then the default 256 4K stripe_heads will
5572 * allow for chunk sizes up to 256K, which is probably OK.
5573 * If the chunk size is greater, user-space should request more
5574 * stripe_heads first.
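/*
 * For example (illustrative numbers): with STRIPE_SIZE = 4 KiB, a 512 KiB
 * chunk means one chunk-deep stripe is handled as 128 stripe_heads, so
 * the test below asks for 4 * 128 = 512 of them - more than the default
 * 256 (NR_STRIPES).  stripe_cache_size would have to be raised before
 * such a reshape can start.
 */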
5576 struct r5conf *conf = mddev->private;
5577 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5578 > conf->max_nr_stripes ||
5579 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5580 > conf->max_nr_stripes) {
5581 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
5583 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5590 static int check_reshape(struct mddev *mddev)
5592 struct r5conf *conf = mddev->private;
5594 if (mddev->delta_disks == 0 &&
5595 mddev->new_layout == mddev->layout &&
5596 mddev->new_chunk_sectors == mddev->chunk_sectors)
5597 return 0; /* nothing to do */
5598 if (has_failed(conf))
5600 if (mddev->delta_disks < 0) {
5601 /* We might be able to shrink, but the devices must
5602 * be made bigger first.
5603 * For raid6, 4 is the minimum size.
5604 * Otherwise 2 is the minimum
5607 if (mddev->level == 6)
5609 if (mddev->raid_disks + mddev->delta_disks < min)
5613 if (!check_stripe_cache(mddev))
5616 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5619 static int raid5_start_reshape(struct mddev *mddev)
5621 struct r5conf *conf = mddev->private;
5622 struct md_rdev *rdev;
5624 unsigned long flags;
5626 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5629 if (!check_stripe_cache(mddev))
5632 if (has_failed(conf))
5635 rdev_for_each(rdev, mddev) {
5636 if (!test_bit(In_sync, &rdev->flags)
5637 && !test_bit(Faulty, &rdev->flags))
5641 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5642 /* Not enough devices even to make a degraded array of that size */
5647 /* Refuse to reduce size of the array. Any reductions in
5648 * array size must be through explicit setting of the array_size attribute.
5651 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5652 < mddev->array_sectors) {
5653 printk(KERN_ERR "md/raid:%s: array size must be reduced "
5654 "before number of disks\n", mdname(mddev));
5658 atomic_set(&conf->reshape_stripes, 0);
5659 spin_lock_irq(&conf->device_lock);
5660 conf->previous_raid_disks = conf->raid_disks;
5661 conf->raid_disks += mddev->delta_disks;
5662 conf->prev_chunk_sectors = conf->chunk_sectors;
5663 conf->chunk_sectors = mddev->new_chunk_sectors;
5664 conf->prev_algo = conf->algorithm;
5665 conf->algorithm = mddev->new_layout;
5667 /* Code that selects data_offset needs to see the generation update
5668 * if reshape_progress has been set - so a memory barrier needed.
5671 if (mddev->reshape_backwards)
5672 conf->reshape_progress = raid5_size(mddev, 0, 0);
5674 conf->reshape_progress = 0;
5675 conf->reshape_safe = conf->reshape_progress;
5676 spin_unlock_irq(&conf->device_lock);
5678 /* Add some new drives, as many as will fit.
5679 * We know there are enough to make the newly sized array work.
5680 * Don't add devices if we are reducing the number of
5681 * devices in the array. This is because it is not possible
5682 * to correctly record the "partially reconstructed" state of
5683 * such devices during the reshape and confusion could result.
5685 if (mddev->delta_disks >= 0) {
5686 rdev_for_each(rdev, mddev)
5687 if (rdev->raid_disk < 0 &&
5688 !test_bit(Faulty, &rdev->flags)) {
5689 if (raid5_add_disk(mddev, rdev) == 0) {
5691 >= conf->previous_raid_disks)
5692 set_bit(In_sync, &rdev->flags);
5694 rdev->recovery_offset = 0;
5696 if (sysfs_link_rdev(mddev, rdev))
5697 /* Failure here is OK */;
5699 } else if (rdev->raid_disk >= conf->previous_raid_disks
5700 && !test_bit(Faulty, &rdev->flags)) {
5701 /* This is a spare that was manually added */
5702 set_bit(In_sync, &rdev->flags);
5705 /* When a reshape changes the number of devices,
5706 * ->degraded is measured against the larger of the
5707 * pre and post number of devices.
5709 spin_lock_irqsave(&conf->device_lock, flags);
5710 mddev->degraded = calc_degraded(conf);
5711 spin_unlock_irqrestore(&conf->device_lock, flags);
5713 mddev->raid_disks = conf->raid_disks;
5714 mddev->reshape_position = conf->reshape_progress;
5715 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5717 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5718 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5719 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5720 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5721 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5723 if (!mddev->sync_thread) {
5724 mddev->recovery = 0;
5725 spin_lock_irq(&conf->device_lock);
5726 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5727 rdev_for_each(rdev, mddev)
5728 rdev->new_data_offset = rdev->data_offset;
5730 conf->reshape_progress = MaxSector;
5731 mddev->reshape_position = MaxSector;
5732 spin_unlock_irq(&conf->device_lock);
5735 conf->reshape_checkpoint = jiffies;
5736 md_wakeup_thread(mddev->sync_thread);
5737 md_new_event(mddev);
5741 /* This is called from the reshape thread and should make any
5742 * changes needed in 'conf'
5744 static void end_reshape(struct r5conf *conf)
5747 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5748 struct md_rdev *rdev;
5750 spin_lock_irq(&conf->device_lock);
5751 conf->previous_raid_disks = conf->raid_disks;
5752 rdev_for_each(rdev, conf->mddev)
5753 rdev->data_offset = rdev->new_data_offset;
5755 conf->reshape_progress = MaxSector;
5756 spin_unlock_irq(&conf->device_lock);
5757 wake_up(&conf->wait_for_overlap);
5759 /* read-ahead size must cover two whole stripes, which is
5760 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices
5762 if (conf->mddev->queue) {
5763 int data_disks = conf->raid_disks - conf->max_degraded;
5764 int stripe = data_disks * ((conf->chunk_sectors << 9)
5766 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5767 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5772 /* This is called from the raid5d thread with mddev_lock held.
5773 * It makes config changes to the device.
5775 static void raid5_finish_reshape(struct mddev *mddev)
5777 struct r5conf *conf = mddev->private;
5779 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5781 if (mddev->delta_disks > 0) {
5782 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5783 set_capacity(mddev->gendisk, mddev->array_sectors);
5784 revalidate_disk(mddev->gendisk);
5787 spin_lock_irq(&conf->device_lock);
5788 mddev->degraded = calc_degraded(conf);
5789 spin_unlock_irq(&conf->device_lock);
5790 for (d = conf->raid_disks ;
5791 d < conf->raid_disks - mddev->delta_disks;
5793 struct md_rdev *rdev = conf->disks[d].rdev;
5795 clear_bit(In_sync, &rdev->flags);
5796 rdev = conf->disks[d].replacement;
5798 clear_bit(In_sync, &rdev->flags);
5801 mddev->layout = conf->algorithm;
5802 mddev->chunk_sectors = conf->chunk_sectors;
5803 mddev->reshape_position = MaxSector;
5804 mddev->delta_disks = 0;
5805 mddev->reshape_backwards = 0;
5809 static void raid5_quiesce(struct mddev *mddev, int state)
5811 struct r5conf *conf = mddev->private;
5814 case 2: /* resume for a suspend */
5815 wake_up(&conf->wait_for_overlap);
5818 case 1: /* stop all writes */
5819 spin_lock_irq(&conf->device_lock);
5820 /* '2' tells resync/reshape to pause so that all
5821 * active stripes can drain
5824 wait_event_lock_irq(conf->wait_for_stripe,
5825 atomic_read(&conf->active_stripes) == 0 &&
5826 atomic_read(&conf->active_aligned_reads) == 0,
5827 conf->device_lock, /* nothing */);
5829 spin_unlock_irq(&conf->device_lock);
5830 /* allow reshape to continue */
5831 wake_up(&conf->wait_for_overlap);
5834 case 0: /* re-enable writes */
5835 spin_lock_irq(&conf->device_lock);
5837 wake_up(&conf->wait_for_stripe);
5838 wake_up(&conf->wait_for_overlap);
5839 spin_unlock_irq(&conf->device_lock);
5845 static void *raid45_takeover_raid0(struct mddev *mddev, int level)
5847 struct r0conf *raid0_conf = mddev->private;
5850 /* for raid0 takeover only one zone is supported */
5851 if (raid0_conf->nr_strip_zones > 1) {
5852 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
5854 return ERR_PTR(-EINVAL);
5857 sectors = raid0_conf->strip_zone[0].zone_end;
5858 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
5859 mddev->dev_sectors = sectors;
5860 mddev->new_level = level;
5861 mddev->new_layout = ALGORITHM_PARITY_N;
5862 mddev->new_chunk_sectors = mddev->chunk_sectors;
5863 mddev->raid_disks += 1;
5864 mddev->delta_disks = 1;
5865 /* make sure it will not be marked as dirty */
5866 mddev->recovery_cp = MaxSector;
5868 return setup_conf(mddev);
5872 static void *raid5_takeover_raid1(struct mddev *mddev)
5876 if (mddev->raid_disks != 2 ||
5877 mddev->degraded > 1)
5878 return ERR_PTR(-EINVAL);
5880 /* Should check if there are write-behind devices? */
5882 chunksect = 64*2; /* 64K by default */
5884 /* The array must be an exact multiple of chunksize */
5885 while (chunksect && (mddev->array_sectors & (chunksect-1)))
5888 if ((chunksect<<9) < STRIPE_SIZE)
5889 /* array size does not allow a suitable chunk size */
5890 return ERR_PTR(-EINVAL);
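/*
 * Example (illustrative): for an array of 500000 sectors the loop above
 * halves chunksect from 128 (64 KiB) until it divides the array size;
 * 500000 = 2^5 * 15625, so chunksect ends up as 32, i.e. a 16 KiB chunk,
 * which is still >= STRIPE_SIZE and therefore acceptable.
 */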
5892 mddev->new_level = 5;
5893 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5894 mddev->new_chunk_sectors = chunksect;
5896 return setup_conf(mddev);
5899 static void *raid5_takeover_raid6(struct mddev *mddev)
5903 switch (mddev->layout) {
5904 case ALGORITHM_LEFT_ASYMMETRIC_6:
5905 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5907 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5908 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5910 case ALGORITHM_LEFT_SYMMETRIC_6:
5911 new_layout = ALGORITHM_LEFT_SYMMETRIC;
5913 case ALGORITHM_RIGHT_SYMMETRIC_6:
5914 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5916 case ALGORITHM_PARITY_0_6:
5917 new_layout = ALGORITHM_PARITY_0;
5919 case ALGORITHM_PARITY_N:
5920 new_layout = ALGORITHM_PARITY_N;
5923 return ERR_PTR(-EINVAL);
5925 mddev->new_level = 5;
5926 mddev->new_layout = new_layout;
5927 mddev->delta_disks = -1;
5928 mddev->raid_disks -= 1;
5929 return setup_conf(mddev);
5933 static int raid5_check_reshape(struct mddev *mddev)
5935 /* For a 2-drive array, the layout and chunk size can be changed
5936 * immediately as no restriping is needed.
5937 * For larger arrays we record the new value - after validation
5938 * to be used by a reshape pass.
5940 struct r5conf *conf = mddev->private;
5941 int new_chunk = mddev->new_chunk_sectors;
5943 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5945 if (new_chunk > 0) {
5946 if (!is_power_of_2(new_chunk))
5948 if (new_chunk < (PAGE_SIZE>>9))
5950 if (mddev->array_sectors & (new_chunk-1))
5951 /* not factor of array size */
5955 /* They look valid */
5957 if (mddev->raid_disks == 2) {
5958 /* can make the change immediately */
5959 if (mddev->new_layout >= 0) {
5960 conf->algorithm = mddev->new_layout;
5961 mddev->layout = mddev->new_layout;
5963 if (new_chunk > 0) {
5964 conf->chunk_sectors = new_chunk ;
5965 mddev->chunk_sectors = new_chunk;
5967 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5968 md_wakeup_thread(mddev->thread);
5970 return check_reshape(mddev);
5973 static int raid6_check_reshape(struct mddev *mddev)
5975 int new_chunk = mddev->new_chunk_sectors;
5977 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5979 if (new_chunk > 0) {
5980 if (!is_power_of_2(new_chunk))
5982 if (new_chunk < (PAGE_SIZE >> 9))
5984 if (mddev->array_sectors & (new_chunk-1))
5985 /* not factor of array size */
5989 /* They look valid */
5990 return check_reshape(mddev);
5993 static void *raid5_takeover(struct mddev *mddev)
5995 /* raid5 can take over:
5996 * raid0 - if there is only one strip zone - make it a raid4 layout
5997 * raid1 - if there are two drives. We need to know the chunk size
5998 * raid4 - trivial - just use a raid4 layout.
5999 * raid6 - Providing it is a *_6 layout
6001 if (mddev->level == 0)
6002 return raid45_takeover_raid0(mddev, 5);
6003 if (mddev->level == 1)
6004 return raid5_takeover_raid1(mddev);
6005 if (mddev->level == 4) {
6006 mddev->new_layout = ALGORITHM_PARITY_N;
6007 mddev->new_level = 5;
6008 return setup_conf(mddev);
6010 if (mddev->level == 6)
6011 return raid5_takeover_raid6(mddev);
6013 return ERR_PTR(-EINVAL);
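/*
 * These takeover paths are normally driven from user space by asking md
 * to change the array's level; with mdadm that is typically something
 * like (device name assumed):
 *
 *	mdadm --grow /dev/md0 --level=5
 *
 * which causes md to call the new personality's ->takeover method on the
 * existing configuration.
 */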
6016 static void *raid4_takeover(struct mddev *mddev)
6018 /* raid4 can take over:
6019 * raid0 - if there is only one strip zone
6020 * raid5 - if layout is right
6022 if (mddev->level == 0)
6023 return raid45_takeover_raid0(mddev, 4);
6024 if (mddev->level == 5 &&
6025 mddev->layout == ALGORITHM_PARITY_N) {
6026 mddev->new_layout = 0;
6027 mddev->new_level = 4;
6028 return setup_conf(mddev);
6030 return ERR_PTR(-EINVAL);
6033 static struct md_personality raid5_personality;
6035 static void *raid6_takeover(struct mddev *mddev)
6037 /* Currently can only take over a raid5. We map the
6038 * personality to an equivalent raid6 personality
6039 * with the Q block at the end.
6043 if (mddev->pers != &raid5_personality)
6044 return ERR_PTR(-EINVAL);
6045 if (mddev->degraded > 1)
6046 return ERR_PTR(-EINVAL);
6047 if (mddev->raid_disks > 253)
6048 return ERR_PTR(-EINVAL);
6049 if (mddev->raid_disks < 3)
6050 return ERR_PTR(-EINVAL);
6052 switch (mddev->layout) {
6053 case ALGORITHM_LEFT_ASYMMETRIC:
6054 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
6056 case ALGORITHM_RIGHT_ASYMMETRIC:
6057 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
6059 case ALGORITHM_LEFT_SYMMETRIC:
6060 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
6062 case ALGORITHM_RIGHT_SYMMETRIC:
6063 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
6065 case ALGORITHM_PARITY_0:
6066 new_layout = ALGORITHM_PARITY_0_6;
6068 case ALGORITHM_PARITY_N:
6069 new_layout = ALGORITHM_PARITY_N;
6072 return ERR_PTR(-EINVAL);
6074 mddev->new_level = 6;
6075 mddev->new_layout = new_layout;
6076 mddev->delta_disks = 1;
6077 mddev->raid_disks += 1;
6078 return setup_conf(mddev);
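/*
 * The *_6 layouts chosen above keep the data and P blocks exactly where
 * the corresponding raid5 layout put them, with the Q block always on the
 * last device.  That is what lets a raid5 array be taken over by raid6
 * simply by adding one device (delta_disks = 1) without relocating any
 * existing data; a later reshape can convert to a fully rotated raid6
 * layout.
 */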
6082 static struct md_personality raid6_personality =
6086 .owner = THIS_MODULE,
6087 .make_request = make_request,
6091 .error_handler = error,
6092 .hot_add_disk = raid5_add_disk,
6093 .hot_remove_disk= raid5_remove_disk,
6094 .spare_active = raid5_spare_active,
6095 .sync_request = sync_request,
6096 .resize = raid5_resize,
6098 .check_reshape = raid6_check_reshape,
6099 .start_reshape = raid5_start_reshape,
6100 .finish_reshape = raid5_finish_reshape,
6101 .quiesce = raid5_quiesce,
6102 .takeover = raid6_takeover,
6104 static struct md_personality raid5_personality =
6108 .owner = THIS_MODULE,
6109 .make_request = make_request,
6113 .error_handler = error,
6114 .hot_add_disk = raid5_add_disk,
6115 .hot_remove_disk= raid5_remove_disk,
6116 .spare_active = raid5_spare_active,
6117 .sync_request = sync_request,
6118 .resize = raid5_resize,
6120 .check_reshape = raid5_check_reshape,
6121 .start_reshape = raid5_start_reshape,
6122 .finish_reshape = raid5_finish_reshape,
6123 .quiesce = raid5_quiesce,
6124 .takeover = raid5_takeover,
6127 static struct md_personality raid4_personality =
6131 .owner = THIS_MODULE,
6132 .make_request = make_request,
6136 .error_handler = error,
6137 .hot_add_disk = raid5_add_disk,
6138 .hot_remove_disk= raid5_remove_disk,
6139 .spare_active = raid5_spare_active,
6140 .sync_request = sync_request,
6141 .resize = raid5_resize,
6143 .check_reshape = raid5_check_reshape,
6144 .start_reshape = raid5_start_reshape,
6145 .finish_reshape = raid5_finish_reshape,
6146 .quiesce = raid5_quiesce,
6147 .takeover = raid4_takeover,
6150 static int __init raid5_init(void)
6152 register_md_personality(&raid6_personality);
6153 register_md_personality(&raid5_personality);
6154 register_md_personality(&raid4_personality);
6158 static void raid5_exit(void)
6160 unregister_md_personality(&raid6_personality);
6161 unregister_md_personality(&raid5_personality);
6162 unregister_md_personality(&raid4_personality);
6165 module_init(raid5_init);
6166 module_exit(raid5_exit);
6167 MODULE_LICENSE("GPL");
6168 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
6169 MODULE_ALIAS("md-personality-4"); /* RAID5 */
6170 MODULE_ALIAS("md-raid5");
6171 MODULE_ALIAS("md-raid4");
6172 MODULE_ALIAS("md-level-5");
6173 MODULE_ALIAS("md-level-4");
6174 MODULE_ALIAS("md-personality-8"); /* RAID6 */
6175 MODULE_ALIAS("md-raid6");
6176 MODULE_ALIAS("md-level-6");
6178 /* This used to be two separate modules, they were: */
6179 MODULE_ALIAS("raid5");
6180 MODULE_ALIAS("raid6");