md/raid5: use md_write_start to count stripes, not bios
author    NeilBrown <neilb@suse.com>
          Wed, 15 Mar 2017 03:05:12 +0000 (14:05 +1100)
committer Shaohua Li <shli@fb.com>
          Thu, 23 Mar 2017 02:15:42 +0000 (19:15 -0700)
We use md_write_start() to increase the count of pending writes, and
md_write_end() to decrement the count.  We currently count bios
submitted to md/raid5.  Change it to count stripe_heads that a WRITE bio
has been attached to.

So now, raid5_make_request() calls md_write_start() and then
md_write_end() to keep the count elevated during the setup of the
request.

add_stripe_bio() calls md_write_inc() for each stripe_head, and the
completion routines always call md_write_end(), instead of only
calling it when raid5_dec_bi_active_stripes() returns 0.
make_discard_request also calls md_write_start/end().
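
On the submission side the accounting now nests roughly as follows.  This is a
simplified sketch built from the function names in the hunks below; argument
lists are abridged and the error, retry and read-only paths are omitted:

    /* raid5_make_request(), abridged */
    bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
    md_write_start(mddev, bi);      /* may sleep, so taken before any locks */

    for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
            sh = raid5_get_active_stripe(conf, new_sector, previous, 0, 0);
            add_stripe_bio(sh, bi, dd_idx, rw, previous); /* md_write_inc() inside */
            release_stripe_plug(mddev, sh);
    }

    if (rw == WRITE)
            md_write_end(mddev);    /* drop the reference taken during setup */
    if (!raid5_dec_bi_active_stripes(bi))
            bio_endio(bi);

The leading md_write_start()/trailing md_write_end() pair keeps writes_pending
elevated while stripes are being attached, so the count cannot drop to zero
part-way through setting up a request.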

The parallel between md_write_{start,end} and use of bi_phys_segments
can be seen in that:
 Whenever we set bi_phys_segments to 1, we now call md_write_start.
 Whenever we increment it on non-read requests with
   raid5_inc_bi_active_stripes(), we now call md_write_inc().
 Whenever we decrement bi_phys_segments on non-read requests with
    raid5_dec_bi_active_stripes(), we now call md_write_end(), as sketched below.
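
On the completion side the ordering simply flips: md_write_end() is called once
for every completed stripe, before the per-bio stripe count is dropped, rather
than only when that count reaches zero.  The pattern, repeated in
r5c_return_dev_pending_writes(), handle_failed_stripe() and the other
completion paths in the hunks below, is:

    /* before this patch */
    if (!raid5_dec_bi_active_stripes(wbi)) {
            md_write_end(conf->mddev);      /* only on the bio's last stripe */
            bio_list_add(return_bi, wbi);
    }

    /* after this patch */
    md_write_end(conf->mddev);              /* once per completed stripe */
    if (!raid5_dec_bi_active_stripes(wbi))
            bio_list_add(return_bi, wbi);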

This reduces our dependence on keeping a per-bio count of active
stripes in bi_phys_segments.

md_write_inc() is added which parallels md_write_start(), but requires
that a write has already been started, and is certain never to sleep.
This can be used inside a spinlocked region when adding to a write
request.
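
For example, make_discard_request() (see the hunks below) attaches the bio to
each data device while holding sh->stripe_lock, so only md_write_inc() may be
used there.  A sketch, with the loop over the stripe's data devices paraphrased
and only the md_write_*() calls taken verbatim from the diff:

    /* make_discard_request(), abridged */
    md_write_start(mddev, bi);              /* once, before any stripe locks */
    /* ... per-stripe setup omitted ... */
    spin_lock_irq(&sh->stripe_lock);
    for (d = 0; d < conf->raid_disks; d++) {
            if (d == sh->pd_idx || d == sh->qd_idx)
                    continue;               /* skip the parity devices */
            sh->dev[d].towrite = bi;
            set_bit(R5_OVERWRITE, &sh->dev[d].flags);
            raid5_inc_bi_active_stripes(bi);
            md_write_inc(mddev, bi);        /* plain atomic_inc(), never sleeps */
            sh->overwrite_disks++;
    }
    spin_unlock_irq(&sh->stripe_lock);

md_write_end() is then called once at the end of make_discard_request() to
balance the initial md_write_start(), just as in raid5_make_request().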

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
drivers/md/md.c
drivers/md/md.h
drivers/md/raid5-cache.c
drivers/md/raid5.c

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 42e68b2e0b41dea1fdd46cfd36b1f2d55ca59ab3..41f766ab824af1bf7e1427921aebc32ac625167b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7907,6 +7907,23 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
 }
 EXPORT_SYMBOL(md_write_start);
 
+/* md_write_inc can only be called when md_write_start() has
+ * already been called at least once for the current request.
+ * It increments the counter and is useful when a single request
+ * is split into several parts.  Each part causes an increment and
+ * so needs a matching md_write_end().
+ * Unlike md_write_start(), it is safe to call md_write_inc() inside
+ * a spinlocked region.
+ */
+void md_write_inc(struct mddev *mddev, struct bio *bi)
+{
+       if (bio_data_dir(bi) != WRITE)
+               return;
+       WARN_ON_ONCE(mddev->in_sync || mddev->ro);
+       atomic_inc(&mddev->writes_pending);
+}
+EXPORT_SYMBOL(md_write_inc);
+
 void md_write_end(struct mddev *mddev)
 {
        if (atomic_dec_and_test(&mddev->writes_pending)) {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index e0940064c3ec1c4778db2f726e80f7a4975088e6..0cd12721a53659fa2128c79e0cadc52fc32e147c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -648,6 +648,7 @@ extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
 extern void md_write_start(struct mddev *mddev, struct bio *bi);
+extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
 extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 64493132470b527683771ba2d5d3dcd2d2ea606e..f5034ecb4e942f0efcb218b0ad0b30e308e22f5e 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -318,8 +318,8 @@ r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
        while (wbi && wbi->bi_iter.bi_sector <
               dev->sector + STRIPE_SECTORS) {
                wbi2 = r5_next_bio(wbi, dev->sector);
+               md_write_end(conf->mddev);
                if (!raid5_dec_bi_active_stripes(wbi)) {
-                       md_write_end(conf->mddev);
                        bio_list_add(return_bi, wbi);
                }
                wbi = wbi2;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 88cc8981bd4991ea4d65e4219e1a2c55ce8b5c9f..a684003fc9651bf6453234202049d4a5b307af8e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3274,6 +3274,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
                bi->bi_next = *bip;
        *bip = bi;
        raid5_inc_bi_active_stripes(bi);
+       md_write_inc(conf->mddev, bi);
 
        if (forwrite) {
                /* check if page is covered */
@@ -3397,10 +3398,9 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 
                        bi->bi_error = -EIO;
-                       if (!raid5_dec_bi_active_stripes(bi)) {
-                               md_write_end(conf->mddev);
+                       md_write_end(conf->mddev);
+                       if (!raid5_dec_bi_active_stripes(bi))
                                bio_list_add(return_bi, bi);
-                       }
                        bi = nextbi;
                }
                if (bitmap_end)
@@ -3421,10 +3421,9 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 
                        bi->bi_error = -EIO;
-                       if (!raid5_dec_bi_active_stripes(bi)) {
-                               md_write_end(conf->mddev);
+                       md_write_end(conf->mddev);
+                       if (!raid5_dec_bi_active_stripes(bi))
                                bio_list_add(return_bi, bi);
-                       }
                        bi = bi2;
                }
 
@@ -3781,10 +3780,9 @@ returnbi:
                                while (wbi && wbi->bi_iter.bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
-                                       if (!raid5_dec_bi_active_stripes(wbi)) {
-                                               md_write_end(conf->mddev);
+                                       md_write_end(conf->mddev);
+                                       if (!raid5_dec_bi_active_stripes(wbi))
                                                bio_list_add(return_bi, wbi);
-                                       }
                                        wbi = wbi2;
                                }
                                bitmap_endwrite(conf->mddev->bitmap, sh->sector,
@@ -5487,6 +5485,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
+       md_write_start(mddev, bi);
 
        stripe_sectors = conf->chunk_sectors *
                (conf->raid_disks - conf->max_degraded);
@@ -5533,6 +5532,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                        sh->dev[d].towrite = bi;
                        set_bit(R5_OVERWRITE, &sh->dev[d].flags);
                        raid5_inc_bi_active_stripes(bi);
+                       md_write_inc(mddev, bi);
                        sh->overwrite_disks++;
                }
                spin_unlock_irq(&sh->stripe_lock);
@@ -5555,9 +5555,9 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                release_stripe_plug(mddev, sh);
        }
 
+       md_write_end(mddev);
        remaining = raid5_dec_bi_active_stripes(bi);
        if (remaining == 0) {
-               md_write_end(mddev);
                bio_endio(bi);
        }
 }
@@ -5592,8 +5592,6 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                do_flush = bi->bi_opf & REQ_PREFLUSH;
        }
 
-       md_write_start(mddev, bi);
-
        /*
         * If array is degraded, better not do chunk aligned read because
         * later we might have to read it again in order to reconstruct
@@ -5615,6 +5613,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
        last_sector = bio_end_sector(bi);
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
+       md_write_start(mddev, bi);
 
        prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
@@ -5749,11 +5748,11 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
        }
        finish_wait(&conf->wait_for_overlap, &w);
 
+       if (rw == WRITE)
+               md_write_end(mddev);
        remaining = raid5_dec_bi_active_stripes(bi);
        if (remaining == 0) {
 
-               if ( rw == WRITE )
-                       md_write_end(mddev);
 
                trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                                         bi, 0);