md/raid10: stop using bi_phys_segments
author    NeilBrown <neilb@suse.com>
          Wed, 15 Mar 2017 03:05:13 +0000 (14:05 +1100)
committer Shaohua Li <shli@fb.com>
          Thu, 23 Mar 2017 02:17:41 +0000 (19:17 -0700)
raid10 currently repurposes bi_phys_segments on each
incoming bio to count how many r10bios were used to encode
the request.

We need to know when the number of attached r10bios
reaches zero to:
1/ call bio_endio() when all IO on the bio is finished
2/ decrement ->nr_pending so that resync IO can proceed.
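
For reference, this is the pattern the patch removes (a condensed
sketch of the old accounting, distilled from the hunks below;
conf->device_lock is needed because bi_phys_segments is a plain int):

	/* Attaching an extra r10bio to a bio, old scheme: */
	spin_lock_irq(&conf->device_lock);
	if (bio->bi_phys_segments == 0)
		bio->bi_phys_segments = 2;	/* first split: new r10bio plus the original */
	else
		bio->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);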

Now that the bio has its own __bi_remaining counter, that
can be used instead. We can call bio_inc_remaining to
increment the counter and call bio_endio() every time an
r10bio completes, rather than only when bi_phys_segments
reaches zero.
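
In outline, each extra r10bio now pairs one bio_inc_remaining()
with the bio_endio() issued when that r10bio completes; only the
final bio_endio() actually ends the master bio (a sketch, not
verbatim driver code):

	/* When splitting a request into another r10bio: */
	bio_inc_remaining(bio);	/* __bi_remaining++, master bio stays alive */

	/* When any one r10bio completes: */
	bio_endio(bio);		/* __bi_remaining--; the last call completes the bio */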

This addresses point 1, but not point 2.  bio_endio()
doesn't (and cannot) report when the last r10bio has
finished, so a different approach is needed.

So, in addition to counting bios in ->nr_pending, count r10bios:
every time we attach a bio, increment nr_pending; every time
an r10bio completes, decrement nr_pending.
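
The decrement side now lives in raid_end_bio_io(), run once per
r10bio (taken from the first hunk below):

	bio_endio(bio);		/* completes the master bio only on the last r10bio */
	allow_barrier(conf);	/* nr_pending--; may wake a waiting resync thread */
	free_r10bio(r10_bio);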

Normally we only increment nr_pending after first checking
that ->barrier is zero, or after other non-trivial tests and
possible waiting.  When attaching multiple r10bios to a bio,
we only need the tests and the waiting once.  After the
first increment, subsequent increments can happen
unconditionally as they are really all part of the one
request.

So introduce inc_pending() which can be used when we know
that nr_pending is already elevated.
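
The increment side then looks like this (a sketch assembled from
the read_again/retry_write hunks below):

	wait_barrier(conf);	/* first r10bio: check ->barrier, possibly wait, nr_pending++ */
	...
	/* each later r10bio for the same bio, e.g. writing around bad blocks: */
	inc_pending(conf);	/* nr_pending is already elevated; bump it unconditionally */
	bio_inc_remaining(bio);	/* keep the master bio alive for the extra r10bio */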

Note that this fixes a bug.  freeze_array() contains the line
atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
which implies that the units for ->nr_pending, ->nr_queued and extra
are the same.
->nr_queued and extra count r10_bios, but prior to this patch,
->nr_pending counted bios.  If a bio ever resulted in multiple
r10_bios (due to bad blocks), freeze_array() would not work correctly.
Now it does.
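
For context, the relevant wait in freeze_array(), slightly
abridged (shape as in the driver of this era):

	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	/* only balances if nr_pending, nr_queued and extra use the same units */
	wait_event_lock_irq_cmd(conf->wait_barrier,
				atomic_read(&conf->nr_pending) == conf->nr_queued + extra,
				conf->resync_lock,
				flush_pending_writes(conf));
	conf->nr_waiting--;
	spin_unlock_irq(&conf->resync_lock);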

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
drivers/md/raid10.c

index c7c5b2693fc96ac2033524c067648c470bede1e9..0f1b78b386491e3cbddd7bd5cbd3ff8a841dbf02 100644
@@ -301,27 +301,18 @@ static void reschedule_retry(struct r10bio *r10_bio)
 static void raid_end_bio_io(struct r10bio *r10_bio)
 {
        struct bio *bio = r10_bio->master_bio;
-       int done;
        struct r10conf *conf = r10_bio->mddev->private;
 
-       if (bio->bi_phys_segments) {
-               unsigned long flags;
-               spin_lock_irqsave(&conf->device_lock, flags);
-               bio->bi_phys_segments--;
-               done = (bio->bi_phys_segments == 0);
-               spin_unlock_irqrestore(&conf->device_lock, flags);
-       } else
-               done = 1;
        if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
                bio->bi_error = -EIO;
-       if (done) {
-               bio_endio(bio);
-               /*
-                * Wake up any possible resync thread that waits for the device
-                * to go idle.
-                */
-               allow_barrier(conf);
-       }
+
+       bio_endio(bio);
+       /*
+        * Wake up any possible resync thread that waits for the device
+        * to go idle.
+        */
+       allow_barrier(conf);
+
        free_r10bio(r10_bio);
 }
 
@@ -985,6 +976,15 @@ static void wait_barrier(struct r10conf *conf)
        spin_unlock_irq(&conf->resync_lock);
 }
 
+static void inc_pending(struct r10conf *conf)
+{
+       /* The current request requires multiple r10_bio, so
+        * we need to increment the pending count.
+        */
+       WARN_ON(!atomic_read(&conf->nr_pending));
+       atomic_inc(&conf->nr_pending);
+}
+
 static void allow_barrier(struct r10conf *conf)
 {
        if ((atomic_dec_and_test(&conf->nr_pending)) ||
@@ -1162,12 +1162,8 @@ read_again:
                sectors_handled = (r10_bio->sector + max_sectors
                                   - bio->bi_iter.bi_sector);
                r10_bio->sectors = max_sectors;
-               spin_lock_irq(&conf->device_lock);
-               if (bio->bi_phys_segments == 0)
-                       bio->bi_phys_segments = 2;
-               else
-                       bio->bi_phys_segments++;
-               spin_unlock_irq(&conf->device_lock);
+               inc_pending(conf);
+               bio_inc_remaining(bio);
                /*
                 * Cannot call generic_make_request directly as that will be
                 * queued in __generic_make_request and subsequent
@@ -1262,9 +1258,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
         * on which we have seen a write error, we want to avoid
         * writing to those blocks.  This potentially requires several
         * writes to write around the bad blocks.  Each set of writes
-        * gets its own r10_bio with a set of bios attached.  The number
-        * of r10_bios is recored in bio->bi_phys_segments just as with
-        * the read case.
+        * gets its own r10_bio with a set of bios attached.
         */
 
        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
@@ -1495,15 +1489,9 @@ retry_write:
         */
 
        if (sectors_handled < bio_sectors(bio)) {
-               /* We need another r10_bio and it needs to be counted
-                * in bio->bi_phys_segments.
-                */
-               spin_lock_irq(&conf->device_lock);
-               if (bio->bi_phys_segments == 0)
-                       bio->bi_phys_segments = 2;
-               else
-                       bio->bi_phys_segments++;
-               spin_unlock_irq(&conf->device_lock);
+               /* We need another r10_bio and it needs to be counted */
+               inc_pending(conf);
+               bio_inc_remaining(bio);
                one_write_done(r10_bio);
                r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
@@ -1532,16 +1520,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        r10_bio->sector = bio->bi_iter.bi_sector;
        r10_bio->state = 0;
 
-       /*
-        * We might need to issue multiple reads to different devices if there
-        * are bad blocks around, so we keep track of the number of reads in
-        * bio->bi_phys_segments.  If this is 0, there is only one r10_bio and
-        * no locking will be needed when the request completes.  If it is
-        * non-zero, then it is the number of not-completed requests.
-        */
-       bio->bi_phys_segments = 0;
-       bio_clear_flag(bio, BIO_SEG_VALID);
-
        if (bio_data_dir(bio) == READ)
                raid10_read_request(mddev, bio, r10_bio);
        else
@@ -2693,12 +2671,8 @@ read_more:
                        r10_bio->sector + max_sectors
                        - mbio->bi_iter.bi_sector;
                r10_bio->sectors = max_sectors;
-               spin_lock_irq(&conf->device_lock);
-               if (mbio->bi_phys_segments == 0)
-                       mbio->bi_phys_segments = 2;
-               else
-                       mbio->bi_phys_segments++;
-               spin_unlock_irq(&conf->device_lock);
+               bio_inc_remaining(mbio);
+               inc_pending(conf);
                generic_make_request(bio);
 
                r10_bio = mempool_alloc(conf->r10bio_pool,