git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
bio: skip atomic inc/dec of ->bi_remaining for non-chains
author: Jens Axboe <axboe@fb.com>
Fri, 17 Apr 2015 22:15:18 +0000 (16:15 -0600)
committer: Jens Axboe <axboe@fb.com>
Tue, 5 May 2015 19:32:47 +0000 (13:32 -0600)
Struct bio has an atomic ref count for chained bio's, and we use this
to know when to end IO on the bio. However, most bio's are not chained,
so we don't need to always introduce this atomic operation as part of
ending IO.

Add a helper to elevate the bi_remaining count, and flag the bio as
now actually needing the decrement at end_io time. Rename the field
to __bi_remaining to catch any current users of this doing the
incrementing manually.

For high IOPS workloads, this reduces the overhead of bio_endio()
substantially.

Tested-by: Robert Elliott <elliott@hp.com>
Acked-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/bio.c
drivers/md/dm-cache-target.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm-thin.c
include/linux/bio.h
include/linux/blk_types.h

index f66a4eae16ee4a96c9469c7a9311de3437a923c5..117da319afb62c7a33d59479bf997a6245943440 100644 (file)
@@ -270,7 +270,7 @@ void bio_init(struct bio *bio)
 {
        memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
-       atomic_set(&bio->bi_remaining, 1);
+       atomic_set(&bio->__bi_remaining, 1);
        atomic_set(&bio->bi_cnt, 1);
 }
 EXPORT_SYMBOL(bio_init);
@@ -292,8 +292,8 @@ void bio_reset(struct bio *bio)
        __bio_free(bio);
 
        memset(bio, 0, BIO_RESET_BYTES);
-       bio->bi_flags = flags|(1 << BIO_UPTODATE);
-       atomic_set(&bio->bi_remaining, 1);
+       bio->bi_flags = flags | (1 << BIO_UPTODATE);
+       atomic_set(&bio->__bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
@@ -320,7 +320,7 @@ void bio_chain(struct bio *bio, struct bio *parent)
 
        bio->bi_private = parent;
        bio->bi_end_io  = bio_chain_endio;
-       atomic_inc(&parent->bi_remaining);
+       bio_inc_remaining(parent);
 }
 EXPORT_SYMBOL(bio_chain);
 
@@ -1741,6 +1741,23 @@ void bio_flush_dcache_pages(struct bio *bi)
 EXPORT_SYMBOL(bio_flush_dcache_pages);
 #endif
 
+static inline bool bio_remaining_done(struct bio *bio)
+{
+       /*
+        * If we're not chaining, then ->__bi_remaining is always 1 and
+        * we always end io on the first invocation.
+        */
+       if (!bio_flagged(bio, BIO_CHAIN))
+               return true;
+
+       BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
+
+       if (atomic_dec_and_test(&bio->__bi_remaining))
+               return true;
+
+       return false;
+}
+
 /**
  * bio_endio - end I/O on a bio
  * @bio:       bio
@@ -1758,15 +1775,13 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
 void bio_endio(struct bio *bio, int error)
 {
        while (bio) {
-               BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
-
                if (error)
                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                        error = -EIO;
 
-               if (!atomic_dec_and_test(&bio->bi_remaining))
-                       return;
+               if (unlikely(!bio_remaining_done(bio)))
+                       break;
 
                /*
                 * Need to have a real endio function for chained bios,
@@ -1799,7 +1814,12 @@ EXPORT_SYMBOL(bio_endio);
  **/
 void bio_endio_nodec(struct bio *bio, int error)
 {
-       atomic_inc(&bio->bi_remaining);
+       /*
+        * If it's not flagged as a chain, we are not going to dec the count
+        */
+       if (bio_flagged(bio, BIO_CHAIN))
+               bio_inc_remaining(bio);
+
        bio_endio(bio, error);
 }
 EXPORT_SYMBOL(bio_endio_nodec);
index 7755af35186762a4319e8cff52d4e95b26524d3e..705eb7b99d691f8e33d012aaf94855b04480b126 100644 (file)
@@ -91,7 +91,7 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
         * Must bump bi_remaining to allow bio to complete with
         * restored bi_end_io.
         */
-       atomic_inc(&bio->bi_remaining);
+       bio_inc_remaining(bio);
 }
 
 /*----------------------------------------------------------------*/
index 089d62751f7ff2a3aedf7e441cb88bec0d06b8a7..d6a1c096b77719d3eb70dbcb34d8952b9882353c 100644 (file)
@@ -1254,7 +1254,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                        dm_bio_restore(bd, bio);
                        bio_record->details.bi_bdev = NULL;
 
-                       atomic_inc(&bio->bi_remaining);
+                       bio_inc_remaining(bio);
 
                        queue_bio(ms, bio, rw);
                        return DM_ENDIO_INCOMPLETE;
index f83a0f3fc3656680c7bdba2dcd4bdaaac9f2f624..8bfeae2185316cf6c4b8829570b2279a8c9fdd85 100644 (file)
@@ -1478,7 +1478,7 @@ out:
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
-               atomic_inc(&full_bio->bi_remaining);
+               bio_inc_remaining(full_bio);
        }
        increment_pending_exceptions_done_count();
 
index 921aafd12aee6754c373fbbd0df8941219b5c1eb..342dbdad613181920c8b1c39831d29e4c43a68db 100644 (file)
@@ -795,7 +795,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
        if (m->bio) {
                m->bio->bi_end_io = m->saved_bi_end_io;
-               atomic_inc(&m->bio->bi_remaining);
+               bio_inc_remaining(m->bio);
        }
        cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
@@ -812,7 +812,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        bio = m->bio;
        if (bio) {
                bio->bi_end_io = m->saved_bi_end_io;
-               atomic_inc(&bio->bi_remaining);
+               bio_inc_remaining(bio);
        }
 
        if (m->err) {
index da3a127c99583ba1c38d9cad1ed35550bb9666ca..8bfe9eee6d1a0ef8d1ebc4a397f98a628c7c5364 100644 (file)
@@ -644,6 +644,17 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
        return bio;
 }
 
+/*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+       bio->bi_flags |= (1 << BIO_CHAIN);
+       smp_mb__before_atomic();
+       atomic_inc(&bio->__bi_remaining);
+}
+
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
index a1b25e35ea5f9fc2978b7f62917c6b4e39c3dc75..8b07e06038871bd3a0bdd09a8fd9313ab688d6ea 100644 (file)
@@ -65,7 +65,7 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
-       atomic_t                bi_remaining;
+       atomic_t                __bi_remaining;
 
        bio_end_io_t            *bi_end_io;
 
@@ -122,6 +122,7 @@ struct bio {
 #define BIO_NULL_MAPPED 8      /* contains invalid user pages */
 #define BIO_QUIET      9       /* Make BIO Quiet */
 #define BIO_SNAP_STABLE        10      /* bio data must be snapshotted during write */
+#define BIO_CHAIN      11      /* chained bio, ->bi_remaining in effect */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes