dm thin: use bool rather than unsigned for flags in structures
author    Mike Snitzer <snitzer@redhat.com>
          Tue, 17 Dec 2013 18:43:31 +0000 (13:43 -0500)
committer Mike Snitzer <snitzer@redhat.com>
          Tue, 7 Jan 2014 15:14:18 +0000 (10:14 -0500)
Also, move the 'err' member in the dm_thin_new_mapping structure to
eliminate a 4-byte hole (reducing the structure size from 88 bytes to 80).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c

diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 2edf5dbac76a94381d10c12de7200f8f296a96b9..9a368567632f9733f04bc0a1aebfbc3d871f13df 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -131,7 +131,7 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td);
 
 struct dm_thin_lookup_result {
        dm_block_t block;
-       unsigned shared:1;
+       bool shared:1;
 };
 
 /*
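
One practical difference between the two bitfield types (an illustrative
aside; the commit message itself does not spell out a rationale): an
unsigned:1 field silently keeps only the low bit of whatever is assigned,
while a bool:1 field normalizes any nonzero value to 1. A minimal
standalone sketch in C:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical struct, only for demonstrating bitfield semantics. */
struct flags {
	unsigned u:1;	/* stores value & 1 (GCC typically warns via -Woverflow) */
	bool     b:1;	/* converts the assigned value to _Bool first */
};

int main(void)
{
	struct flags f = {0};

	f.u = 2;	/* low bit of 2 is 0 -> stored as 0 */
	f.b = 2;	/* nonzero -> normalized to 1 (true) */
	printf("u=%d b=%d\n", (int)f.u, (int)f.b);	/* prints: u=0 b=1 */
	return 0;
}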
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 51e656a3002c1b0184860631b38e63c551162095..5f1b11e45702096856b4fdbe407dbcd16382a2cf 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -509,16 +509,16 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 struct dm_thin_new_mapping {
        struct list_head list;
 
-       unsigned quiesced:1;
-       unsigned prepared:1;
-       unsigned pass_discard:1;
-       unsigned definitely_not_shared:1;
+       bool quiesced:1;
+       bool prepared:1;
+       bool pass_discard:1;
+       bool definitely_not_shared:1;
 
+       int err;
        struct thin_c *tc;
        dm_block_t virt_block;
        dm_block_t data_block;
        struct dm_bio_prison_cell *cell, *cell2;
-       int err;
 
        /*
         * If the bio covers the whole area of a block then we can avoid
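
To make the commit message's 4-byte hole concrete, here is a minimal
sketch (an illustrative aside assuming a typical 64-bit LP64 ABI; the
layouts are simplified from dm_thin_new_mapping, and the before_move /
after_move names are hypothetical). The four 1-bit flags pack into a
single byte, which the compiler pads out to the 8-byte alignment of the
pointer that follows; an 'err' left at the end of the structure then
costs 4 bytes plus 4 bytes of tail padding. Moving 'err' up into the
padding after the flag byte saves 8 bytes in total, the same mechanism
behind the 88-to-80-byte reduction on the full structure:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct before_move {
	bool quiesced:1, prepared:1, pass_discard:1, definitely_not_shared:1;
	/* 1 byte of flags + 7 bytes of padding to align 'tc' */
	void *tc;
	uint64_t virt_block;
	uint64_t data_block;
	void *cell, *cell2;
	int err;	/* 4 bytes + 4 bytes of tail padding */
};

struct after_move {
	bool quiesced:1, prepared:1, pass_discard:1, definitely_not_shared:1;
	int err;	/* fills the padding after the flag byte */
	void *tc;
	uint64_t virt_block;
	uint64_t data_block;
	void *cell, *cell2;
};

int main(void)
{
	/* On LP64 this prints "56 48": the move saves 8 bytes. */
	printf("%zu %zu\n", sizeof(struct before_move),
	       sizeof(struct after_move));
	return 0;
}

(The pahole tool from the dwarves package reports such holes directly on
compiled kernel objects.)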
@@ -549,7 +549,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
        m->err = read_err || write_err ? -EIO : 0;
 
        spin_lock_irqsave(&pool->lock, flags);
-       m->prepared = 1;
+       m->prepared = true;
        __maybe_add_mapping(m);
        spin_unlock_irqrestore(&pool->lock, flags);
 }
@@ -564,7 +564,7 @@ static void overwrite_endio(struct bio *bio, int err)
        m->err = err;
 
        spin_lock_irqsave(&pool->lock, flags);
-       m->prepared = 1;
+       m->prepared = true;
        __maybe_add_mapping(m);
        spin_unlock_irqrestore(&pool->lock, flags);
 }
@@ -788,7 +788,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
        m->cell = cell;
 
        if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
-               m->quiesced = 1;
+               m->quiesced = true;
 
        /*
         * IO to pool_dev remaps to the pool target's data_dev.
@@ -848,8 +848,8 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
        struct pool *pool = tc->pool;
        struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-       m->quiesced = 1;
-       m->prepared = 0;
+       m->quiesced = true;
+       m->prepared = false;
        m->tc = tc;
        m->virt_block = virt_block;
        m->data_block = data_block;
@@ -2904,7 +2904,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
                spin_lock_irqsave(&pool->lock, flags);
                list_for_each_entry_safe(m, tmp, &work, list) {
                        list_del(&m->list);
-                       m->quiesced = 1;
+                       m->quiesced = true;
                        __maybe_add_mapping(m);
                }
                spin_unlock_irqrestore(&pool->lock, flags);