btrfs: cleanup 64bit/32bit divs, compile time constants
author	David Sterba <dsterba@suse.cz>
	Fri, 16 Jan 2015 16:21:12 +0000 (17:21 +0100)
committer	David Sterba <dsterba@suse.cz>
	Fri, 20 Feb 2015 17:22:16 +0000 (18:22 +0100)
Switch to div_u64 if the divisor is a numeric constant or a sum of
sizeof()s. We can remove a few instances of do_div, which has the hidden
semantics of changing its 1st argument.

Small power-of-two divisors are converted to bitshifts; large values are
kept intact for clarity.

Signed-off-by: David Sterba <dsterba@suse.cz>
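As a side note (not part of the commit), a minimal sketch of the semantics being cleaned up, assuming <linux/math64.h> and a made-up helper name: do_div() divides its first u64 argument in place and evaluates to the remainder, div_u64() returns the quotient and leaves its argument alone, and a power-of-two divisor can simply become a shift.

#include <linux/kernel.h>
#include <linux/math64.h>

/* Illustrative only; the helper name is hypothetical. */
static u64 div_semantics_sketch(u64 bytes)
{
	u32 rem;

	rem = do_div(bytes, 10);	/* bytes becomes bytes / 10, rem holds the old bytes % 10 */
	bytes = div_u64(bytes, 10);	/* ordinary call: the quotient must be assigned back */
	bytes >>= 2;			/* same result as div_u64(bytes, 4) for a power-of-two divisor */

	return bytes + rem;		/* keep both results live to avoid unused-variable warnings */
}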
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/math.h
fs/btrfs/super.c

index 5ec03d999c37bfb3e48d937abd79b6dc57cd5257..0573848c73337f2f7848dfdd2fd05e31d5f26a56 100644 (file)
@@ -670,8 +670,8 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
        case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
        case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
                srcdev = dev_replace->srcdev;
-               args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
-                       div64_u64(btrfs_device_get_total_bytes(srcdev), 1000));
+               args->status.progress_1000 = div_u64(dev_replace->cursor_left,
+                       div_u64(btrfs_device_get_total_bytes(srcdev), 1000));
                break;
        }
        btrfs_dev_replace_unlock(dev_replace);
@@ -806,7 +806,7 @@ static int btrfs_dev_replace_kthread(void *data)
                btrfs_dev_replace_status(fs_info, status_args);
                progress = status_args->status.progress_1000;
                kfree(status_args);
-               do_div(progress, 10);
+               progress = div_u64(progress, 10);
                printk_in_rcu(KERN_INFO
                        "BTRFS: continuing dev_replace from %s (devid %llu) to %s @%u%%\n",
                        dev_replace->srcdev->missing ? "<missing disk>" :
index 41b320e235d7ccf3fda2f3b6894501abd68cb78b..73b1521382217384bfbc3a2af44726311258887a 100644 (file)
@@ -2277,7 +2277,7 @@ int open_ctree(struct super_block *sb,
        fs_info->free_chunk_space = 0;
        fs_info->tree_mod_log = RB_ROOT;
        fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
-       fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
+       fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
        /* readahead state */
        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
        spin_lock_init(&fs_info->reada_lock);
index 28ce5c8004d456da3d202d687465a755c2d4432b..2cb32bc45bcc8af4bef8430fe9136b38d276b35b 100644 (file)
@@ -2561,8 +2561,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                 */
                spin_lock(&delayed_refs->lock);
                avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
-               avg = div64_u64(avg, 4);
-               fs_info->avg_delayed_ref_runtime = avg;
+               fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
                spin_unlock(&delayed_refs->lock);
        }
        return 0;
@@ -2624,7 +2623,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
         * We don't ever fill up leaves all the way so multiply by 2 just to be
         * closer to what we're really going to want to use.
         */
-       return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+       return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
 
 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
@@ -3193,7 +3192,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
        struct inode *inode = NULL;
        u64 alloc_hint = 0;
        int dcs = BTRFS_DC_ERROR;
-       int num_pages = 0;
+       u64 num_pages = 0;
        int retries = 0;
        int ret = 0;
 
@@ -3277,7 +3276,7 @@ again:
         * taking up quite a bit since it's not folded into the other space
         * cache.
         */
-       num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
+       num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
        if (!num_pages)
                num_pages = 1;
 
@@ -4770,10 +4769,10 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
 
        num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
                    csum_size * 2;
-       num_bytes += div64_u64(data_used + meta_used, 50);
+       num_bytes += div_u64(data_used + meta_used, 50);
 
        if (num_bytes * 3 > meta_used)
-               num_bytes = div64_u64(meta_used, 3);
+               num_bytes = div_u64(meta_used, 3);
 
        return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
 }
@@ -5039,7 +5038,7 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
        else
                BTRFS_I(inode)->csum_bytes -= num_bytes;
        csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
-       num_csums_per_leaf = (int)div64_u64(csum_size,
+       num_csums_per_leaf = (int)div_u64(csum_size,
                                            sizeof(struct btrfs_csum_item) +
                                            sizeof(struct btrfs_disk_key));
        num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
index a71978578fa71a11d83f8c435541b807eedc2b51..43007c15dcba6b49a5ce612b0e9a6d010123462c 100644 (file)
@@ -1537,7 +1537,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
                max_bytes = MAX_CACHE_BYTES_PER_GIG;
        else
                max_bytes = MAX_CACHE_BYTES_PER_GIG *
-                       div64_u64(size, 1024 * 1024 * 1024);
+                       div_u64(size, 1024 * 1024 * 1024);
 
        /*
         * we want to account for 1 more bitmap than what we have so we can make
@@ -1552,14 +1552,14 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
        }
 
        /*
-        * we want the extent entry threshold to always be at most 1/2 the maxw
+        * we want the extent entry threshold to always be at most 1/2 the max
         * bytes we can have, or whatever is less than that.
         */
        extent_bytes = max_bytes - bitmap_bytes;
-       extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
+       extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
 
        ctl->extents_thresh =
-               div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
+               div_u64(extent_bytes, sizeof(struct btrfs_free_space));
 }
 
 static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
index b7816cefbd1312247104821e3e2932569dea66a3..1b10a3cd1195eae7401eb46ba4c9d9bb9b5bb4de 100644 (file)
@@ -28,8 +28,7 @@ static inline u64 div_factor(u64 num, int factor)
        if (factor == 10)
                return num;
        num *= factor;
-       do_div(num, 10);
-       return num;
+       return div_u64(num, 10);
 }
 
 static inline u64 div_factor_fine(u64 num, int factor)
@@ -37,8 +36,7 @@ static inline u64 div_factor_fine(u64 num, int factor)
        if (factor == 100)
                return num;
        num *= factor;
-       do_div(num, 100);
-       return num;
+       return div_u64(num, 100);
 }
 
 #endif
index 100a0442c413f51626ab8a390752fc76b7fb0f48..530a0baa7c713459114444660a9497c659d12d9e 100644 (file)
@@ -1704,7 +1704,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
                avail_space = device->total_bytes - device->bytes_used;
 
                /* align with stripe_len */
-               do_div(avail_space, BTRFS_STRIPE_LEN);
+               avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN);
                avail_space *= BTRFS_STRIPE_LEN;
 
                /*