if (reclaim_thresh == 0)
                return false;
 
-       thresh = div_factor_fine(bg->length, reclaim_thresh);
+       thresh = mult_perc(bg->length, reclaim_thresh);
 
        /*
         * If we were below the threshold before don't reclaim, we are likely a
         * brand new block group and we don't want to relocate new block groups.
         */
        if (force == CHUNK_ALLOC_LIMITED) {
                thresh = btrfs_super_total_bytes(fs_info->super_copy);
-               thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
+               thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1));
 
                if (sinfo->total_bytes - bytes_used < thresh)
                        return 1;
        }
 
-       if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
+       if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
                return 0;
        return 1;
 }
 
        return ret;
 }
 
-int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
 {
        u64 num_bytes = 0;
        int ret = -ENOSPC;
                return 0;
 
        spin_lock(&block_rsv->lock);
-       num_bytes = div_factor(block_rsv->size, min_factor);
+       num_bytes = mult_perc(block_rsv->size, min_percent);
        if (block_rsv->reserved >= num_bytes)
                ret = 0;
        spin_unlock(&block_rsv->lock);
 
 int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
                        struct btrfs_block_rsv *block_rsv, u64 num_bytes,
                        enum btrfs_reserve_flush_enum flush);
-int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
 int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
                           struct btrfs_block_rsv *block_rsv, u64 min_reserved,
                           enum btrfs_reserve_flush_enum flush);
 
                btrfs_mark_bg_unused(block_group);
        } else if (bg_reclaim_threshold &&
                   reclaimable_unusable >=
-                  div_factor_fine(block_group->zone_capacity,
-                                  bg_reclaim_threshold)) {
+                  mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) {
                btrfs_mark_bg_to_reclaim(block_group);
        }
 
 
                wake_up(wq);
 }
 
-static inline u64 div_factor(u64 num, int factor)
+/*
+ * Return @percent percent of @num, i.e. (num * percent) / 100.
+ *
+ * Replaces the old div_factor() (tenths) and div_factor_fine()
+ * (percent) helpers with a single percent-based one.
+ *
+ * NOTE(review): the product num * percent is evaluated in u64 and can
+ * wrap for very large @num (num > U64_MAX / percent); the div_factor
+ * helpers this replaces had the same limitation — confirm callers stay
+ * below that range.
+ */
+static inline u64 mult_perc(u64 num, u32 percent)
{
-       if (factor == 10)
-               return num;
-       num *= factor;
-       return div_u64(num, 10);
+       return div_u64(num * percent, 100);
}
-
-static inline u64 div_factor_fine(u64 num, int factor)
-{
-       if (factor == 100)
-               return num;
-       num *= factor;
-       return div_u64(num, 100);
-}
-
 /* Copy of is_power_of_two that is 64bit safe */
 static inline bool is_power_of_two_u64(u64 n)
 {
 
        u64 thresh;
        u64 used;
 
-       thresh = div_factor_fine(total, 90);
+       thresh = mult_perc(total, 90);
 
        lockdep_assert_held(&space_info->lock);
 
                return false;
 
        spin_lock(&global_rsv->lock);
-       min_bytes = div_factor(global_rsv->size, 1);
+       min_bytes = mult_perc(global_rsv->size, 10);
        if (global_rsv->reserved < min_bytes + ticket->bytes) {
                spin_unlock(&global_rsv->lock);
                return false;
 
        val = min(val, BTRFS_MAX_DATA_CHUNK_SIZE);
 
        /* Limit stripe size to 10% of available space. */
-       val = min(div_factor(fs_info->fs_devices->total_rw_bytes, 1), val);
+       val = min(mult_perc(fs_info->fs_devices->total_rw_bytes, 10), val);
 
        /* Must be multiple of 256M. */
        val &= ~((u64)SZ_256M - 1);
 
        if (btrfs_check_space_for_delayed_refs(fs_info))
                return true;
 
-       return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
+       return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 50);
 }
 
 bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
 
        if (bargs->usage_min == 0)
                user_thresh_min = 0;
        else
-               user_thresh_min = div_factor_fine(cache->length,
-                                                 bargs->usage_min);
+               user_thresh_min = mult_perc(cache->length, bargs->usage_min);
 
        if (bargs->usage_max == 0)
                user_thresh_max = 1;
        else if (bargs->usage_max > 100)
                user_thresh_max = cache->length;
        else
-               user_thresh_max = div_factor_fine(cache->length,
-                                                 bargs->usage_max);
+               user_thresh_max = mult_perc(cache->length, bargs->usage_max);
 
        if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
                ret = 0;
        else if (bargs->usage > 100)
                user_thresh = cache->length;
        else
-               user_thresh = div_factor_fine(cache->length, bargs->usage);
+               user_thresh = mult_perc(cache->length, bargs->usage);
 
        if (chunk_used < user_thresh)
                ret = 0;
                ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
 
        /* We don't want a chunk larger than 10% of writable space */
-       ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
+       ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
                                  ctl->max_chunk_size);
        ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
 }
        }
 
        /* We don't want a chunk larger than 10% of writable space */
-       limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
+       limit = max(round_down(mult_perc(fs_devices->total_rw_bytes, 10),
                               zone_size),
                    min_chunk_size);
        ctl->max_chunk_size = min(limit, ctl->max_chunk_size);