ext4: factor out ext4_mb_might_prefetch()
author Baokun Li <libaokun1@huawei.com>
Mon, 14 Jul 2025 13:03:23 +0000 (21:03 +0800)
committer Theodore Ts'o <tytso@mit.edu>
Fri, 25 Jul 2025 13:14:17 +0000 (09:14 -0400)
Extract ext4_mb_might_prefetch() to make the code clearer and to
prepare for the later conversion of 'choose group' to 'scan groups'.
No functional changes.

Signed-off-by: Baokun Li <libaokun1@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Link: https://patch.msgid.link/20250714130327.1830534-14-libaokun1@huawei.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
fs/ext4/mballoc.c
fs/ext4/mballoc.h

index 650eb6366eb04a54268dd9b111129f8373a3b310..52ec59f58c363a2a2b93e190a03bc64ff78a7a35 100644
@@ -2781,6 +2781,37 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
        return group;
 }
 
+/*
+ * Batch reads of the block allocation bitmaps to get
+ * multiple READs in flight; limit prefetching at inexpensive
+ * CR, otherwise mballoc can spend a lot of time loading
+ * imperfect groups
+ */
+static void ext4_mb_might_prefetch(struct ext4_allocation_context *ac,
+                                  ext4_group_t group)
+{
+       struct ext4_sb_info *sbi;
+
+       if (ac->ac_prefetch_grp != group)
+               return;
+
+       sbi = EXT4_SB(ac->ac_sb);
+       if (ext4_mb_cr_expensive(ac->ac_criteria) ||
+           ac->ac_prefetch_ios < sbi->s_mb_prefetch_limit) {
+               unsigned int nr = sbi->s_mb_prefetch;
+
+               if (ext4_has_feature_flex_bg(ac->ac_sb)) {
+                       nr = 1 << sbi->s_log_groups_per_flex;
+                       nr -= group & (nr - 1);
+                       nr = umin(nr, sbi->s_mb_prefetch);
+               }
+
+               ac->ac_prefetch_nr = nr;
+               ac->ac_prefetch_grp = ext4_mb_prefetch(ac->ac_sb, group, nr,
+                                                      &ac->ac_prefetch_ios);
+       }
+}
+
 /*
  * Prefetching reads the block bitmap into the buffer cache; but we
  * need to make sure that the buddy bitmap in the page cache has been
@@ -2817,10 +2848,9 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
 static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
-       ext4_group_t prefetch_grp = 0, ngroups, group, i;
+       ext4_group_t ngroups, group, i;
        enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
        int err = 0, first_err = 0;
-       unsigned int nr = 0, prefetch_ios = 0;
        struct ext4_sb_info *sbi;
        struct super_block *sb;
        struct ext4_buddy e4b;
@@ -2881,6 +2911,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
                cr = CR_POWER2_ALIGNED;
 
        ac->ac_e4b = &e4b;
+       ac->ac_prefetch_ios = 0;
 repeat:
        for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
                ac->ac_criteria = cr;
@@ -2890,8 +2921,8 @@ repeat:
                 */
                group = ac->ac_g_ex.fe_group;
                ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
-               prefetch_grp = group;
-               nr = 0;
+               ac->ac_prefetch_grp = group;
+               ac->ac_prefetch_nr = 0;
 
                for (i = 0, new_cr = cr; i < ngroups; i++,
                     ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
@@ -2903,24 +2934,7 @@ repeat:
                                goto repeat;
                        }
 
-                       /*
-                        * Batch reads of the block allocation bitmaps
-                        * to get multiple READs in flight; limit
-                        * prefetching at inexpensive CR, otherwise mballoc
-                        * can spend a lot of time loading imperfect groups
-                        */
-                       if ((prefetch_grp == group) &&
-                           (ext4_mb_cr_expensive(cr) ||
-                            prefetch_ios < sbi->s_mb_prefetch_limit)) {
-                               nr = sbi->s_mb_prefetch;
-                               if (ext4_has_feature_flex_bg(sb)) {
-                                       nr = 1 << sbi->s_log_groups_per_flex;
-                                       nr -= group & (nr - 1);
-                                       nr = min(nr, sbi->s_mb_prefetch);
-                               }
-                               prefetch_grp = ext4_mb_prefetch(sb, group,
-                                                       nr, &prefetch_ios);
-                       }
+                       ext4_mb_might_prefetch(ac, group);
 
                        /* prevent unnecessary buddy loading. */
                        if (cr < CR_ANY_FREE &&
@@ -3018,8 +3032,8 @@ out:
                 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
                 ac->ac_flags, cr, err);
 
-       if (nr)
-               ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
+       if (ac->ac_prefetch_nr)
+               ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr);
 
        return err;
 }
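
The flex_bg arithmetic above is the one subtle part of the new helper: with
1 << s_log_groups_per_flex groups per flex group, "nr -= group & (nr - 1)"
leaves the number of groups remaining before the next flex-group boundary
(the mask works because nr is a power of two), and umin() then caps the
batch at s_mb_prefetch. A standalone sketch of just that computation,
using hypothetical names (prefetch_window() is not a kernel function):

#include <stdio.h>

static unsigned int prefetch_window(unsigned int group,
                                    unsigned int log_groups_per_flex,
                                    unsigned int mb_prefetch)
{
        unsigned int nr = 1U << log_groups_per_flex;

        nr -= group & (nr - 1);         /* groups left in this flex group */
        return nr < mb_prefetch ? nr : mb_prefetch;     /* open-coded umin() */
}

int main(void)
{
        /* 16 groups per flex group, batch capped at 32 groups */
        printf("%u\n", prefetch_window(0, 4, 32));      /* 16: whole flex group */
        printf("%u\n", prefetch_window(13, 4, 32));     /* 3: stops at the boundary */
        printf("%u\n", prefetch_window(13, 6, 32));     /* 32: capped by s_mb_prefetch */
        return 0;
}

Stopping at the flex-group boundary keeps each batch inside a single flex
group, whose block bitmaps flex_bg stores adjacently, so the reads a batch
issues stay physically clustered on disk.
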
index 7a60b0103e64981ed211861d3133e2e2e2cdb6a7..9f66b1d5db67a840d38db2dd3f8db821dc631bbc 100644
@@ -192,6 +192,10 @@ struct ext4_allocation_context {
         */
        ext4_grpblk_t   ac_orig_goal_len;
 
+       ext4_group_t ac_prefetch_grp;   /* group to start the next prefetch from */
+       unsigned int ac_prefetch_ios;   /* prefetch IOs issued so far */
+       unsigned int ac_prefetch_nr;    /* groups in the most recent batch */
+
        __u32 ac_flags;         /* allocation hints */
        __u32 ac_groups_linear_remaining;
        __u16 ac_groups_scanned;
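
Lifecycle of the three new fields, as wired up by the mballoc.c hunks above:
ac_prefetch_ios is zeroed once per allocation in ext4_mb_regular_allocator()
and accumulates across criteria passes, while ac_prefetch_grp and
ac_prefetch_nr are re-armed at the top of each pass, with the final batch
handed to ext4_mb_prefetch_fini() on the way out. A minimal userspace sketch
of that flow, with hypothetical names and a fixed pretend batch size, not
kernel code:

#include <stdio.h>

struct alloc_ctx {
        unsigned int prefetch_grp;      /* next group to prefetch from */
        unsigned int prefetch_ios;      /* reads issued so far */
        unsigned int prefetch_nr;       /* size of the last batch */
};

static void allocate(struct alloc_ctx *ac, unsigned int goal,
                     unsigned int ngroups, unsigned int ncriteria)
{
        ac->prefetch_ios = 0;                   /* once per allocation */

        for (unsigned int cr = 0; cr < ncriteria; cr++) {
                ac->prefetch_grp = goal;        /* re-armed per criteria pass */
                ac->prefetch_nr = 0;

                for (unsigned int i = 0, g = goal; i < ngroups;
                     i++, g = (g + 1) % ngroups) {
                        if (ac->prefetch_grp == g) {    /* mirrors the helper */
                                ac->prefetch_nr = 4;    /* pretend batch size */
                                ac->prefetch_ios += ac->prefetch_nr;
                                ac->prefetch_grp = (g + ac->prefetch_nr) % ngroups;
                        }
                        /* ... scan group g ... */
                }
        }

        if (ac->prefetch_nr)                    /* "fini" on the last batch */
                printf("fini: grp=%u nr=%u ios=%u\n",
                       ac->prefetch_grp, ac->prefetch_nr, ac->prefetch_ios);
}

int main(void)
{
        struct alloc_ctx ac = { 0 };

        allocate(&ac, 0, 8, 2);
        return 0;
}

Moving this state from function locals into the allocation context means the
'choose group' to 'scan groups' conversion mentioned in the commit message
can invoke the prefetch helper from wherever group scanning ends up, without
threading three extra parameters through it.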