mm: replace (20 - PAGE_SHIFT) with common macros for pages<->MB conversion
author Ye Liu <liuye@kylinos.cn>
Fri, 18 Jul 2025 02:41:32 +0000 (10:41 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:24:36 +0000 (17:24 -0700)
Replace repeated (20 - PAGE_SHIFT) calculations with common helper macros:
- MB_TO_PAGES(mb)    converts MB to page count
- PAGES_TO_MB(pages) converts pages to MB

No functional change.
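
For illustration, a minimal userspace sketch of the conversion arithmetic,
hardcoding PAGE_SHIFT = 12 (an assumption: 4 KiB pages; in the kernel this
is per-architecture). The macro bodies match the ones added to
include/linux/mm.h below:

#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed: 4 KiB pages */
#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
#define MB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))

int main(void)
{
	/* With 4 KiB pages, 1 MB = 2^(20-12) = 256 pages. */
	printf("MB_TO_PAGES(1)    = %lu pages\n", (unsigned long)MB_TO_PAGES(1UL));    /* 256 */
	printf("MB_TO_PAGES(512)  = %lu pages\n", (unsigned long)MB_TO_PAGES(512UL));  /* 131072 */
	printf("PAGES_TO_MB(4096) = %lu MB\n",    (unsigned long)PAGES_TO_MB(4096UL)); /* 16 */
	return 0;
}

Note that the right shift in PAGES_TO_MB truncates (rounds down), which is
acceptable for the heuristics at the call sites converted here.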

Link: https://lkml.kernel.org/r/20250718024134.1304745-1-ye.liu@linux.dev
Signed-off-by: Ye Liu <liuye@kylinos.cn>
Acked-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Chris Li <chrisl@kernel.org>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: "Paul E . McKenney" <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
kernel/rcu/rcuscale.c
kernel/sched/fair.c
mm/backing-dev.c
mm/huge_memory.c
mm/swap.c

index 1ae97a0b8ec756e8b327cf6f848c6a5d9f36c341..b626d1bacef523031df114a2396cdd1d374052f9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -69,6 +69,15 @@ static inline void totalram_pages_add(long count)
 
 extern void * high_memory;
 
+/*
+ * Convert between pages and MB
+ * 20 is the shift for 1MB (2^20 = 1MB)
+ * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
+ * So (20 - PAGE_SHIFT) converts between pages and MB
+ */
+#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))
+
 #ifdef CONFIG_SYSCTL
 extern int sysctl_legacy_va_layout;
 #else
index b521d04559927a21acc2649d8e8dbbf848dd8a2f..7484d8ad5767b5fb5e7869320980554c33a90450 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -796,7 +796,7 @@ kfree_scale_thread(void *arg)
                pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
                       (unsigned long long)(end_time - start_time), kfree_loops,
                       rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
-                      (mem_begin - mem_during) >> (20 - PAGE_SHIFT));
+                      PAGES_TO_MB(mem_begin - mem_during));
 
                if (shutdown) {
                        smp_mb(); /* Assign before wake. */
index 82c8d804c54c119fc385970ed401656f360bb84a..e256793b9a0833120a83d7a0d6ddfe706c994761 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1495,7 +1495,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
         * by the PTE scanner and NUMA hinting faults should be trapped based
         * on resident pages
         */
-       nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
+       nr_scan_pages = MB_TO_PAGES(sysctl_numa_balancing_scan_size);
        rss = get_mm_rss(p->mm);
        if (!rss)
                rss = nr_scan_pages;
@@ -1934,8 +1934,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
                }
 
                def_th = sysctl_numa_balancing_hot_threshold;
-               rate_limit = sysctl_numa_balancing_promote_rate_limit << \
-                       (20 - PAGE_SHIFT);
+               rate_limit = MB_TO_PAGES(sysctl_numa_balancing_promote_rate_limit);
                numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
 
                th = pgdat->nbp_threshold ? : def_th;
index 783904d8c5ef867974d88998612677aeb111202e..e4d578e6121c0968d3c6cea897bec91ce0433efa 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -510,7 +510,7 @@ static void wb_update_bandwidth_workfn(struct work_struct *work)
 /*
  * Initial write bandwidth: 100 MB/s
  */
-#define INIT_BW                (100 << (20 - PAGE_SHIFT))
+#define INIT_BW                MB_TO_PAGES(100)
 
 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   gfp_t gfp)
index 9c38a95e9f091bd4ac28fc7446495a5b8a597041..2b4ea5a2ce7d242f3f7ae8b9ed6aaf57064407b6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -911,7 +911,7 @@ static int __init hugepage_init(void)
         * where the extra memory used could hurt more than TLB overhead
         * is likely to save.  The admin can still enable it through /sys.
         */
-       if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
+       if (totalram_pages() < MB_TO_PAGES(512)) {
                transparent_hugepage_flags = 0;
                return 0;
        }
index 881e53b2877e6c1633e56331c1596d559c1c5616..60e7b509807b85b1703b87e821ce2c0c7f59c336 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1101,7 +1101,7 @@ static const struct ctl_table swap_sysctl_table[] = {
  */
 void __init swap_setup(void)
 {
-       unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
+       unsigned long megs = PAGES_TO_MB(totalram_pages());
 
        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)