www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: numa,memblock: Use SZ_1M macro to denote bytes to MB conversion
author: Pratyush Brahma <pratyush.brahma@oss.qualcomm.com>
Wed, 20 Aug 2025 00:59:34 +0000 (06:29 +0530)
committer: Mike Rapoport (Microsoft) <rppt@kernel.org>
Wed, 20 Aug 2025 13:31:23 +0000 (16:31 +0300)
Replace the manual bitwise-shift conversion of bytes to MB
(`>> 20`) with the SZ_1M macro, a standard macro used within
the mm subsystem, to improve readability.

Signed-off-by: Pratyush Brahma <pratyush.brahma@oss.qualcomm.com>
Link: https://lore.kernel.org/r/20250820-numa-memblks-refac-v2-1-43bf1af02acd@oss.qualcomm.com
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
mm/memblock.c
mm/numa_emulation.c
mm/numa_memblks.c

index 154f1d73b61f2234efe61c5cce5105be160d0041..8a0ed3074af4b4dacb87e45f3fecaeb6b3222fcf 100644 (file)
@@ -780,9 +780,9 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
        }
 
        if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
-               mem_size_mb = memblock_phys_mem_size() >> 20;
+               mem_size_mb = memblock_phys_mem_size() / SZ_1M;
                pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
-                      (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+                      (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
                return false;
        }
 
index 9d55679d99ceea9807d41840cf097eb449afaf8e..703c8fa05048019317bc2a011d7b928884ddc934 100644 (file)
@@ -73,7 +73,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
        }
 
        printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
-              nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
+              nid, eb->start, eb->end - 1, (eb->end - eb->start) / SZ_1M);
        return 0;
 }
 
@@ -264,7 +264,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
        min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
        if (size < min_size) {
                pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
-                       size >> 20, min_size >> 20);
+                       size / SZ_1M, min_size / SZ_1M);
                size = min_size;
        }
        size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
index de626525a87c3ed54d31695e86f18c980c084558..5b009a9cd8b4c26e405e49a9536286bb001dd897 100644 (file)
@@ -427,9 +427,9 @@ static int __init numa_register_meminfo(struct numa_meminfo *mi)
                unsigned long pfn_align = node_map_pfn_alignment();
 
                if (pfn_align && pfn_align < PAGES_PER_SECTION) {
-                       unsigned long node_align_mb = PFN_PHYS(pfn_align) >> 20;
+                       unsigned long node_align_mb = PFN_PHYS(pfn_align) / SZ_1M;
 
-                       unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20;
+                       unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) / SZ_1M;
 
                        pr_warn("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
                                node_align_mb, sect_align_mb);