mm/mm_init.c: remove unneeded calc_memmap_size()
author	Baoquan He <bhe@redhat.com>
	Mon, 25 Mar 2024 14:56:45 +0000 (22:56 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Fri, 26 Apr 2024 03:56:11 +0000 (20:56 -0700)
Nobody calls calc_memmap_size() now.

Link: https://lkml.kernel.org/r/20240325145646.1044760-6-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mm_init.c

index b211b30231cb4617da84dd5ad53d35759f36cdf7..8c261572ca6ea4e225dc0f60c391f64b2518e325 100644
@@ -1332,26 +1332,6 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
        pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
 }
 
-static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
-                                               unsigned long present_pages)
-{
-       unsigned long pages = spanned_pages;
-
-       /*
-        * Provide a more accurate estimation if there are holes within
-        * the zone and SPARSEMEM is in use. If there are holes within the
-        * zone, each populated memory region may cost us one or two extra
-        * memmap pages due to alignment because memmap pages for each
-        * populated region may not be naturally aligned on a page boundary.
-        * So the (present_pages >> 4) heuristic is a tradeoff for that.
-        */
-       if (spanned_pages > present_pages + (present_pages >> 4) &&
-           IS_ENABLED(CONFIG_SPARSEMEM))
-               pages = present_pages;
-
-       return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
-}
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {