mm/mm_init.c: simplify logic of deferred_[init|free]_pages
authorWei Yang <richard.weiyang@gmail.com>
Wed, 12 Jun 2024 02:04:21 +0000 (02:04 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Thu, 4 Jul 2024 02:30:12 +0000 (19:30 -0700)
The functions deferred_[init|free]_pages are only used in
deferred_init_maxorder(), which ensures the range to init/free fits
within MAX_ORDER_NR_PAGES.

With this knowledge, we can simplify these two functions: since only the
first pfn of the range could be IS_MAX_ORDER_ALIGNED(), there is no need
to re-check alignment on every loop iteration.

Also, since the range passed to deferred_[init|free]_pages always comes
from memblock.memory, for which we have already allocated memmap
coverage, pfn_valid() always returns true, so the related check can be
removed as well.

[richard.weiyang@gmail.com: adjust function declaration indention per David]
Link: https://lkml.kernel.org/r/20240613114525.27528-1-richard.weiyang@gmail.com
Link: https://lkml.kernel.org/r/20240612020421.31975-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mm_init.c

index b882ae7977ae5432e8a9929984f9ff09e2ec5752..d3a222b92029270f91364ca3b0f5f7d5d78eca09 100644 (file)
@@ -1916,8 +1916,8 @@ unsigned long __init node_map_pfn_alignment(void)
 }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-static void __init deferred_free_range(unsigned long pfn,
-                                      unsigned long nr_pages)
+static void __init deferred_free_pages(unsigned long pfn,
+               unsigned long nr_pages)
 {
        struct page *page;
        unsigned long i;
@@ -1955,69 +1955,21 @@ static inline void __init pgdat_init_report_one_done(void)
                complete(&pgdat_init_all_done_comp);
 }
 
-/*
- * Returns true if page needs to be initialized or freed to buddy allocator.
- *
- * We check if a current MAX_PAGE_ORDER block is valid by only checking the
- * validity of the head pfn.
- */
-static inline bool __init deferred_pfn_valid(unsigned long pfn)
-{
-       if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
-               return false;
-       return true;
-}
-
-/*
- * Free pages to buddy allocator. Try to free aligned pages in
- * MAX_ORDER_NR_PAGES sizes.
- */
-static void __init deferred_free_pages(unsigned long pfn,
-                                      unsigned long end_pfn)
-{
-       unsigned long nr_free = 0;
-
-       for (; pfn < end_pfn; pfn++) {
-               if (!deferred_pfn_valid(pfn)) {
-                       deferred_free_range(pfn - nr_free, nr_free);
-                       nr_free = 0;
-               } else if (IS_MAX_ORDER_ALIGNED(pfn)) {
-                       deferred_free_range(pfn - nr_free, nr_free);
-                       nr_free = 1;
-               } else {
-                       nr_free++;
-               }
-       }
-       /* Free the last block of pages to allocator */
-       deferred_free_range(pfn - nr_free, nr_free);
-}
-
 /*
  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
  * by performing it only once every MAX_ORDER_NR_PAGES.
  * Return number of pages initialized.
  */
-static unsigned long  __init deferred_init_pages(struct zone *zone,
-                                                unsigned long pfn,
-                                                unsigned long end_pfn)
+static unsigned long __init deferred_init_pages(struct zone *zone,
+               unsigned long pfn, unsigned long end_pfn)
 {
        int nid = zone_to_nid(zone);
-       unsigned long nr_pages = 0;
+       unsigned long nr_pages = end_pfn - pfn;
        int zid = zone_idx(zone);
-       struct page *page = NULL;
+       struct page *page = pfn_to_page(pfn);
 
-       for (; pfn < end_pfn; pfn++) {
-               if (!deferred_pfn_valid(pfn)) {
-                       page = NULL;
-                       continue;
-               } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
-                       page = pfn_to_page(pfn);
-               } else {
-                       page++;
-               }
+       for (; pfn < end_pfn; pfn++, page++)
                __init_single_page(page, pfn, zid, nid);
-               nr_pages++;
-       }
        return nr_pages;
 }
 
@@ -2096,7 +2048,7 @@ deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
                        break;
 
                t = min(mo_pfn, epfn);
-               deferred_free_pages(spfn, t);
+               deferred_free_pages(spfn, t - spfn);
 
                if (mo_pfn <= epfn)
                        break;