Patch series "mm: remove pfn_valid_within() and CONFIG_HOLES_IN_ZONE".
After recent updates to freeing unused parts of the memory map, no
architecture can have holes in the memory map within a pageblock.  This
makes the pfn_valid_within() check and the CONFIG_HOLES_IN_ZONE
configuration option redundant.
The first patch removes them both mechanically; the second patch
simplifies memory_hotplug::test_pages_in_a_zone(), which had
pfn_valid_within() wrapped in more logic than a simple if.
This patch (of 2):
After the recent changes to freeing the unused parts of the memory map
and the rework of pfn_valid() on arm and arm64, no architecture can
have holes in the memory map within a pageblock, so nothing can enable
CONFIG_HOLES_IN_ZONE, which guards the non-trivial implementation of
pfn_valid_within().
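For reference, the guarded definition being removed from
include/linux/mmzone.h is:

	#ifdef CONFIG_HOLES_IN_ZONE
	#define pfn_valid_within(pfn) pfn_valid(pfn)
	#else
	#define pfn_valid_within(pfn) (1)
	#endif

With CONFIG_HOLES_IN_ZONE never set, only the trivial branch survives.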
With that, pfn_valid_within() is always hardwired to 1 and can be
completely removed.
Remove calls to pfn_valid_within() and CONFIG_HOLES_IN_ZONE.
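Schematically, the conversion at a typical pfn scanner is purely
mechanical (an illustrative sketch, not a verbatim hunk from this
patch):

	/* before: every pfn in the block had to be checked */
	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		pfn++;
	}

	/* after: pages within a pageblock are known to exist */
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		pfn++;
	}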
Link: https://lkml.kernel.org/r/20210713080035.7464-1-rppt@kernel.org
Link: https://lkml.kernel.org/r/20210713080035.7464-2-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 static int __ref get_nid_for_pfn(unsigned long pfn)
 {
-       if (!pfn_valid_within(pfn))
-               return -1;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
        if (system_state < SYSTEM_RUNNING)
                return early_pfn_to_nid(pfn);
 
 #define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
-/*
- * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
- * pfn_valid_within() should be used in this case; we optimise this away
- * when we have no holes within a MAX_ORDER_NR_PAGES block.
- */
-#ifdef CONFIG_HOLES_IN_ZONE
-#define pfn_valid_within(pfn) pfn_valid(pfn)
-#else
-#define pfn_valid_within(pfn) (1)
-#endif
-
 #endif /* !__GENERATING_BOUNDS.H */
 #endif /* !__ASSEMBLY__ */
 #endif /* _LINUX_MMZONE_H */
 
        depends on MMU
        bool
 
-config HOLES_IN_ZONE
-       bool
-
 # Don't discard allocated memory used to track "memory" and "reserved" memblocks
 # after early boot, so it can still be used to test for validity of memory.
 # Also, memblocks are updated with memory hot(un)plug.
 
         * is necessary for the block to be a migration source/target.
         */
        do {
-               if (pfn_valid_within(pfn)) {
-                       if (check_source && PageLRU(page)) {
-                               clear_pageblock_skip(page);
-                               return true;
-                       }
+               if (check_source && PageLRU(page)) {
+                       clear_pageblock_skip(page);
+                       return true;
+               }
 
-                       if (check_target && PageBuddy(page)) {
-                               clear_pageblock_skip(page);
-                               return true;
-                       }
+               if (check_target && PageBuddy(page)) {
+                       clear_pageblock_skip(page);
+                       return true;
                }
 
                page += (1 << PAGE_ALLOC_COSTLY_ORDER);
                        break;
 
                nr_scanned++;
-               if (!pfn_valid_within(blockpfn))
-                       goto isolate_fail;
 
                /*
                 * For compound pages such as THP and hugetlbfs, we can save
                        cond_resched();
                }
 
-               if (!pfn_valid_within(low_pfn))
-                       goto isolate_fail;
                nr_scanned++;
 
                page = pfn_to_page(low_pfn);
 
                for (; pfn < sec_end_pfn && pfn < end_pfn;
                     pfn += MAX_ORDER_NR_PAGES) {
                        i = 0;
-                       /* This is just a CONFIG_HOLES_IN_ZONE check.*/
-                       while ((i < MAX_ORDER_NR_PAGES) &&
-                               !pfn_valid_within(pfn + i))
-                               i++;
                        if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
                                continue;
                        /* Check if we got outside of the zone */
 
 
 static int page_is_consistent(struct zone *zone, struct page *page)
 {
-       if (!pfn_valid_within(page_to_pfn(page)))
-               return 0;
        if (zone != page_zone(page))
                return 0;
 
        if (order >= MAX_ORDER - 2)
                return false;
 
-       if (!pfn_valid_within(buddy_pfn))
-               return false;
-
        combined_pfn = buddy_pfn & pfn;
        higher_page = page + (combined_pfn - pfn);
        buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
        higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 
-       return pfn_valid_within(buddy_pfn) &&
-              page_is_buddy(higher_page, higher_buddy, order + 1);
+       return page_is_buddy(higher_page, higher_buddy, order + 1);
 }
 
 /*
                buddy_pfn = __find_buddy_pfn(pfn, order);
                buddy = page + (buddy_pfn - pfn);
 
-               if (!pfn_valid_within(buddy_pfn))
-                       goto done_merging;
                if (!page_is_buddy(page, buddy, order))
                        goto done_merging;
                /*
 /*
  * Check that the whole (or subset of) a pageblock given by the interval of
  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
- * with the migration of free compaction scanner. The scanners then need to
- * use only pfn_valid_within() check for arches that allow holes within
- * pageblocks.
+ * with the migration or free compaction scanner.
  *
  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
  *
  */
 static inline bool __init deferred_pfn_valid(unsigned long pfn)
 {
-       if (!pfn_valid_within(pfn))
-               return false;
        if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
                return false;
        return true;
        int pages_moved = 0;
 
        for (pfn = start_pfn; pfn <= end_pfn;) {
-               if (!pfn_valid_within(pfn)) {
-                       pfn++;
-                       continue;
-               }
-
                page = pfn_to_page(pfn);
                if (!PageBuddy(page)) {
                        /*
        }
 
        for (; iter < pageblock_nr_pages - offset; iter++) {
-               if (!pfn_valid_within(pfn + iter))
-                       continue;
-
                page = pfn_to_page(pfn + iter);
 
                /*
 
                        buddy_pfn = __find_buddy_pfn(pfn, order);
                        buddy = page + (buddy_pfn - pfn);
 
-                       if (pfn_valid_within(buddy_pfn) &&
-                           !is_migrate_isolate_page(buddy)) {
+                       if (!is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                isolated_page = true;
                        }
        struct page *page;
 
        while (pfn < end_pfn) {
-               if (!pfn_valid_within(pfn)) {
-                       pfn++;
-                       continue;
-               }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
 
                pageblock_mt = get_pageblock_migratetype(page);
 
                for (; pfn < block_end_pfn; pfn++) {
-                       if (!pfn_valid_within(pfn))
-                               continue;
-
                        /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);
 
                        continue;
                }
 
-               /* Check for holes within a MAX_ORDER area */
-               if (!pfn_valid_within(pfn))
-                       continue;
-
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = buddy_order_unsafe(page);
                block_end_pfn = min(block_end_pfn, end_pfn);
 
                for (; pfn < block_end_pfn; pfn++) {
-                       struct page *page;
+                       struct page *page = pfn_to_page(pfn);
                        struct page_ext *page_ext;
 
-                       if (!pfn_valid_within(pfn))
-                               continue;
-
-                       page = pfn_to_page(pfn);
-
                        if (page_zone(page) != zone)
                                continue;