mm: Implement for_each_valid_pfn() for CONFIG_SPARSEMEM
authorDavid Woodhouse <dwmw@amazon.co.uk>
Wed, 2 Apr 2025 17:58:36 +0000 (18:58 +0100)
committerDavid Woodhouse <dwmw@amazon.co.uk>
Fri, 4 Apr 2025 13:45:53 +0000 (14:45 +0100)
Introduce a first_valid_pfn() helper which takes a pointer to the PFN and
updates it to point to the first valid PFN starting from that point, and
returns true if a valid PFN was found.

This largely mirrors pfn_valid(), calling into a pfn_section_first_valid()
helper which is trivial for the !CONFIG_SPARSEMEM_VMEMMAP case, and in
the VMEMMAP case will skip to the next subsection as needed.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
include/linux/mmzone.h

index 32ecb5cadbaf440c074a2833829330409c221224..67cdf675a4b973843e657abcbc553e3ca4a0cd05 100644 (file)
@@ -2074,11 +2074,37 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 
        return usage ? test_bit(idx, usage->subsection_map) : 0;
 }
+
+/*
+ * Advance *pfn to the first PFN in @ms (at or after *pfn) that lies in a
+ * populated subsection.  Returns true if one was found, false if no
+ * subsection from *pfn to the end of the section is present.  Mirrors the
+ * subsection check in pfn_section_valid(); READ_ONCE() pairs with the
+ * WRITE_ONCE() updates of ms->usage elsewhere (caller holds RCU, as in
+ * pfn_valid()).
+ */
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+       struct mem_section_usage *usage = READ_ONCE(ms->usage);
+       int idx = subsection_map_index(*pfn);
+       unsigned long bit;
+
+       /* No usage map means no subsection of this section is present. */
+       if (!usage)
+               return false;
+
+       /* Fast path: *pfn already falls in a present subsection. */
+       if (test_bit(idx, usage->subsection_map))
+               return true;
+
+       /* Find the next subsection that exists */
+       bit = find_next_bit(usage->subsection_map, SUBSECTIONS_PER_SECTION, idx);
+       if (bit == SUBSECTIONS_PER_SECTION)
+               return false;
+
+       /* Snap *pfn forward to the first PFN of that subsection. */
+       *pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION);
+       return true;
+}
 #else
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 {
        return 1;
 }
+
+/*
+ * Without CONFIG_SPARSEMEM_VMEMMAP there are no subsections, so any PFN in
+ * a valid section is valid: *pfn never needs to be advanced here.
+ */
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+       return true;
+}
 #endif
 
 void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
@@ -2127,6 +2153,39 @@ static inline int pfn_valid(unsigned long pfn)
 
        return ret;
 }
+
+/*
+ * Advance *p_pfn to the first valid PFN at or after its current value.
+ * Returns true (with *p_pfn updated) if one exists, false once the scan
+ * runs past the highest present section.  The section/early checks mirror
+ * pfn_valid(); rcu_read_lock_sched() protects the ms->usage dereference in
+ * pfn_section_first_valid() — same locking convention as pfn_valid().
+ */
+static inline bool first_valid_pfn(unsigned long *p_pfn)
+{
+       unsigned long pfn = *p_pfn;
+       unsigned long nr = pfn_to_section_nr(pfn);
+
+       rcu_read_lock_sched();
+
+       while (nr <= __highest_present_section_nr) {
+               struct mem_section *ms = __pfn_to_section(pfn);
+
+               /*
+                * Early sections are valid throughout; otherwise let
+                * pfn_section_first_valid() skip pfn to the next present
+                * subsection within this section, if any.
+                */
+               if (valid_section(ms) &&
+                   (early_section(ms) || pfn_section_first_valid(ms, &pfn))) {
+                       *p_pfn = pfn;
+                       rcu_read_unlock_sched();
+                       return true;
+               }
+
+               /* Nothing left in this section? Skip to next section */
+               nr++;
+               pfn = section_nr_to_pfn(nr);
+       }
+
+       rcu_read_unlock_sched();
+
+       return false;
+}
+
+/*
+ * Iterate _pfn over every valid PFN in [_start_pfn, _end_pfn).
+ * first_valid_pfn() advances _pfn in place past any holes; the loop ends
+ * when no valid PFN remains or the next valid PFN reaches _end_pfn.
+ * Note: _pfn is evaluated multiple times and must be a plain lvalue.
+ */
+#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn)        \
+       for ((_pfn) = (_start_pfn);                            \
+            first_valid_pfn(&(_pfn)) && (_pfn) < (_end_pfn);  \
+            (_pfn)++)
+
 #endif
 
 static inline int pfn_in_present_section(unsigned long pfn)