The unuse_pte_range() caller only wants the folio while do_swap_page()
wants both the page and the folio.  Since do_swap_page() already has logic
for handling both the folio and the page, move the folio-to-page logic
there.  This also lets us allocate larger folios in the SWP_SYNCHRONOUS_IO
path in future.
Link: https://lkml.kernel.org/r/20240807193734.1865400-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
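---

For illustration (not part of the patch): a minimal sketch of the change
in calling convention, using only helpers that appear in the diff below.

        /* Before: swapin_readahead() returned the precise subpage */
        page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
        if (page)
                folio = page_folio(page);

        /*
         * After: it returns the folio; callers that still need the
         * subpage derive it themselves, as do_swap_page() now does.
         */
        folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
        if (folio)
                page = folio_file_page(folio, swp_offset(entry));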
--- a/mm/memory.c
+++ b/mm/memory.c
@@ ... @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                        /* skip swapcache */
                        folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
                                                vma, vmf->address, false);
-                       page = &folio->page;
                        if (folio) {
                                __folio_set_locked(folio);
                                __folio_set_swapbacked(folio);
@@ ... @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                folio->private = NULL;
                        }
                } else {
-                       page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+                       folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
                                                vmf);
-                       if (page)
-                               folio = page_folio(page);
                        swapcache = folio;
                }
 
@@ ... @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
+               page = folio_file_page(folio, swp_offset(entry));
        } else if (PageHWPoison(page)) {
                /*
                 * hwpoisoned dirty swapcache pages are kept for killing
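Note: the added line above relies on folio_file_page() to pick the
subpage of the folio that matches the swap entry's offset, which is what
keeps this correct once larger folios are allocated here. A sketch of
that helper, modelled on the include/linux/pagemap.h definition:

        static inline struct page *folio_file_page(struct folio *folio,
                        pgoff_t index)
        {
                /* mask the index down to the folio's size in pages */
                return folio_page(folio, index & (folio_nr_pages(folio) - 1));
        }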

--- a/mm/swap.h
+++ b/mm/swap.h
@@ ... @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
                bool skip_if_exists);
 struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
                struct mempolicy *mpol, pgoff_t ilx);
-struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
-                             struct vm_fault *vmf);
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
+               struct vm_fault *vmf);
 
 static inline unsigned int folio_swap_flags(struct folio *folio)
 {
@@ ... @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
        return NULL;
 }
 
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_fault *vmf)
 {
        return NULL;

--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ ... @@
  * @gfp_mask: memory allocation flags
  * @vmf: fault information
  *
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
  *
  * It's a main entry function for swap readahead. By the configuration,
  * it will read ahead blocks by cluster-based(ie, physical disk based)
  * or vma-based(ie, virtual address based on faulty address) readahead.
  */
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
 {
        struct mempolicy *mpol;
@@ ... @@
                swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
        mpol_cond_put(mpol);
 
-       if (!folio)
-               return NULL;
-       return folio_file_page(folio, swp_offset(entry));
+       return folio;
 }
 
 #ifdef CONFIG_SYSFS
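As the kerneldoc says, the readahead style is chosen "by the
configuration"; a condensed sketch of that selection inside
swapin_readahead(), assuming the kernel's swap_use_vma_readahead()
helper and the swap_vma_readahead() sibling of swap_cluster_readahead():

        folio = swap_use_vma_readahead() ?
                swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
                swap_cluster_readahead(entry, gfp_mask, mpol, ilx);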

--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ ... @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                folio = swap_cache_get_folio(entry, vma, addr);
                if (!folio) {
-                       struct page *page;
                        struct vm_fault vmf = {
                                .vma = vma,
                                .address = addr,
                                .pmd = pmd,
                        };
 
-                       page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+                       folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
                                                &vmf);
-                       if (page)
-                               folio = page_folio(page);
                }
                if (!folio) {
                        swp_count = READ_ONCE(si->swap_map[offset]);
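After this patch, the swapoff path in unuse_pte_range() deals purely in
folios; a condensed sketch of the resulting flow (locking and error
handling elided):

        folio = swap_cache_get_folio(entry, vma, addr);
        if (!folio)
                folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
        if (!folio) {
                /* entry may have been freed concurrently; re-check map */
                swp_count = READ_ONCE(si->swap_map[offset]);
        }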