mm: return the folio from swapin_readahead
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Wed, 7 Aug 2024 19:37:32 +0000 (20:37 +0100)
committer  Andrew Morton <akpm@linux-foundation.org>
           Sat, 17 Aug 2024 00:53:03 +0000 (17:53 -0700)
The unuse_pte_range() caller only wants the folio while do_swap_page()
wants both the page and the folio.  Since do_swap_page() already has logic
for handling both the folio and the page, move the folio-to-page logic
there.  This also lets us allocate larger folios in the SWP_SYNCHRONOUS_IO
path in future.

Link: https://lkml.kernel.org/r/20240807193734.1865400-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c
mm/swap.h
mm/swap_state.c
mm/swapfile.c
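
The change boils down to a single calling-convention shift: swapin_readahead() now returns the struct folio, and a caller that still needs the precise page derives it itself. A minimal sketch of the new convention, using only identifiers that appear in this patch (illustrative fragment, not part of the diff below):

	folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
	if (folio)	/* NULL if the folio could not be allocated or read in */
		page = folio_file_page(folio, swp_offset(entry));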

index 46a44dc702fa60c5c9ef455cd3e75c338e85c079..2ca87ceafede2d181a484e8de82d7d3aa548c895 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4091,7 +4091,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                        /* skip swapcache */
                        folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
                                                vma, vmf->address, false);
-                       page = &folio->page;
                        if (folio) {
                                __folio_set_locked(folio);
                                __folio_set_swapbacked(folio);
@@ -4116,10 +4115,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                folio->private = NULL;
                        }
                } else {
-                       page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+                       folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
                                                vmf);
-                       if (page)
-                               folio = page_folio(page);
                        swapcache = folio;
                }
 
@@ -4140,6 +4137,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
+               page = folio_file_page(folio, swp_offset(entry));
        } else if (PageHWPoison(page)) {
                /*
                 * hwpoisoned dirty swapcache pages are kept for killing
index 7c6330561d84b8138616884a50f14957174156d6..f8711ff82f843f2c2388c01707d5eb1b69926daf 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -73,8 +73,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
                bool skip_if_exists);
 struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
                struct mempolicy *mpol, pgoff_t ilx);
-struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
-                             struct vm_fault *vmf);
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
+               struct vm_fault *vmf);
 
 static inline unsigned int folio_swap_flags(struct folio *folio)
 {
@@ -109,7 +109,7 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
        return NULL;
 }
 
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_fault *vmf)
 {
        return NULL;
index 293ff1afdca40dda8cd644a48dd5bed7c8ff7c0e..a042720554a74d2e04a83be7727a87fe8f7aaeba 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -863,13 +863,13 @@ skip:
  * @gfp_mask: memory allocation flags
  * @vmf: fault information
  *
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
  *
  * It's a main entry function for swap readahead. By the configuration,
  * it will read ahead blocks by cluster-based(ie, physical disk based)
  * or vma-based(ie, virtual address based on faulty address) readahead.
  */
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
 {
        struct mempolicy *mpol;
@@ -882,9 +882,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
        mpol_cond_put(mpol);
 
-       if (!folio)
-               return NULL;
-       return folio_file_page(folio, swp_offset(entry));
+       return folio;
 }
 
 #ifdef CONFIG_SYSFS
index 6de12d712c7ed5a9e68659e9f83b8ac711074ad3..e6317fdeb39feb8470e96d2d157807fe74cc608e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2124,7 +2124,6 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                folio = swap_cache_get_folio(entry, vma, addr);
                if (!folio) {
-                       struct page *page;
                        struct vm_fault vmf = {
                                .vma = vma,
                                .address = addr,
@@ -2132,10 +2131,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                .pmd = pmd,
                        };
 
-                       page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+                       folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
                                                &vmf);
-                       if (page)
-                               folio = page_folio(page);
                }
                if (!folio) {
                        swp_count = READ_ONCE(si->swap_map[offset]);