  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
                        struct vm_area_struct *vma, unsigned long addr);
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
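
For reference, a caller-side sketch of the contract this new declaration
establishes (a hypothetical caller, using only the return conventions the
hunks below themselves rely on: NULL on allocation failure,
ERR_PTR(-EHWPOISON) on poison, otherwise a folio):

	/* Hypothetical caller, not part of this patch. */
	static int example_swapin_fixup(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
	{
		folio = ksm_might_need_to_copy(folio, vma, addr);
		if (!folio)
			return -ENOMEM;		/* allocating the copy failed */
		if (folio == ERR_PTR(-EHWPOISON))
			return -EHWPOISON;	/* source page is hwpoisoned */
		/* success: the original folio, or a private order-0 copy */
		return 0;
	}
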
        return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
                        struct vm_area_struct *vma, unsigned long addr)
 {
-       return page;
+       return folio;
 }
 
 static inline void rmap_walk_ksm(struct folio *folio,
 
        trace_ksm_exit(mm);
 }
 
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
                        struct vm_area_struct *vma, unsigned long addr)
 {
-       struct folio *folio = page_folio(page);
+       struct page *page = folio_page(folio, 0);
        struct anon_vma *anon_vma = folio_anon_vma(folio);
        struct folio *new_folio;
 
        if (folio_test_large(folio))
-               return page;
+               return folio;
 
        if (folio_test_ksm(folio)) {
                if (folio_stable_node(folio) &&
                    !(ksm_run & KSM_RUN_UNMERGE))
-                       return page;    /* no need to copy it */
+                       return folio;   /* no need to copy it */
        } else if (!anon_vma) {
-               return page;            /* no need to copy it */
+               return folio;           /* no need to copy it */
        } else if (folio->index == linear_page_index(vma, addr) &&
                        anon_vma->root == vma->anon_vma->root) {
-               return page;            /* still no need to copy it */
+               return folio;           /* still no need to copy it */
        }
        if (PageHWPoison(page))
                return ERR_PTR(-EHWPOISON);
        if (!folio_test_uptodate(folio))
-               return page;            /* let do_swap_page report the error */
+               return folio;           /* let do_swap_page report the error */
 
        new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
        if (new_folio &&
            mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
                folio_put(new_folio);
                new_folio = NULL;
        }
        if (new_folio) {
-               if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) {
+               if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
+                                                               addr, vma)) {
                        folio_put(new_folio);
-                       memory_failure_queue(page_to_pfn(page), 0);
+                       memory_failure_queue(folio_pfn(folio), 0);
                        return ERR_PTR(-EHWPOISON);
                }
                folio_set_dirty(new_folio);
                __folio_mark_uptodate(new_folio);
                __folio_set_locked(new_folio);
 #ifdef CONFIG_SWAP
                count_vm_event(KSM_SWPIN_COPY);
 #endif
        }
 
-       return new_folio ? &new_folio->page : NULL;
+       return new_folio;
 }
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
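
Since ksm_might_need_to_copy() now returns either the folio it was handed or
a brand-new one, callers can detect that a copy actually happened with a
plain pointer comparison, as do_swap_page() does below via
"folio != swapcache". A hypothetical helper (not part of this patch)
expressing the same check:

	/* Hypothetical helper, not in this patch: true iff the call succeeded
	 * and returned a private copy rather than the folio passed in.
	 * IS_ERR_OR_NULL() is from <linux/err.h>. */
	static inline bool ksm_got_copy(struct folio *orig, struct folio *ret)
	{
		return !IS_ERR_OR_NULL(ret) && ret != orig;
	}
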
 
                 * page->index of !PageKSM() pages would be nonlinear inside the
                 * anon VMA -- PageKSM() is lost on actual swapout.
                 */
-               page = ksm_might_need_to_copy(page, vma, vmf->address);
-               if (unlikely(!page)) {
+               folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+               if (unlikely(!folio)) {
                        ret = VM_FAULT_OOM;
+                       folio = swapcache;
                        goto out_page;
-               } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+               } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
                        ret = VM_FAULT_HWPOISON;
+                       folio = swapcache;
                        goto out_page;
                }
-               folio = page_folio(page);
+               if (folio != swapcache)
+                       page = folio_page(folio, 0);
 
                /*
                 * If we want to map a page that's in the swapcache writable, we
 
        int ret = 1;
 
-       swapcache = page;
+       swapcache = folio;
-       page = ksm_might_need_to_copy(page, vma, addr);
-       if (unlikely(!page))
+       folio = ksm_might_need_to_copy(folio, vma, addr);
+       if (unlikely(!folio))
                return -ENOMEM;
-       else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+       else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
                hwpoisoned = true;
+       else
+               page = folio_file_page(folio, swp_offset(entry));
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
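
In the unuse_pte() hunk above, the added else-branch recomputes "page"
because the folio returned may be a fresh order-0 copy rather than the
original, possibly large, swapcache folio. A simplified model of the
folio_file_page() call used there (an assumption based on its pagemap.h
definition, not code from this patch):

	/* Simplified sketch: pick the subpage of a possibly-large folio for
	 * a given index; for an order-0 copy this always yields page 0. */
	static inline struct page *subpage_for(struct folio *folio, pgoff_t index)
	{
		return folio_page(folio, index & (folio_nr_pages(folio) - 1));
	}
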