mm/memory: pass folio and pte to restore_exclusive_pte()
author    David Hildenbrand <david@redhat.com>
          Wed, 26 Feb 2025 13:22:55 +0000 (14:22 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 28 Feb 2025 01:00:37 +0000 (17:00 -0800)
Let's pass the folio and the pte to restore_exclusive_pte(), so we can
avoid repeated page_folio() and ptep_get() calls.  To do that, pass the
pte to try_restore_exclusive_pte() as well and work with the folio there
directly.

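As a rough illustration of the pattern in plain userspace C (the toy
types and the ptep_get()/page_folio() stand-ins below are hypothetical,
not the kernel implementations): the caller reads the pte and derives
the folio once, then threads both values through, instead of each
callee repeating the lookups:

    #include <stdio.h>

    /* Hypothetical stand-ins for pte_t, struct page and struct folio. */
    typedef unsigned long pte_t;
    struct folio { int id; };
    struct page  { struct folio *folio; };

    static pte_t ptep_get(const pte_t *ptep) { return *ptep; }
    static struct folio *page_folio(struct page *page) { return page->folio; }

    /* Callee consumes the already-read pte value and the already-derived
     * folio instead of calling ptep_get()/page_folio() again. */
    static void restore(struct folio *folio, struct page *page, pte_t orig_pte)
    {
        printf("folio %d: restoring pte %#lx\n", folio->id, orig_pte);
        (void)page;
    }

    static void try_restore(struct page *page, pte_t *ptep, pte_t orig_pte)
    {
        struct folio *folio = page_folio(page);   /* derived once, here */

        restore(folio, page, orig_pte);
        (void)ptep;
    }

    int main(void)
    {
        struct folio f = { .id = 1 };
        struct page  p = { .folio = &f };
        pte_t pte = 0x42;

        /* The pte is read exactly once, in the caller. */
        try_restore(&p, &pte, ptep_get(&pte));
        return 0;
    }
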
While at it, drop the "swp_entry_t entry" variable in
try_restore_exclusive_pte() and add a folio-locked sanity check to
restore_exclusive_pte().

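Note that VM_WARN_ON_FOLIO() only expands to a runtime check when
CONFIG_DEBUG_VM is enabled; in non-debug builds it compiles away, so the
new folio-locked check documents the locking contract of
restore_exclusive_pte() without adding overhead to production kernels.
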
Link: https://lkml.kernel.org/r/20250226132257.2826043-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory.c b/mm/memory.c
index 17ac1f9e2aa28ac6b16a33b5151543811b6c339b..30ad6f8aa7c6880c264c9a8353781b512c7e2ce4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -719,14 +719,13 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
 #endif
 
 static void restore_exclusive_pte(struct vm_area_struct *vma,
-                                 struct page *page, unsigned long address,
-                                 pte_t *ptep)
+               struct folio *folio, struct page *page, unsigned long address,
+               pte_t *ptep, pte_t orig_pte)
 {
-       struct folio *folio = page_folio(page);
-       pte_t orig_pte;
        pte_t pte;
 
-       orig_pte = ptep_get(ptep);
+       VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+
        pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
        if (pte_swp_soft_dirty(orig_pte))
                pte = pte_mksoft_dirty(pte);
@@ -753,16 +752,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
  * Tries to restore an exclusive pte if the page lock can be acquired without
  * sleeping.
  */
-static int
-try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
-                       unsigned long addr)
+static int try_restore_exclusive_pte(struct vm_area_struct *vma,
+               unsigned long addr, pte_t *ptep, pte_t orig_pte)
 {
-       swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
-       struct page *page = pfn_swap_entry_to_page(entry);
+       struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
+       struct folio *folio = page_folio(page);
 
-       if (trylock_page(page)) {
-               restore_exclusive_pte(vma, page, addr, src_pte);
-               unlock_page(page);
+       if (folio_trylock(folio)) {
+               restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte);
+               folio_unlock(folio);
                return 0;
        }
 
@@ -868,7 +866,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                 * (ie. COW) mappings.
                 */
                VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
-               if (try_restore_exclusive_pte(src_pte, src_vma, addr))
+               if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
                        return -EBUSY;
                return -ENOENT;
        } else if (is_pte_marker_entry(entry)) {
@@ -4032,7 +4030,8 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                                &vmf->ptl);
        if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
-               restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
+               restore_exclusive_pte(vma, folio, vmf->page, vmf->address,
+                                     vmf->pte, vmf->orig_pte);
 
        if (vmf->pte)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
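
Note: in remove_device_exclusive_entry(), "folio" is derived from
vmf->page (and locked) earlier in the function, outside the context
shown in this hunk, which is why it can simply be passed through here.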