www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/shmem: Reduce duplicate userfaultfd code
authorLiam R. Howlett <Liam.Howlett@oracle.com>
Fri, 24 Oct 2025 15:46:03 +0000 (11:46 -0400)
committerLiam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:42:58 +0000 (12:42 -0400)
shmem has duplicate code blocks in functions that were split out earlier.
The duplicated code can be removed without much difficulty by extracting
it into a shared helper function.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/shmem.c

index 1bc84fb55aa8f7338889b0b4f49dc8bea51f250a..fe079e2c8f30170c23bc3b3c46e407663b6b9f58 100644 (file)
@@ -3159,33 +3159,12 @@ static ssize_t shmem_is_dst_valid(struct vm_area_struct *dst_vma,
        return 0;
 }
 
-
-static int shmem_mfill_atomic_pte_zeropage(struct vm_area_struct *dst_vma,
-               unsigned long dst_addr)
+static inline int shmem_mfill_complete(struct vm_area_struct *dst_vma,
+               unsigned long dst_addr, pmd_t *dst_pmd, pgoff_t pgoff,
+               struct folio *folio, struct inode *inode, gfp_t gfp)
 {
-       struct inode *inode = file_inode(dst_vma->vm_file);
-       struct shmem_inode_info *info = SHMEM_I(inode);
-       struct address_space *mapping = inode->i_mapping;
-       gfp_t gfp = mapping_gfp_mask(mapping);
-       pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
-       struct folio *folio;
-       int ret;
        pgoff_t max_off;
-       pmd_t *dst_pmd;
-
-       ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
-       if (ret)
-               return ret;
-
-       if (shmem_inode_acct_blocks(inode, 1))
-               return -ENOMEM;
-
-       ret = -ENOMEM;
-       folio = shmem_alloc_folio(gfp, 0, info, pgoff);
-       if (!folio)
-               goto out_unacct_blocks;
-
-       clear_user_highpage(&folio->page, dst_addr);
+       int ret;
 
        VM_BUG_ON(folio_test_locked(folio));
        VM_BUG_ON(folio_test_swapbacked(folio));
@@ -3201,7 +3180,8 @@ static int shmem_mfill_atomic_pte_zeropage(struct vm_area_struct *dst_vma,
        ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
        if (ret)
                goto out_release;
-       ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
+       ret = shmem_add_to_page_cache(folio, inode->i_mapping, pgoff, NULL,
+                                     gfp);
        if (ret)
                goto out_release;
 
@@ -3218,11 +3198,42 @@ out_delete_from_cache:
 out_release:
        folio_unlock(folio);
        folio_put(folio);
-out_unacct_blocks:
        shmem_inode_unacct_blocks(inode, 1);
        return ret;
 }
 
+static int shmem_mfill_atomic_pte_zeropage(struct vm_area_struct *dst_vma,
+               unsigned long dst_addr)
+{
+       struct inode *inode = file_inode(dst_vma->vm_file);
+       struct shmem_inode_info *info = SHMEM_I(inode);
+       gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
+       pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+       struct folio *folio;
+       int ret;
+       pmd_t *dst_pmd;
+
+       ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
+       if (ret)
+               return ret;
+
+       if (shmem_inode_acct_blocks(inode, 1))
+               return -ENOMEM;
+
+       ret = -ENOMEM;
+       folio = shmem_alloc_folio(gfp, 0, info, pgoff);
+       if (!folio) {
+               shmem_inode_unacct_blocks(inode, 1);
+               return -ENOMEM;
+       }
+
+       clear_user_highpage(&folio->page, dst_addr);
+
+       ret = shmem_mfill_complete(dst_vma, dst_addr, dst_pmd, pgoff, folio,
+                                  inode, gfp);
+       return ret;
+}
+
 static int shmem_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
                unsigned long dst_addr, unsigned long src_addr,
                uffd_flags_t flags, struct folio **foliop,
@@ -3236,7 +3247,6 @@ static int shmem_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
        void *page_kaddr;
        struct folio *folio;
        int ret;
-       pgoff_t max_off;
        pmd_t *dst_pmd;
 
        ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
@@ -3299,37 +3309,10 @@ static int shmem_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
                *foliop = NULL;
        }
 
-       VM_BUG_ON(folio_test_locked(folio));
-       VM_BUG_ON(folio_test_swapbacked(folio));
-       __folio_set_locked(folio);
-       __folio_set_swapbacked(folio);
-       __folio_mark_uptodate(folio);
-
-       ret = -EFAULT;
-       max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-       if (unlikely(pgoff >= max_off))
-               goto out_release;
-
-       ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
-       if (ret)
-               goto out_release;
-       ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
-       if (ret)
-               goto out_release;
-
-       ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-                                      &folio->page, true, flags);
-       if (ret)
-               goto out_delete_from_cache;
+       ret = shmem_mfill_complete(dst_vma, dst_addr, dst_pmd, pgoff, folio,
+                                  inode, gfp);
+       return ret;
 
-       shmem_recalc_inode(inode, 1, 0);
-       folio_unlock(folio);
-       return 0;
-out_delete_from_cache:
-       filemap_remove_folio(folio);
-out_release:
-       folio_unlock(folio);
-       folio_put(folio);
 out_unacct_blocks:
        shmem_inode_unacct_blocks(inode, 1);
        return ret;