www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/userfaultfd: Add failed_do_unlock(), move hugetlb implementation to hugetlb.c
authorLiam R. Howlett <Liam.Howlett@oracle.com>
Mon, 27 Oct 2025 19:43:31 +0000 (15:43 -0400)
committerLiam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:30:36 +0000 (12:30 -0400)
hugetlb.c

Create and populate failed_do_unlock() in uffd ops.  Move the hugetlb
implementation into hugetlb.c and call it in the mfill_atomic_hugetlb()
loop for now.

The mfill_atomic() loop is also updated to use the function pointer of
the types.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
include/linux/userfaultfd_k.h
mm/hugetlb.c
mm/shmem.c
mm/userfaultfd.c

index f65073ae8d1ce5b100f79ba9c96dccc3c8c039ba..d3b9d8adf32d4faca85dc29d3f4ba5f6f35df3b0 100644 (file)
@@ -92,11 +92,19 @@ enum mfill_atomic_mode {
        NR_MFILL_ATOMIC_MODES,
 };
 
+/* VMA userfaultfd operations */
+ssize_t uffd_failed_do_unlock(struct userfaultfd_ctx *ctx,
+               struct vm_area_struct *dst, struct folio *folio,
+               unsigned long src_addr);
+
 struct vm_uffd_ops {
        /* Required features below */
        ssize_t (*is_dst_valid)(struct vm_area_struct *dst_vma,
                                unsigned long dst_start, unsigned long len);
        unsigned long (*increment)(struct vm_area_struct *vma);
+       ssize_t (*failed_do_unlock)(struct userfaultfd_ctx *ctx,
+                       struct vm_area_struct *dst, struct folio *folio,
+                       unsigned long src_addr);
 };
 
 #define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
index c7409c7cee282e734fc667f6d0847268bd15d37f..bfd60f30827240e6748360b3224acbd4892da7ba 100644 (file)
@@ -5513,9 +5513,26 @@ static inline unsigned long hugetlb_mfill_size(struct vm_area_struct *vma)
 static ssize_t hugetlb_is_dst_valid(struct vm_area_struct *dst_vma,
                unsigned long dst_start, unsigned long len);
 
+static ssize_t hugetlb_failed_do_unlock(struct userfaultfd_ctx *ctx,
+               struct vm_area_struct *dst, struct folio *folio,
+               unsigned long src_addr)
+{
+       up_read(&ctx->map_changing_lock);
+#ifdef CONFIG_PER_VMA_LOCK
+       vma_end_read(dst);
+#else
+       mmap_read_unlock(dst->vm_mm);
+#endif
+
+       VM_WARN_ON_ONCE(!folio);
+
+       return copy_folio_from_user(folio,(const void __user *)src_addr, true);
+}
+
 static const struct vm_uffd_ops hugetlb_uffd_ops = {
        .is_dst_valid           =       hugetlb_is_dst_valid,
        .increment              =       hugetlb_mfill_size,
+       .failed_do_unlock       =       hugetlb_failed_do_unlock,
 };
 #endif
 
index 385dd39f93370b00805a8a61af928e3688912824..86db6aa3b8e50c2a0d9f49c9c733b9fbfcc4e290 100644 (file)
@@ -5210,6 +5210,7 @@ static int shmem_error_remove_folio(struct address_space *mapping,
 static const struct vm_uffd_ops shmem_uffd_ops = {
        .is_dst_valid           =       shmem_is_dst_valid,
        .increment              =       mfill_size,
+       .failed_do_unlock       =       uffd_failed_do_unlock,
 };
 #endif
 
index bc82a67d478215f306367b1b4475062a16a0cd9c..47b169d25eb8cab1c59160a4293392343c9029d0 100644 (file)
@@ -508,8 +508,9 @@ static ssize_t uffd_def_is_dst_valid(struct vm_area_struct *dst_vma,
 
 /* Anon vma ops */
 static const struct vm_uffd_ops default_uffd_ops = {
-       .is_dst_valid   = uffd_def_is_dst_valid,
-       .increment      = mfill_size,
+       .is_dst_valid           = uffd_def_is_dst_valid,
+       .increment              = mfill_size,
+       .failed_do_unlock       = uffd_failed_do_unlock,
 };
 
 static inline const struct vm_uffd_ops *vma_get_uffd_ops(struct vm_area_struct *vma)
@@ -520,6 +521,27 @@ static inline const struct vm_uffd_ops *vma_get_uffd_ops(struct vm_area_struct *
        return &default_uffd_ops;
 }
 
+inline ssize_t
+uffd_failed_do_unlock(struct userfaultfd_ctx *ctx, struct vm_area_struct *dst,
+                 struct folio *folio, unsigned long src_addr)
+{
+       ssize_t err;
+       void *kaddr;
+
+       up_read(&ctx->map_changing_lock);
+       uffd_mfill_unlock(dst);
+       VM_WARN_ON_ONCE(!folio);
+
+       kaddr = kmap_local_folio(folio, 0);
+       err = copy_from_user(kaddr, (const void __user *) src_addr, PAGE_SIZE);
+       kunmap_local(kaddr);
+       if (unlikely(err))
+               return -EFAULT;
+
+       flush_dcache_folio(folio);
+       return 0;
+}
+
 /*
  * Global checks live here, specific checks are in the callback.
  */
@@ -559,18 +581,6 @@ uffd_ctx_lock_and_validate_dst(struct userfaultfd_ctx *ctx,
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
-static
-ssize_t hugetlb_failed_do_unlock(struct userfaultfd_ctx *ctx,
-               struct vm_area_struct *dst, struct folio *folio,
-               unsigned long src_addr)
-{
-       up_read(&ctx->map_changing_lock);
-       uffd_mfill_unlock(dst);
-       VM_WARN_ON_ONCE(!folio);
-
-       return copy_folio_from_user(folio,(const void __user *)src_addr, true);
-}
-
 /*
  * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
  * called with either vma-lock or mmap_lock held, it will release the lock
@@ -640,8 +650,8 @@ retry:
                cond_resched();
 
                if (unlikely(err == -ENOENT)) {
-                       err = hugetlb_failed_do_unlock(ctx, dst_vma, folio,
-                                                      src_addr);
+                       err = uffd_ops->failed_do_unlock(ctx, dst_vma, folio,
+                                                        src_addr);
                        if (unlikely(err)) {
                                err = -EFAULT;
                                goto out;
@@ -685,27 +695,6 @@ extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
                                    uffd_flags_t flags);
 #endif /* CONFIG_HUGETLB_PAGE */
 
-static inline ssize_t
-uffd_failed_do_unlock(struct userfaultfd_ctx *ctx, struct vm_area_struct *dst,
-                 struct folio *folio, unsigned long src_addr)
-{
-       ssize_t err;
-       void *kaddr;
-
-       up_read(&ctx->map_changing_lock);
-       uffd_mfill_unlock(dst);
-       VM_WARN_ON_ONCE(!folio);
-
-       kaddr = kmap_local_folio(folio, 0);
-       err = copy_from_user(kaddr, (const void __user *) src_addr, PAGE_SIZE);
-       kunmap_local(kaddr);
-       if (unlikely(err))
-               return -EFAULT;
-
-       flush_dcache_folio(folio);
-       return 0;
-}
-
 ssize_t
 uffd_get_dst_pmd(struct vm_area_struct *dst_vma, unsigned long dst_addr,
                 pmd_t **dst_pmd)
@@ -840,8 +829,8 @@ retry:
                cond_resched();
 
                if (unlikely(err == -ENOENT)) {
-                       err = uffd_failed_do_unlock(ctx, dst_vma, folio,
-                                                   src_addr);
+                       err = uffd_ops->failed_do_unlock(ctx, dst_vma, folio,
+                                                        src_addr);
                        if (unlikely(err))
                                goto out;