www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/userfaultfd: Use uffd_ops continue operations
authorLiam R. Howlett <Liam.Howlett@oracle.com>
Fri, 24 Oct 2025 03:32:05 +0000 (23:32 -0400)
committerLiam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:41:30 +0000 (12:41 -0400)
Add the uffd_ops continue operation for all memory types.  Start using
the uffd_ops cont operation in mfill_atomic().

This further unites hugetlb with the other memory types.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
include/linux/userfaultfd_k.h
mm/hugetlb.c
mm/shmem.c
mm/userfaultfd.c

index fee5a904fbd03c168e1e29842e6c8fa410896fde..54fc7a589b11c2e946fd3e3a853ff6eb45013506 100644 (file)
@@ -98,6 +98,8 @@ ssize_t uffd_failed_do_unlock(struct userfaultfd_ctx *ctx,
                unsigned long src_addr);
 
 struct vm_uffd_ops {
+       int (*cont)(struct vm_area_struct *dst_vma, unsigned long dst_addr,
+                   uffd_flags_t flags, unsigned long increment);
        int (*poison)(struct vm_area_struct *dst_vma,
                unsigned long dst_addr, unsigned long increment);
 
@@ -233,6 +235,10 @@ static inline unsigned long mfill_size(struct vm_area_struct *vma)
 int mfill_atomic_pte_poison(struct vm_area_struct *dst_vma,
                unsigned long dst_addr, unsigned long increment);
 
+int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma,
+               unsigned long dst_addr, uffd_flags_t flags,
+               unsigned long increment);
+
 static inline bool vma_can_userfault(struct vm_area_struct *vma,
                                     vm_flags_t vm_flags,
                                     bool wp_async)
index 78e9affb82c6df73339495bd693a17ac0400bc4d..444ab261e03db7aee8305e294772c4622f0d1693 100644 (file)
@@ -5532,7 +5532,12 @@ static ssize_t hugetlb_failed_do_unlock(struct userfaultfd_ctx *ctx,
 static int hugetlb_mfill_pte_poison(struct vm_area_struct *dst_vma,
                unsigned long dst_addr, unsigned long increment);
 
+static int hugetlb_mfill_pte_continue(struct vm_area_struct *dst_vma,
+               unsigned long dst_addr, uffd_flags_t flags,
+               unsigned long increment);
+
 static const struct vm_uffd_ops hugetlb_uffd_ops = {
+       .cont                   =       hugetlb_mfill_pte_continue,
        .poison                 =       hugetlb_mfill_pte_poison,
        .is_dst_valid           =       hugetlb_is_dst_valid,
        .increment              =       hugetlb_mfill_size,
@@ -7153,7 +7158,6 @@ int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma,
                             unsigned long increment)
 {
        struct mm_struct *dst_mm = dst_vma->vm_mm;
-       bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
        bool wp_enabled = (flags & MFILL_ATOMIC_WP);
        struct hstate *h = hstate_vma(dst_vma);
        struct address_space *mapping = dst_vma->vm_file->f_mapping;
@@ -7168,10 +7172,6 @@ int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma,
        bool folio_in_pagecache = false;
        u32 hash;
 
-       if (is_continue)
-               return hugetlb_mfill_pte_continue(dst_vma, dst_addr, flags,
-                                                 increment);
-
        ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
                                    &dst_pte, &hash, flags);
        if (ret)
index c1816a582e48370ef030ab14e6300818df440e56..923e9521bf6cc4c6d2f194a2a00c75199941341d 100644 (file)
@@ -5208,6 +5208,7 @@ static int shmem_error_remove_folio(struct address_space *mapping,
 
 #ifdef CONFIG_USERFAULTFD
 static const struct vm_uffd_ops shmem_uffd_ops = {
+       .cont                   =       mfill_atomic_pte_continue,
        .poison                 =       mfill_atomic_pte_poison,
        .is_dst_valid           =       shmem_is_dst_valid,
        .increment              =       mfill_size,
index 0891c5dee2d44b0311342af5ce9d330308445e67..48deecc5fc985672bbe97322d0f12eb076cdbac7 100644 (file)
@@ -385,9 +385,9 @@ out:
 }
 
 /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
-static int mfill_atomic_pte_continue( struct vm_area_struct *dst_vma,
-                                    unsigned long dst_addr,
-                                    uffd_flags_t flags)
+int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma,
+               unsigned long dst_addr, uffd_flags_t flags,
+               unsigned long increment)
 {
        struct inode *inode = file_inode(dst_vma->vm_file);
        pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
@@ -507,6 +507,7 @@ static ssize_t uffd_def_is_dst_valid(struct vm_area_struct *dst_vma,
 
 /* Anon vma ops */
 static const struct vm_uffd_ops default_uffd_ops = {
+       .cont                   = mfill_atomic_pte_continue,
        .poison                 = mfill_atomic_pte_poison,
        .is_dst_valid           = uffd_def_is_dst_valid,
        .increment              = mfill_size,
@@ -687,12 +688,12 @@ retry:
                 */
                if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
                        err = uffd_ops->poison(dst_vma, dst_addr, increment);
+               } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
+                       err = uffd_ops->cont(dst_vma, dst_addr, flags,
+                                            increment);
                } else if (is_vm_hugetlb_page(dst_vma)) {
                        err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr,
                                        src_addr, flags, &folio, increment);
-               } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
-                       err = mfill_atomic_pte_continue(dst_vma, dst_addr,
-                                                       flags);
                } else if (!(dst_vma->vm_flags & VM_SHARED)) {
                        /*
                         * The normal page fault path for a shmem will invoke