From: Liam R. Howlett Date: Wed, 29 Oct 2025 16:37:21 +0000 (-0400) Subject: mm/userfaultfd: Create page_shift uffd_ops X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=ffb79ebf2ca995d6ffd229ba313d1aac69f1d90d;p=users%2Fjedix%2Flinux-maple.git mm/userfaultfd: Create page_shift uffd_ops Page shift differs for hugetlb, so modularize the caller. Signed-off-by: Liam R. Howlett --- diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 6f919008bdde..dbf5f7072899 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -116,6 +116,7 @@ struct uffd_info { } /* VMA userfaultfd operations */ +unsigned int uffd_page_shift(struct vm_area_struct *vma); int uffd_writeprotect(struct uffd_info *info); ssize_t uffd_failed_do_unlock(struct userfaultfd_ctx *ctx, struct vm_area_struct *dst, struct folio *folio, @@ -134,6 +135,7 @@ struct vm_uffd_ops { ssize_t (*failed_do_unlock)(struct userfaultfd_ctx *ctx, struct vm_area_struct *dst, struct folio *folio, unsigned long src_addr); + unsigned int (*page_shift)(struct vm_area_struct *src_vma); }; #define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0cf9c6907468..a725d6ba554e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5529,6 +5529,11 @@ static ssize_t hugetlb_failed_do_unlock(struct userfaultfd_ctx *ctx, return copy_folio_from_user(folio,(const void __user *)src_addr, true); } +static inline unsigned int hugetlb_page_shift(struct vm_area_struct *vma) +{ + return huge_page_shift(hstate_vma(vma)); +} + static int hugetlb_mfill_pte_poison(struct uffd_info *info); static int hugetlb_mfill_pte_continue(struct uffd_info *info); static int hugetlb_mfill_atomic_pte_copy(struct uffd_info *info); @@ -5543,6 +5548,7 @@ static const struct vm_uffd_ops hugetlb_uffd_ops = { .is_dst_valid = hugetlb_is_dst_valid, .increment = hugetlb_mfill_size, .failed_do_unlock = hugetlb_failed_do_unlock, + .page_shift = hugetlb_page_shift, }; #endif diff --git a/mm/shmem.c b/mm/shmem.c index 3ddf7f42e1fa..55d2bb73f8c6 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -5277,6 +5277,7 @@ static const struct vm_uffd_ops shmem_uffd_ops = { .is_dst_valid = shmem_is_dst_valid, .increment = mfill_size, .failed_do_unlock = uffd_failed_do_unlock, + .page_shift = uffd_page_shift, }; #endif diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 2b77fe81f122..7614fe039887 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -479,8 +479,14 @@ static const struct vm_uffd_ops default_uffd_ops = { .is_dst_valid = uffd_def_is_dst_valid, .increment = mfill_size, .failed_do_unlock = uffd_failed_do_unlock, + .page_shift = uffd_page_shift, }; +unsigned int uffd_page_shift(struct vm_area_struct *vma) +{ + return PAGE_SHIFT; +} + static inline const struct vm_uffd_ops *vma_get_uffd_ops(struct vm_area_struct *vma) { if (vma->vm_ops && vma->vm_ops->userfaultfd_ops) @@ -998,8 +1004,13 @@ static long move_present_ptes(struct mm_struct *mm, } arch_leave_lazy_mmu_mode(); - if (src_addr > src_start) - flush_tlb_range(src_vma, src_start, src_addr); + if (src_addr > src_start) { + unsigned int shift; + const struct vm_uffd_ops *uffd_ops = vma_get_uffd_ops(src_vma); + + shift = uffd_ops->page_shift(src_vma); + flush_tlb_mm_range(mm, src_start, src_addr, shift, true); + } if (src_folio) folio_unlock(src_folio);