unsigned long src_addr);
struct vm_uffd_ops {
+ int (*cont)(struct vm_area_struct *dst_vma, unsigned long dst_addr,
+ uffd_flags_t flags, unsigned long increment);
int (*poison)(struct vm_area_struct *dst_vma,
unsigned long dst_addr, unsigned long increment);
int mfill_atomic_pte_poison(struct vm_area_struct *dst_vma,
unsigned long dst_addr, unsigned long increment);
+int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, uffd_flags_t flags,
+ unsigned long increment);
+
static inline bool vma_can_userfault(struct vm_area_struct *vma,
vm_flags_t vm_flags,
bool wp_async)
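(For context: the new cont callback backs the UFFDIO_CONTINUE ioctl, which resolves minor faults by installing page-table entries for pages that are already present in the page cache rather than allocating and copying new pages. Below is a minimal userspace sketch of the ioctl side, not part of the patch; the resolve_minor_fault() helper is hypothetical and assumes uffd was registered over the faulting range with UFFDIO_REGISTER_MODE_MINOR.)

#include <err.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Hypothetical helper: resolve one minor fault at the page-aligned
 * address 'addr'.  The page must already be in the page cache; the
 * kernel only installs the PTE (the .cont path wired up here). */
static void resolve_minor_fault(int uffd, unsigned long addr,
                                unsigned long page_size)
{
        struct uffdio_continue cont = {
                .range = { .start = addr, .len = page_size },
                .mode  = 0,     /* or UFFDIO_CONTINUE_MODE_DONTWAKE */
        };

        if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
                err(1, "UFFDIO_CONTINUE");
}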
static int hugetlb_mfill_pte_poison(struct vm_area_struct *dst_vma,
unsigned long dst_addr, unsigned long increment);
+static int hugetlb_mfill_pte_continue(struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, uffd_flags_t flags,
+ unsigned long increment);
+
static const struct vm_uffd_ops hugetlb_uffd_ops = {
+ .cont = hugetlb_mfill_pte_continue,
.poison = hugetlb_mfill_pte_poison,
.is_dst_valid = hugetlb_is_dst_valid,
.increment = hugetlb_mfill_size,
unsigned long increment)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
- bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
bool wp_enabled = (flags & MFILL_ATOMIC_WP);
struct hstate *h = hstate_vma(dst_vma);
struct address_space *mapping = dst_vma->vm_file->f_mapping;
bool folio_in_pagecache = false;
u32 hash;
- if (is_continue)
- return hugetlb_mfill_pte_continue(dst_vma, dst_addr, flags,
- increment);
-
ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
&dst_pte, &hash, flags);
if (ret)
#ifdef CONFIG_USERFAULTFD
static const struct vm_uffd_ops shmem_uffd_ops = {
+ .cont = mfill_atomic_pte_continue,
.poison = mfill_atomic_pte_poison,
.is_dst_valid = shmem_is_dst_valid,
.increment = mfill_size,
}
/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
-static int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- uffd_flags_t flags)
+int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, uffd_flags_t flags,
+ unsigned long increment)
{
struct inode *inode = file_inode(dst_vma->vm_file);
pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
/* Anon vma ops */
static const struct vm_uffd_ops default_uffd_ops = {
+ .cont = mfill_atomic_pte_continue,
.poison = mfill_atomic_pte_poison,
.is_dst_valid = uffd_def_is_dst_valid,
.increment = mfill_size,
*/
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
err = uffd_ops->poison(dst_vma, dst_addr, increment);
+ } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
+ err = uffd_ops->cont(dst_vma, dst_addr, flags,
+ increment);
} else if (is_vm_hugetlb_page(dst_vma)) {
err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr,
src_addr, flags, &folio, increment);
- } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
- err = mfill_atomic_pte_continue(dst_vma, dst_addr,
- flags);
} else if (!(dst_vma->vm_flags & VM_SHARED)) {
/*
* The normal page fault path for a shmem will invoke