Convert the vm_uffd_ops cont() callback and its implementations to take a struct uffd_info pointer instead of passing the destination vma, address, write-protect flag, and increment as separate arguments, matching the copy(), zeropage(), and poison() callbacks. It is cleaner to use the uffd_info struct.
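
For reference, the converted callbacks only touch the following uffd_info
fields (a minimal sketch, assuming the layout introduced earlier in the
series; the field names below simply mirror the accesses in this patch):

	struct uffd_info {
		struct vm_area_struct *dst_vma;	/* destination VMA */
		unsigned long dst_addr;		/* destination address */
		unsigned long increment;	/* step size (PAGE_SIZE or huge page size) */
		bool wp;			/* write-protect (MFILL_ATOMIC_WP) requested */
		const struct vm_uffd_ops *uffd_ops;	/* per-VMA operations */
		/* ... other fields used by copy()/zeropage()/poison() ... */
	};
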
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
struct vm_uffd_ops {
int (*copy)(struct uffd_info *info);
int (*zeropage)(struct uffd_info *info);
- int (*cont)(struct vm_area_struct *dst_vma, unsigned long dst_addr,
- bool wp, unsigned long increment);
+ int (*cont)(struct uffd_info *info);
int (*poison)(struct uffd_info *info);
/* Required features below */
ssize_t (*is_dst_valid)(struct vm_area_struct *dst_vma,
return PAGE_SIZE;
}
int mfill_atomic_pte_poison(struct uffd_info *info);
-int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma,
- unsigned long dst_addr, bool wp,
- unsigned long increment);
-
+int mfill_atomic_pte_continue(struct uffd_info *info);
int mfill_atomic_pte_copy(struct uffd_info *info);
int mfill_atomic_pte_zeropage(struct uffd_info *info);
}
static int hugetlb_mfill_pte_poison(struct uffd_info *info);
-static int hugetlb_mfill_pte_continue(struct vm_area_struct *dst_vma,
- unsigned long dst_addr, bool wp_enabled,
- unsigned long increment);
-
+static int hugetlb_mfill_pte_continue(struct uffd_info *info);
static int hugetlb_mfill_atomic_pte_copy(struct uffd_info *info);
static const struct vm_uffd_ops hugetlb_uffd_ops = {
return 0;
}
-static int hugetlb_mfill_pte_continue(struct vm_area_struct *dst_vma,
- unsigned long dst_addr, bool wp_enabled,
- unsigned long increment)
+static int hugetlb_mfill_pte_continue(struct uffd_info *info)
{
+ struct vm_area_struct *dst_vma = info->dst_vma;
+ unsigned long dst_addr = info->dst_addr;
struct mm_struct *dst_mm = dst_vma->vm_mm;
struct hstate *h = hstate_vma(dst_vma);
struct address_space *mapping = dst_vma->vm_file->f_mapping;
struct folio *folio;
u32 hash;
- ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
- &dst_pte, &hash, wp_enabled);
+ ret = hugetlb_mfill_prepare(dst_vma, dst_addr, info->increment,
+ &dst_pte, &hash, info->wp);
if (ret)
return ret;
* For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
* with wp flag set, don't set pte write bit.
*/
- _dst_pte = make_huge_pte(dst_vma, folio, !wp_enabled && vm_shared);
+ _dst_pte = make_huge_pte(dst_vma, folio, !info->wp && vm_shared);
/*
* Always mark UFFDIO_COPY page dirty; note that this may not be
* extremely important for hugetlbfs for now since swapping is not
_dst_pte = huge_pte_mkdirty(_dst_pte);
_dst_pte = pte_mkyoung(_dst_pte);
- if (wp_enabled)
+ if (info->wp)
_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
}
/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
-int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma,
- unsigned long dst_addr, bool wp,
- unsigned long increment)
+int mfill_atomic_pte_continue(struct uffd_info *info)
{
+ struct vm_area_struct *dst_vma = info->dst_vma;
struct inode *inode = file_inode(dst_vma->vm_file);
- pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+ pgoff_t pgoff = linear_page_index(dst_vma, info->dst_addr);
pmd_t *dst_pmd;
struct folio *folio;
struct page *page;
int ret;
- ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
+ ret = uffd_get_dst_pmd(dst_vma, info->dst_addr, &dst_pmd);
if (ret)
return ret;
goto out_release;
}
- ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
- page, false, wp);
+ ret = mfill_atomic_install_pte(dst_pmd, dst_vma, info->dst_addr,
+ page, false, info->wp);
if (ret)
goto out_release;
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
err = info.uffd_ops->poison(&info);
} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
- err = info.uffd_ops->cont(dst_vma, info.dst_addr,
- flags & MFILL_ATOMIC_WP,
- increment);
+ err = info.uffd_ops->cont(&info);
} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
err = info.uffd_ops->copy(&info);
/*