return 0;
}
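+
+/*
+ * Look up or allocate the destination PMD for @dst_addr and make sure a
+ * page table is present, refusing THP or otherwise unexpected PMD entries.
+ * On success, *@dst_pmd is set and 0 is returned; otherwise a negative
+ * error code is returned.
+ */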
+static inline ssize_t
+uffd_get_dst_pmd(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, pmd_t **dst_pmd)
+{
+ pmd_t dst_pmdval;
+
+ *dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
+ if (unlikely(!*dst_pmd))
+ return -ENOMEM;
+
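+ /* Allocate a page table if none is installed in the PMD yet. */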
+ dst_pmdval = pmdp_get_lockless(*dst_pmd);
+ if (unlikely(pmd_none(dst_pmdval)) &&
+ unlikely(__pte_alloc(dst_mm, *dst_pmd)))
+ return -ENOMEM;
+
+ dst_pmdval = pmdp_get_lockless(*dst_pmd);
+ /*
+ * If the dst_pmd is THP don't override it and just be strict.
+ * (This includes the case where the PMD used to be THP and
+ * changed back to none after __pte_alloc().)
+ */
+ if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval)))
+ return -EEXIST;
+
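+ /* Give up if the PMD does not point to a valid page table. */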
+ if (unlikely(pmd_bad(dst_pmdval)))
+ return -EFAULT;
+
+ return 0;
+}
+
static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
unsigned long dst_start,
unsigned long src_start,
goto out_unlock;
while (src_addr < src_start + len) {
- pmd_t dst_pmdval;
-
VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
- dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
- if (unlikely(!dst_pmd)) {
- err = -ENOMEM;
+ err = uffd_get_dst_pmd(dst_mm, dst_vma, dst_addr, &dst_pmd);
+ if (err)
break;
- }
- dst_pmdval = pmdp_get_lockless(dst_pmd);
- if (unlikely(pmd_none(dst_pmdval)) &&
- unlikely(__pte_alloc(dst_mm, dst_pmd))) {
- err = -ENOMEM;
- break;
- }
- dst_pmdval = pmdp_get_lockless(dst_pmd);
- /*
- * If the dst_pmd is THP don't override it and just be strict.
- * (This includes the case where the PMD used to be THP and
- * changed back to none after __pte_alloc().)
- */
- if (unlikely(!pmd_present(dst_pmdval) ||
- pmd_trans_huge(dst_pmdval))) {
- err = -EEXIST;
- break;
- }
- if (unlikely(pmd_bad(dst_pmdval))) {
- err = -EFAULT;
- break;
- }
/*
* For shmem mappings, khugepaged is allowed to remove page
* tables under us; pte_offset_map_lock() will deal with that.