NR_MFILL_ATOMIC_MODES,
};
+/* VMA userfaultfd operations */
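+/* Default ->failed_do_unlock() implementation (see struct vm_uffd_ops below). */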
+ssize_t uffd_failed_do_unlock(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *dst, struct folio *folio,
+ unsigned long src_addr);
+
struct vm_uffd_ops {
/* Required features below */
ssize_t (*is_dst_valid)(struct vm_area_struct *dst_vma,
unsigned long dst_start, unsigned long len);
unsigned long (*increment)(struct vm_area_struct *vma);
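+ /*
+  * Called when the atomic copy returns -ENOENT: release the uffd
+  * map_changing_lock and the vma/mmap lock, then retry copying from
+  * @src_addr into @folio with page faults allowed.  Returns 0 on
+  * success, non-zero on failure.
+  */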
+ ssize_t (*failed_do_unlock)(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *dst, struct folio *folio,
+ unsigned long src_addr);
};
#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
static ssize_t hugetlb_is_dst_valid(struct vm_area_struct *dst_vma,
unsigned long dst_start, unsigned long len);
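+/*
+ * hugetlb ->failed_do_unlock(): drop ctx->map_changing_lock and the
+ * vma/mmap lock, then redo the copy from userspace with page faults
+ * allowed.  Returns 0 on success, non-zero if the copy was incomplete.
+ */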
+static ssize_t hugetlb_failed_do_unlock(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *dst, struct folio *folio,
+ unsigned long src_addr)
+{
+ up_read(&ctx->map_changing_lock);
+#ifdef CONFIG_PER_VMA_LOCK
+ vma_end_read(dst);
+#else
+ mmap_read_unlock(dst->vm_mm);
+#endif
+
+ VM_WARN_ON_ONCE(!folio);
+
+ return copy_folio_from_user(folio, (const void __user *)src_addr, true);
+}
+
static const struct vm_uffd_ops hugetlb_uffd_ops = {
.is_dst_valid = hugetlb_is_dst_valid,
.increment = hugetlb_mfill_size,
+ .failed_do_unlock = hugetlb_failed_do_unlock,
};
#endif
/* Anon vma ops */
static const struct vm_uffd_ops default_uffd_ops = {
- .is_dst_valid = uffd_def_is_dst_valid,
- .increment = mfill_size,
+ .is_dst_valid = uffd_def_is_dst_valid,
+ .increment = mfill_size,
+ .failed_do_unlock = uffd_failed_do_unlock,
};
static inline const struct vm_uffd_ops *vma_get_uffd_ops(struct vm_area_struct *vma)
return &default_uffd_ops;
}
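+/*
+ * Default ->failed_do_unlock(): release the locks taken for the mfill
+ * operation and copy one page from @src_addr into @folio with page
+ * faults enabled.  Returns 0 on success or -EFAULT.
+ */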
+ssize_t
+uffd_failed_do_unlock(struct userfaultfd_ctx *ctx, struct vm_area_struct *dst,
+ struct folio *folio, unsigned long src_addr)
+{
+ ssize_t err;
+ void *kaddr;
+
+ up_read(&ctx->map_changing_lock);
+ uffd_mfill_unlock(dst);
+ VM_WARN_ON_ONCE(!folio);
+
+ kaddr = kmap_local_folio(folio, 0);
+ err = copy_from_user(kaddr, (const void __user *) src_addr, PAGE_SIZE);
+ kunmap_local(kaddr);
+ if (unlikely(err))
+ return -EFAULT;
+
+ flush_dcache_folio(folio);
+ return 0;
+}
+
/*
* Global checks live here, specific checks are in the callback.
*/
}
#ifdef CONFIG_HUGETLB_PAGE
-static
-ssize_t hugetlb_failed_do_unlock(struct userfaultfd_ctx *ctx,
- struct vm_area_struct *dst, struct folio *folio,
- unsigned long src_addr)
-{
- up_read(&ctx->map_changing_lock);
- uffd_mfill_unlock(dst);
- VM_WARN_ON_ONCE(!folio);
-
- return copy_folio_from_user(folio,(const void __user *)src_addr, true);
-}
-
/*
* mfill_atomic processing for HUGETLB vmas. Note that this routine is
* called with either vma-lock or mmap_lock held, it will release the lock
cond_resched();
if (unlikely(err == -ENOENT)) {
- err = hugetlb_failed_do_unlock(ctx, dst_vma, folio,
- src_addr);
+ err = uffd_ops->failed_do_unlock(ctx, dst_vma, folio,
+ src_addr);
if (unlikely(err)) {
err = -EFAULT;
goto out;
uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */
-static inline ssize_t
-uffd_failed_do_unlock(struct userfaultfd_ctx *ctx, struct vm_area_struct *dst,
- struct folio *folio, unsigned long src_addr)
-{
- ssize_t err;
- void *kaddr;
-
- up_read(&ctx->map_changing_lock);
- uffd_mfill_unlock(dst);
- VM_WARN_ON_ONCE(!folio);
-
- kaddr = kmap_local_folio(folio, 0);
- err = copy_from_user(kaddr, (const void __user *) src_addr, PAGE_SIZE);
- kunmap_local(kaddr);
- if (unlikely(err))
- return -EFAULT;
-
- flush_dcache_folio(folio);
- return 0;
-}
-
ssize_t
uffd_get_dst_pmd(struct vm_area_struct *dst_vma, unsigned long dst_addr,
pmd_t **dst_pmd)
cond_resched();
if (unlikely(err == -ENOENT)) {
- err = uffd_failed_do_unlock(ctx, dst_vma, folio,
- src_addr);
+ err = uffd_ops->failed_do_unlock(ctx, dst_vma, folio,
+ src_addr);
if (unlikely(err))
goto out;