extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
-/* A combined operation mode + behavior flags. */
-typedef unsigned int __bitwise uffd_flags_t;
-
/* Mutually exclusive modes of operation. */
enum mfill_atomic_mode {
	MFILL_ATOMIC_COPY,
	MFILL_ATOMIC_ZEROPAGE,
	MFILL_ATOMIC_CONTINUE,
	MFILL_ATOMIC_POISON,
	NR_MFILL_ATOMIC_MODES,
};

	void (*complete_register)(struct vm_area_struct *vma);
};
-#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
-#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
-#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
-#define MFILL_ATOMIC_MODE_MASK ((__force uffd_flags_t) (MFILL_ATOMIC_BIT(0) - 1))
-
-static inline bool uffd_flags_mode_is(uffd_flags_t flags, enum mfill_atomic_mode expected)
-{
-	return (flags & MFILL_ATOMIC_MODE_MASK) == ((__force uffd_flags_t) expected);
-}
-
-static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_atomic_mode mode)
-{
-	flags &= ~MFILL_ATOMIC_MODE_MASK;
-	return flags | ((__force uffd_flags_t) mode);
-}
-
-/* Flags controlling behavior. These behavior changes are mode-independent. */
-#define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0)
-
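/*
 * For reference, a sketch of how the removed encoding composed; the
 * values follow from the definitions above, assuming the four modes
 * COPY, ZEROPAGE, CONTINUE and POISON:
 *
 *	NR_MFILL_ATOMIC_MODES  = 4
 *	MFILL_ATOMIC_MODE_BITS = const_ilog2(3) + 1 = 2
 *	MFILL_ATOMIC_MODE_MASK = BIT(2) - 1 = 0x3   (mode in bits 0-1)
 *	MFILL_ATOMIC_WP        = BIT(2)     = 0x4   (first flag bit)
 *
 * so a write-protected copy request was built and tested as:
 *
 *	uffd_flags_t flags = uffd_flags_set_mode(0, MFILL_ATOMIC_COPY);
 *	flags |= MFILL_ATOMIC_WP;                        -> flags == 0x4
 *	uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)     -> true
 */
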
int mfill_atomic_install_pte(pmd_t *dst_pmd, struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp);
ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
			      unsigned long dst_start, unsigned long len);
ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx,
			      unsigned long dst_start, unsigned long len, bool wp);
-extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
-				   unsigned long len, uffd_flags_t flags);
+ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx,
+			    unsigned long dst_start, unsigned long len);
extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
			       unsigned long len, bool enable_wp);
extern long uffd_wp_range(struct vm_area_struct *vma,
	return 0;
}
-static inline ssize_t uffd_info_set_ops(struct uffd_info *info,
-					uffd_flags_t flags)
-{
-	/*
-	 * For shmem mappings, khugepaged is allowed to remove page
-	 * tables under us; pte_offset_map_lock() will deal with that.
-	 */
-	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
-		info->op = info->uffd_ops->poison;
-	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
-		info->op = info->uffd_ops->cont;
-	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
-		info->op = info->uffd_ops->copy;
-		/*
-		 * The normal page fault path for a shmem mapping will
-		 * invoke the fault, fill the hole in the file and COW it
-		 * right away. The result is plain anonymous memory. So
-		 * when we are asked to fill a hole in a MAP_PRIVATE
-		 * shmem mapping, we generate anonymous memory directly
-		 * without actually filling the hole. For the MAP_PRIVATE
-		 * case the robustness check only happens in the page
-		 * table (to verify the pte is still none) and not in
-		 * the radix tree.
-		 */
-	} else {
-		info->op = info->uffd_ops->zeropage;
-	}
-
-	if (!info->op)
-		return -EINVAL;
-
-	return 0;
-}
-
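/*
 * With uffd_info_set_ops() gone, each entry point assigns its op
 * directly. A hypothetical sketch of the copy path under that scheme
 * (mfill_atomic_copy() itself is outside this excerpt; only the poison
 * conversion appears below):
 *
 *	struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start,
 *						 len, ctx, wp);
 *	...
 *	info.op = info.uffd_ops->copy;
 *	err = mfill_loop(&info);
 */
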
static inline ssize_t uffd_info_setup(struct uffd_info *info)
{
	return err;
}
-static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
-					    unsigned long dst_start,
-					    unsigned long src_start,
-					    unsigned long len,
-					    uffd_flags_t flags)
-{
-	ssize_t err;
-	struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, ctx,
-						 flags & MFILL_ATOMIC_WP);
-
-	mfill_atomic_validate(&info);
-	err = mfill_setup(&info);
-	if (err)
-		goto out;
-
-	err = uffd_info_set_ops(&info, flags);
-	if (err) {
-		uffd_ctx_unlock(&info);
-		goto out;
-	}
-
-	err = mfill_loop(&info);
-
-out:
-	VM_WARN_ON_ONCE(info.copied < 0);
-	VM_WARN_ON_ONCE(err > 0);
-	VM_WARN_ON_ONCE(!info.copied && !err);
-	return info.copied ? info.copied : err;
-}
-
static __always_inline ssize_t mfill_copy_loop(struct uffd_info *info)
{
	ssize_t err = -EINVAL;
	return info.copied ? info.copied : err;
}
-ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
-			    unsigned long len, uffd_flags_t flags)
+ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx,
+			    unsigned long dst_start, unsigned long len)
{
-	return mfill_atomic(ctx, start, 0, len,
-			    uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
+	ssize_t err;
+	struct uffd_info info = UFFD_STRUCT_INIT(dst_start, 0, len, ctx,
+						 false);
+
+	mfill_atomic_validate(&info);
+	err = mfill_setup(&info);
+	if (err)
+		goto out;
+
+	info.op = info.uffd_ops->poison;
+	err = mfill_loop(&info);
+
+out:
+	VM_WARN_ON_ONCE(info.copied < 0);
+	VM_WARN_ON_ONCE(err > 0);
+	VM_WARN_ON_ONCE(!info.copied && !err);
+	return info.copied ? info.copied : err;
}
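/*
 * The path above is reached from userspace through the UFFDIO_POISON
 * ioctl. A minimal sketch of a caller (hypothetical helper; error
 * handling elided, uffd is an open userfaultfd with the range already
 * registered):
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int poison_range(int uffd, unsigned long start, unsigned long len)
{
	struct uffdio_poison args = {
		.range = { .start = start, .len = len },
		.mode = 0,	/* or UFFDIO_POISON_MODE_DONTWAKE */
	};

	/* Returns 0 on success; args.updated reports bytes handled. */
	if (ioctl(uffd, UFFDIO_POISON, &args))
		return -1;
	return 0;
}
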
long uffd_wp_range(struct vm_area_struct *dst_vma,