}
 }
 
+/*
+ * Called while mremap() is moving @vma. If the vma is registered with a
+ * userfaultfd whose userspace asked for remap notifications
+ * (UFFD_FEATURE_EVENT_REMAP), stash the context in @vm_ctx and take an
+ * extra reference so the ctx stays alive until
+ * mremap_userfaultfd_complete() delivers (or aborts) the event.
+ * @vm_ctx is left untouched otherwise.
+ */
+void mremap_userfaultfd_prep(struct vm_area_struct *vma,
+                            struct vm_userfaultfd_ctx *vm_ctx)
+{
+       struct userfaultfd_ctx *ctx;
+
+       ctx = vma->vm_userfaultfd_ctx.ctx;
+       if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
+               vm_ctx->ctx = ctx;
+               userfaultfd_ctx_get(ctx);
+       }
+}
+
+/*
+ * Called after mremap() finished. Delivers a UFFD_EVENT_REMAP message
+ * describing the move (@from -> @to, @len bytes) to the monitor, then
+ * waits for it to be read. Consumes the ctx reference taken by
+ * mremap_userfaultfd_prep(); a no-op if no ctx was stashed there.
+ */
+void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx vm_ctx,
+                                unsigned long from, unsigned long to,
+                                unsigned long len)
+{
+       struct userfaultfd_ctx *ctx = vm_ctx.ctx;
+       struct userfaultfd_wait_queue ewq;
+
+       if (!ctx)
+               return;
+
+       /*
+        * A non-page-aligned "to" is mremap()'s error return, not an
+        * address: the move never happened, so drop the reference and
+        * skip the event.
+        */
+       if (to & ~PAGE_MASK) {
+               userfaultfd_ctx_put(ctx);
+               return;
+       }
+
+       msg_init(&ewq.msg);
+
+       ewq.msg.event = UFFD_EVENT_REMAP;
+       ewq.msg.arg.remap.from = from;
+       ewq.msg.arg.remap.to = to;
+       ewq.msg.arg.remap.len = len;
+
+       /* Blocks until the monitor has consumed the event. */
+       userfaultfd_event_wait_completion(ctx, &ewq);
+}
+
 static int userfaultfd_release(struct inode *inode, struct file *file)
 {
        struct userfaultfd_ctx *ctx = file->private_data;
 
 extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
 extern void dup_userfaultfd_complete(struct list_head *);
 
+extern void mremap_userfaultfd_prep(struct vm_area_struct *,
+                                   struct vm_userfaultfd_ctx *);
+extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx,
+                                       unsigned long from, unsigned long to,
+                                       unsigned long len);
+
 #else /* CONFIG_USERFAULTFD */
 
 /* mm helpers */
 {
 }
 
+/* No-op stub when CONFIG_USERFAULTFD is disabled. */
+static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
+                                          struct vm_userfaultfd_ctx *ctx)
+{
+}
+
+/* No-op stub when CONFIG_USERFAULTFD is disabled. */
+static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx ctx,
+                                              unsigned long from,
+                                              unsigned long to,
+                                              unsigned long len)
+{
+}
 #endif /* CONFIG_USERFAULTFD */
 
 #endif /* _LINUX_USERFAULTFD_K_H */
 
  * means the userland is reading).
  */
 #define UFFD_API ((__u64)0xAA)
-#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK)
+#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK |       \
+                          UFFD_FEATURE_EVENT_REMAP)
 #define UFFD_API_IOCTLS                                \
        ((__u64)1 << _UFFDIO_REGISTER |         \
         (__u64)1 << _UFFDIO_UNREGISTER |       \
                        __u32   ufd;
                } fork;
 
+               struct {
+                       __u64   from;
+                       __u64   to;
+                       __u64   len;
+               } remap;
+
                struct {
                        /* unused reserved fields */
                        __u64   reserved1;
  */
 #define UFFD_EVENT_PAGEFAULT   0x12
 #define UFFD_EVENT_FORK                0x13
+#define UFFD_EVENT_REMAP       0x14
 
 /* flags for UFFD_EVENT_PAGEFAULT */
 #define UFFD_PAGEFAULT_FLAG_WRITE      (1<<0)  /* If this was a write fault */
         */
 #define UFFD_FEATURE_PAGEFAULT_FLAG_WP         (1<<0)
 #define UFFD_FEATURE_EVENT_FORK                        (1<<1)
+#define UFFD_FEATURE_EVENT_REMAP               (1<<2)
        __u64 features;
 
        __u64 ioctls;
 
 #include <linux/mmu_notifier.h>
 #include <linux/uaccess.h>
 #include <linux/mm-arch-hooks.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
 static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
-               unsigned long new_len, unsigned long new_addr, bool *locked)
+               unsigned long new_len, unsigned long new_addr,
+               bool *locked, struct vm_userfaultfd_ctx *uf)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
                old_addr = new_addr;
                new_addr = err;
        } else {
+               mremap_userfaultfd_prep(new_vma, uf);
                arch_remap(mm, old_addr, old_addr + old_len,
                           new_addr, new_addr + new_len);
        }
 }
 
 static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
-               unsigned long new_addr, unsigned long new_len, bool *locked)
+               unsigned long new_addr, unsigned long new_len, bool *locked,
+               struct vm_userfaultfd_ctx *uf)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        if (offset_in_page(ret))
                goto out1;
 
-       ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
+       ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf);
        if (!(offset_in_page(ret)))
                goto out;
 out1:
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        bool locked = false;
+       struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
 
        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                return ret;
 
        if (flags & MREMAP_FIXED) {
                ret = mremap_to(addr, old_len, new_addr, new_len,
-                               &locked);
+                               &locked, &uf);
                goto out;
        }
 
                        goto out;
                }
 
-               ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
+               ret = move_vma(vma, addr, old_len, new_len, new_addr,
+                              &locked, &uf);
        }
 out:
        if (offset_in_page(ret)) {
        up_write(¤t->mm->mmap_sem);
        if (locked && new_len > old_len)
                mm_populate(new_addr + old_len, new_len - old_len);
+       mremap_userfaultfd_complete(uf, addr, new_addr, old_len);
        return ret;
 }