www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
userfaultfd: hugetlbfs: userfaultfd_huge_must_wait for hugepmd ranges
authorMike Kravetz <mike.kravetz@oracle.com>
Thu, 12 Jan 2017 01:19:17 +0000 (12:19 +1100)
committerDhaval Giani <dhaval.giani@oracle.com>
Fri, 20 Jan 2017 18:55:51 +0000 (13:55 -0500)
Orabug: 21685254

Add routine userfaultfd_huge_must_wait which has the same functionality as
the existing userfaultfd_must_wait routine.  Only difference is that new
routine must handle page table structure for hugepmd vmas.

Link: http://lkml.kernel.org/r/20161216144821.5183-24-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from linux-next next-20170117
 commit 36a121cb303d54b24bc4e590faf813daec1025d7)
[ Ported to UEK ]
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Dhaval Giani <dhaval.giani@oracle.com>
Signed-off-by: Dhaval Giani <dhaval.giani@oracle.com>
fs/userfaultfd.c

index 871ae23afaee1544f2a803038583a9d22078e716..01ba6be16a47e39d9e6bae369a33db93ccb806ef 100644 (file)
@@ -182,6 +182,49 @@ static inline struct uffd_msg userfault_msg(unsigned long address,
        return msg;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * Same functionality as userfaultfd_must_wait below with modifications for
+ * hugepmd ranges.
+ */
+/*
+ * Returns true when handle_userfault() must keep waiting for userland to
+ * resolve the fault: the huge pte is either still unmapped (none) or, for
+ * a VM_UFFD_WP fault, still write-protected.  Returns false once the pte
+ * has been populated/made writable, so the faulting thread can retry.
+ *
+ * NOTE(review): 'flags' is unused here; it is presumably kept to mirror
+ * the signature of userfaultfd_must_wait() — confirm before removing.
+ */
+static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
+                                        unsigned long address,
+                                        unsigned long flags,
+                                        unsigned long reason)
+{
+       struct mm_struct *mm = ctx->mm;
+       pte_t *pte;
+       bool ret = true;
+
+       /* Caller (handle_userfault) still holds mmap_sem for read here. */
+       VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+
+       /* No huge pte for this address yet -> fault unresolved, must wait. */
+       pte = huge_pte_offset(mm, address);
+       if (!pte)
+               goto out;
+
+       ret = false;
+
+       /*
+        * Lockless access: we're in a wait_event so it's ok if it
+        * changes under us.
+        */
+       /*
+        * NOTE(review): *pte is dereferenced twice below, so the two checks
+        * can observe different values under concurrent updates.  Upstream
+        * later switched to a single huge_ptep_get() snapshot read once into
+        * a local pte_t — consider the same hardening here.
+        */
+       if (huge_pte_none(*pte))
+               ret = true;
+       if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
+               ret = true;
+out:
+       return ret;
+}
+#else
+/*
+ * !CONFIG_HUGETLB_PAGE: no hugetlb VMAs can exist, so handle_userfault()
+ * should never reach this branch; return false ("don't wait") defensively.
+ */
+static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
+                                        unsigned long address,
+                                        unsigned long flags,
+                                        unsigned long reason)
+{
+       return false;   /* should never get here */
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 /*
  * Verify the pagetables are still not ok after having reigstered into
  * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
@@ -354,7 +397,12 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
                          TASK_KILLABLE);
        spin_unlock(&ctx->fault_pending_wqh.lock);
 
-       must_wait = userfaultfd_must_wait(ctx, address, flags, reason);
+       if (!is_vm_hugetlb_page(vma))
+               must_wait = userfaultfd_must_wait(ctx, address, flags,
+                                                 reason);
+       else
+               must_wait = userfaultfd_huge_must_wait(ctx, address, flags,
+                                                               reason);
        up_read(&mm->mmap_sem);
 
        if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&