From: Mike Kravetz
Date: Thu, 12 Jan 2017 01:19:15 +0000 (+1100)
Subject: userfaultfd: hugetlbfs: fix __mcopy_atomic_hugetlb retry/error processing
X-Git-Tag: v4.1.12-92~16^2~8
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=bd0a437c4e3fee8e751a6056fd1de383925de6b3;p=users%2Fjedix%2Flinux-maple.git

userfaultfd: hugetlbfs: fix __mcopy_atomic_hugetlb retry/error processing

Orabug: 21685254

The new routine copy_huge_page_from_user() uses kmap_atomic() to map
PAGE_SIZE pages.  However, this prevents page faults in the subsequent
call to copy_from_user().  This is OK in the case where the routine is
called with mmap_sem held.  However, in the retry case the copy is
performed after mmap_sem has been dropped, and page faults must be
allowed there.  So, add a new argument, allow_pagefault, to indicate
whether the routine should allow page faults.

Link: http://lkml.kernel.org/r/20161216144821.5183-20-aarcange@redhat.com
Signed-off-by: Mike Kravetz
Signed-off-by: Andrea Arcangeli
Cc: "Dr. David Alan Gilbert"
Cc: Hillf Danton
Cc: Michael Rapoport
Cc: Mike Rapoport
Cc: Pavel Emelyanov
Signed-off-by: Andrew Morton
(cherry picked from linux-next next-20170117
 commit 939d5ff6c4e48f72b3261baf8d4b82f54caf4561)
Signed-off-by: Mike Kravetz
Reviewed-by: Dhaval Giani
Signed-off-by: Dhaval Giani
---

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 288395e54996c..9cb12ef4c42ec 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2164,7 +2164,8 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned int pages_per_huge_page);
 extern long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
-				unsigned int pages_per_huge_page);
+				unsigned int pages_per_huge_page,
+				bool allow_pagefault);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 85b55f2df8fc0..5098c726714b0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3840,7 +3840,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
 		ret = copy_huge_page_from_user(page,
 				(const void __user *) src_addr,
-				pages_per_huge_page(h));
+				pages_per_huge_page(h), false);
 
 		/* fallback to copy_from_user outside mmap_sem */
 		if (unlikely(ret)) {
diff --git a/mm/memory.c b/mm/memory.c
index 739a6a8126932..df27b564c41f6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3852,7 +3852,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 
 long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
-				unsigned int pages_per_huge_page)
+				unsigned int pages_per_huge_page,
+				bool allow_pagefault)
 {
 	void *src = (void *)usr_src;
 	void *page_kaddr;
@@ -3860,11 +3861,17 @@ long copy_huge_page_from_user(struct page *dst_page,
 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
 
 	for (i = 0; i < pages_per_huge_page; i++) {
-		page_kaddr = kmap_atomic(dst_page + i);
+		if (allow_pagefault)
+			page_kaddr = kmap(dst_page + i);
+		else
+			page_kaddr = kmap_atomic(dst_page + i);
 		rc = copy_from_user(page_kaddr,
 				(const void __user *)(src + i * PAGE_SIZE),
 				PAGE_SIZE);
-		kunmap_atomic(page_kaddr);
+		if (allow_pagefault)
+			kunmap(dst_page + i);
+		else
+			kunmap_atomic(page_kaddr);
 
 		ret_val -= (PAGE_SIZE - rc);
 		if (rc)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 1b36e191e833e..8b1c09b5f8cb8 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -274,7 +274,7 @@ retry:
 
 		err = copy_huge_page_from_user(page,
 				(const void __user *)src_addr,
-				pages_per_huge_page(h));
+				pages_per_huge_page(h), true);
 		if (unlikely(err)) {
 			err = -EFAULT;
 			goto out;
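
For context, the retry flow that motivates the new argument looks
roughly like the sketch below.  This is illustrative only and not part
of the patch; it is simplified from the __mcopy_atomic_hugetlb flow in
mm/userfaultfd.c, and the -ENOENT fallback convention shown here is an
assumption based on the hunks above.

	/*
	 * Sketch (illustrative, not part of this patch).  With mmap_sem
	 * held the copy uses kmap_atomic(), so copy_from_user() cannot
	 * fault and may return short; the caller then drops mmap_sem
	 * and redoes the copy with allow_pagefault == true.
	 */
	err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
				       dst_addr, src_addr, &page);
	if (unlikely(err == -ENOENT)) {	/* assumed fallback errno */
		/* Faulting is unsafe under mmap_sem; drop it first. */
		up_read(&dst_mm->mmap_sem);
		err = copy_huge_page_from_user(page,
				(const void __user *)src_addr,
				pages_per_huge_page(h), true);
		if (unlikely(err)) {
			err = -EFAULT;
			goto out;
		}
		down_read(&dst_mm->mmap_sem);
		goto retry;	/* revalidate the VMA and retry */
	}

The kmap()/kunmap() pair in the allow_pagefault case is what makes the
second copy safe: unlike kmap_atomic(), kmap() does not disable page
faults, so copy_from_user() can fault in the source pages.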