www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
userfaultfd: hugetlbfs: fix __mcopy_atomic_hugetlb retry/error processing
author Mike Kravetz <mike.kravetz@oracle.com>
Thu, 12 Jan 2017 01:19:15 +0000 (12:19 +1100)
committer Dhaval Giani <dhaval.giani@oracle.com>
Fri, 20 Jan 2017 18:55:48 +0000 (13:55 -0500)
Orabug: 21685254

The new routine copy_huge_page_from_user() uses kmap_atomic() to map
PAGE_SIZE pages.  However, this prevents page faults in the subsequent
call to copy_from_user().  This is OK in the case where the routine is
called with mmap_sem held.  However, in another case we want to allow
page faults.  So, add a new argument allow_pagefault to indicate if the
routine should allow page faults.

Link: http://lkml.kernel.org/r/20161216144821.5183-20-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from linux-next next-20170117
 commit 939d5ff6c4e48f72b3261baf8d4b82f54caf4561)
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Dhaval Giani <dhaval.giani@oracle.com>
Signed-off-by: Dhaval Giani <dhaval.giani@oracle.com>
include/linux/mm.h
mm/hugetlb.c
mm/memory.c
mm/userfaultfd.c

index 288395e54996c31eb710d6bf6faeef80afdc5acb..9cb12ef4c42ec56656e98323422892b54338a19a 100644 (file)
@@ -2164,7 +2164,8 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned int pages_per_huge_page);
 extern long copy_huge_page_from_user(struct page *dst_page,
                                const void __user *usr_src,
-                               unsigned int pages_per_huge_page);
+                               unsigned int pages_per_huge_page,
+                               bool allow_pagefault);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
index 85b55f2df8fc007babe1e0f5297022c64b216cda..5098c726714b05f61bccaf57cc93987d923b5c5a 100644 (file)
@@ -3840,7 +3840,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                ret = copy_huge_page_from_user(page,
                                                (const void __user *) src_addr,
-                                               pages_per_huge_page(h));
+                                               pages_per_huge_page(h), false);
 
                /* fallback to copy_from_user outside mmap_sem */
                if (unlikely(ret)) {
index 739a6a81269326721753ba203d939aa2c934426e..df27b564c41f68c15bb58b6d233aeadd43464673 100644 (file)
@@ -3852,7 +3852,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 
 long copy_huge_page_from_user(struct page *dst_page,
                                const void __user *usr_src,
-                               unsigned int pages_per_huge_page)
+                               unsigned int pages_per_huge_page,
+                               bool allow_pagefault)
 {
        void *src = (void *)usr_src;
        void *page_kaddr;
@@ -3860,11 +3861,17 @@ long copy_huge_page_from_user(struct page *dst_page,
        unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
 
        for (i = 0; i < pages_per_huge_page; i++) {
-               page_kaddr = kmap_atomic(dst_page + i);
+               if (allow_pagefault)
+                       page_kaddr = kmap(dst_page + i);
+               else
+                       page_kaddr = kmap_atomic(dst_page + i);
                rc = copy_from_user(page_kaddr,
                                (const void __user *)(src + i * PAGE_SIZE),
                                PAGE_SIZE);
-               kunmap_atomic(page_kaddr);
+               if (allow_pagefault)
+                       kunmap(page_kaddr);
+               else
+                       kunmap_atomic(page_kaddr);
 
                ret_val -= (PAGE_SIZE - rc);
                if (rc)
index 1b36e191e833e3c0c0970b8911e1153d60aa26fc..8b1c09b5f8cb859c841e84e0822c10ffced0a8f9 100644 (file)
@@ -274,7 +274,7 @@ retry:
 
                        err = copy_huge_page_from_user(page,
                                                (const void __user *)src_addr,
-                                               pages_per_huge_page(h));
+                                               pages_per_huge_page(h), true);
                        if (unlikely(err)) {
                                err = -EFAULT;
                                goto out;