mm: add a pmd_fault handler
author		Matthew Wilcox <willy@linux.intel.com>
		Tue, 8 Sep 2015 21:58:48 +0000 (14:58 -0700)
committer	Dan Duval <dan.duval@oracle.com>
		Wed, 7 Dec 2016 17:19:37 +0000 (12:19 -0500)
Orabug: 22913653

Allow non-anonymous VMAs to provide huge pages in response to a page fault.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit b96375f74a6d4f39fc6cbdc0bce5175115c7f96f)
Signed-off-by: Dan Duval <dan.duval@oracle.com>
NOTE: Moved the new member to the end of the vm_operations_struct and
surrounded its definition with #ifndef __GENKSYMS__/#endif so as
to maintain kABI.  - Dan Duval <dan.duval@oracle.com>
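
For reference, a minimal sketch of the kABI-preservation pattern the note
describes (the struct and member names below are illustrative, not from this
patch): genksyms computes symbol CRCs from the structure layout it parses, so
a member that is appended at the end and hidden behind #ifndef __GENKSYMS__
leaves both the CRCs and the offsets of all existing members unchanged.

	/* Sketch only: some_struct and new_member are hypothetical names. */
	struct some_struct {
		int existing_member;	/* offsets of existing members untouched */
	#ifndef __GENKSYMS__
		int new_member;		/* appended; invisible to genksyms */
	#endif
	};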

include/linux/mm.h
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7a8dd3fbb4d03800de0e6c7a23f60613f07e6901..054265b132a2d9eeacde7ff3a7b75166bef3c1e1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -245,6 +245,7 @@ struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
        void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
        /* notification that a previously read-only page is about to become
@@ -295,6 +296,11 @@ struct vm_operations_struct {
         */
        struct page *(*find_special_page)(struct vm_area_struct *vma,
                                          unsigned long addr);
+
+#ifndef __GENKSYMS__
+       int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
+                                               pmd_t *, unsigned int flags);
+#endif
 };
 
 struct mmu_gather;
diff --git a/mm/memory.c b/mm/memory.c
index 701d9ad45c46f97c194f1750cfb0eeb9be24e785..0a85c3498f813decae5d74004464aee19ec4d303 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3214,6 +3214,27 @@ out:
        return 0;
 }
 
+static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+                       unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+       if (!vma->vm_ops)
+               return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
+       if (vma->vm_ops->pmd_fault)
+               return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+       return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+                       unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
+                       unsigned int flags)
+{
+       if (!vma->vm_ops)
+               return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
+       if (vma->vm_ops->pmd_fault)
+               return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+       return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3316,10 +3337,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pmd)
                return VM_FAULT_OOM;
        if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
-               int ret = VM_FAULT_FALLBACK;
-               if (!vma->vm_ops)
-                       ret = do_huge_pmd_anonymous_page(mm, vma, address,
-                                       pmd, flags);
+               int ret = create_huge_pmd(mm, vma, address, pmd, flags);
                if (!(ret & VM_FAULT_FALLBACK))
                        return ret;
        } else {
@@ -3343,8 +3361,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                             orig_pmd, pmd);
 
                        if (dirty && !pmd_write(orig_pmd)) {
-                               ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
-                                                         orig_pmd);
+                               ret = wp_huge_pmd(mm, vma, address, pmd,
+                                                       orig_pmd, flags);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
                        } else {
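
As an illustration of how the new hook is meant to be used, here is a minimal,
hedged sketch of a driver or filesystem wiring up .pmd_fault; example_vm_ops,
example_pmd_fault and example_can_map_huge are hypothetical names, not part of
this patch.  A real handler either installs a huge page at the faulting PMD
(returning, for example, VM_FAULT_NOPAGE) or returns VM_FAULT_FALLBACK so the
core mm retries the fault with PTE-sized pages.

	#include <linux/mm.h>

	/* Hypothetical driver policy: can this address be backed by a huge page? */
	static bool example_can_map_huge(struct vm_area_struct *vma,
					 unsigned long address)
	{
		return false;			/* placeholder; always fall back */
	}

	/* Signature matches the .pmd_fault member added to vm_operations_struct. */
	static int example_pmd_fault(struct vm_area_struct *vma, unsigned long address,
				     pmd_t *pmd, unsigned int flags)
	{
		if (!example_can_map_huge(vma, address))
			return VM_FAULT_FALLBACK;	/* core mm falls back to PTEs */

		/*
		 * A real implementation would install a huge page at *pmd here
		 * (honouring FAULT_FLAG_WRITE in 'flags') and report success.
		 */
		return VM_FAULT_NOPAGE;
	}

	static const struct vm_operations_struct example_vm_ops = {
		.pmd_fault	= example_pmd_fault,
	};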