www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
dax: don't use set_huge_zero_page()
author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tue, 8 Sep 2015 21:59:34 +0000 (14:59 -0700)
committer: Dan Duval <dan.duval@oracle.com>
Wed, 7 Dec 2016 17:19:48 +0000 (12:19 -0500)
Orabug: 22913653

This is another place where DAX assumed that pgtable_t was a pointer.
Open code the important parts of set_huge_zero_page() in DAX and make
set_huge_zero_page() static again.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit d295e3415a88ae63a37a22652808b20c7fcb970e)
Signed-off-by: Dan Duval <dan.duval@oracle.com>
fs/dax.c
include/linux/huge_mm.h
mm/huge_memory.c

index 9772426fd5d6ed254cb4db9ac9ded70b0a0a23aa..c803abaf5d27aa1af3735ca4bcde458a3b749146 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -585,18 +585,24 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
 
        if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
-               bool set;
                spinlock_t *ptl;
-               struct mm_struct *mm = vma->vm_mm;
+               pmd_t entry;
                struct page *zero_page = get_huge_zero_page();
+
                if (unlikely(!zero_page))
                        goto fallback;
 
-               ptl = pmd_lock(mm, pmd);
-               set = set_huge_zero_page(NULL, mm, vma, pmd_addr, pmd,
-                                                               zero_page);
-               spin_unlock(ptl);
+               ptl = pmd_lock(vma->vm_mm, pmd);
+               if (!pmd_none(*pmd)) {
+                       spin_unlock(ptl);
+                       goto fallback;
+               }
+
+               entry = mk_pmd(zero_page, vma->vm_page_prot);
+               entry = pmd_mkhuge(entry);
+               set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
                result = VM_FAULT_NOPAGE;
+               spin_unlock(ptl);
        } else {
                sector = bh.b_blocknr << (blkbits - 9);
                length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
index f9b612fec4dd8242ca20fe2d0e45c8b520ad273f..ecb080d6ff42077513f03b95537dc108bded9e07 100644 (file)
@@ -163,9 +163,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
 }
 
 struct page *get_huge_zero_page(void);
-bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
-               struct vm_area_struct *vma, unsigned long haddr,
-               pmd_t *pmd, struct page *zero_page);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
index 366010919c68e1d54370cead54d9a92c4f2ce4b3..365bb9999959d878b557e0cd7b5987f04839fc92 100644 (file)
@@ -767,7 +767,7 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 }
 
 /* Caller must hold page table lock. */
-bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                struct page *zero_page)
 {