+/* The 'colour' (ie low bits) within a PMD of a page offset.  */
+#define PG_PMD_COLOUR  ((PMD_SIZE >> PAGE_SHIFT) - 1)
+#define PG_PMD_NR      (PMD_SIZE >> PAGE_SHIFT)
 
 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
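
Illustration only, not part of the patch: with 4KiB pages and 2MiB PMDs,
PG_PMD_COLOUR is 511 and PG_PMD_NR is 512.  The index arithmetic in the
conversions below is the page-index form of the old byte arithmetic
(names here are illustrative):

	pgoff_t pmd_start = index & ~PG_PMD_COLOUR;
	/* was: (index << PAGE_SHIFT) & PMD_MASK */
	unmap_mapping_pages(mapping, pmd_start, PG_PMD_NR, false);
	/* was: unmap_mapping_range(..., PMD_SIZE, 0) */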
 
                 * unmapped.
                 */
                if (pmd_downgrade && dax_is_zero_entry(entry))
-                       unmap_mapping_range(mapping,
-                               (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+                       unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+                                                       PG_PMD_NR, false);
 
                err = radix_tree_preload(
                                mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
        if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
                /* we are replacing a zero page with block mapping */
                if (dax_is_pmd_entry(entry))
-                       unmap_mapping_range(mapping,
-                                       (vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
-                                       PMD_SIZE, 0);
+                       unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+                                                       PG_PMD_NR, false);
                else /* pte entry */
-                       unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-                                       PAGE_SIZE, 0);
+                       unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
        }
 
        spin_lock_irq(&mapping->tree_lock);
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
- * more often than one might expect in the below functions.
- */
-#define PG_PMD_COLOUR  ((PMD_SIZE >> PAGE_SHIFT) - 1)
-
 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
                void *entry)
 {
 
                unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma);
-void unmap_mapping_range(struct address_space *mapping,
-               loff_t const holebegin, loff_t const holelen, int even_cows);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
                             unsigned long *start, unsigned long *end,
                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
                        void *buf, int len, int write);
 
-static inline void unmap_shared_mapping_range(struct address_space *mapping,
-               loff_t const holebegin, loff_t const holelen)
-{
-       unmap_mapping_range(mapping, holebegin, holelen, 0);
-}
-
 extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                            unsigned long address, unsigned int fault_flags,
                            bool *unlocked);
+void unmap_mapping_pages(struct address_space *mapping,
+               pgoff_t start, pgoff_t nr, bool even_cows);
+void unmap_mapping_range(struct address_space *mapping,
+               loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
 static inline int handle_mm_fault(struct vm_area_struct *vma,
                unsigned long address, unsigned int flags)
        BUG();
        return -EFAULT;
 }
+static inline void unmap_mapping_pages(struct address_space *mapping,
+               pgoff_t start, pgoff_t nr, bool even_cows) { }
+static inline void unmap_mapping_range(struct address_space *mapping,
+               loff_t const holebegin, loff_t const holelen, int even_cows) { }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
-               unsigned int gup_flags);
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+               loff_t const holebegin, loff_t const holelen)
+{
+       unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+               void *buf, int len, unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
                void *buf, int len, unsigned int gup_flags);
 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 
                }
 
                if (page_mapped(page))
-                       unmap_mapping_range(mapping, index << PAGE_SHIFT,
-                                       PAGE_SIZE, 0);
+                       unmap_mapping_pages(mapping, index, 1, false);
 
                spin_lock_irq(&mapping->tree_lock);
 
 
        }
 }
 
+/**
+ * unmap_mapping_pages() - Unmap pages from processes.
+ * @mapping: The address space containing pages to be unmapped.
+ * @start: Index of the first page to be unmapped.
+ * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
+ * @even_cows: Whether to unmap even private COWed pages.
+ *
+ * Unmap the pages in this address space from any userspace process which
+ * has them mmaped.  Generally, you want to remove COWed pages as well when
+ * a file is being truncated, but not when invalidating pages from the page
+ * cache.
+ */
+void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
+               pgoff_t nr, bool even_cows)
+{
+       struct zap_details details = { };
+
+       details.check_mapping = even_cows ? NULL : mapping;
+       details.first_index = start;
+       details.last_index = start + nr - 1;
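+       /*
+        * nr == 0, or an overflowing start + nr, wraps last_index below
+        * start; treat that as "unmap to end of file".
+        */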
+       if (details.last_index < details.first_index)
+               details.last_index = ULONG_MAX;
+
+       i_mmap_lock_write(mapping);
+       if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+               unmap_mapping_range_tree(&mapping->i_mmap, &details);
+       i_mmap_unlock_write(mapping);
+}
+
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
- * address_space corresponding to the specified page range in the underlying
+ * address_space corresponding to the specified byte range in the underlying
  * file.
  *
  * @mapping: the address space containing mmaps to be unmapped.
 void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows)
 {
-       struct zap_details details = { };
        pgoff_t hba = holebegin >> PAGE_SHIFT;
        pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
                        hlen = ULONG_MAX - hba + 1;
        }
 
-       details.check_mapping = even_cows ? NULL : mapping;
-       details.first_index = hba;
-       details.last_index = hba + hlen - 1;
-       if (details.last_index < details.first_index)
-               details.last_index = ULONG_MAX;
-
-       i_mmap_lock_write(mapping);
-       if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
-               unmap_mapping_range_tree(&mapping->i_mmap, &details);
-       i_mmap_unlock_write(mapping);
+       unmap_mapping_pages(mapping, hba, hlen, even_cows);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
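
Illustration only, not part of the patch: the caller conversions in this
patch follow one pattern, with page indices passed straight through instead
of being shifted into byte offsets, and even_cows == false where the old
calls passed 0 (index and nr are illustrative names):

	/* before: byte-based */
	unmap_mapping_range(mapping, (loff_t)index << PAGE_SHIFT,
			    (loff_t)nr << PAGE_SHIFT, 0);
	/* after: index-based */
	unmap_mapping_pages(mapping, index, nr, false);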
 
 
        return -ENOMEM;
 }
 
-void unmap_mapping_range(struct address_space *mapping,
-                        loff_t const holebegin, loff_t const holelen,
-                        int even_cows)
-{
-}
-EXPORT_SYMBOL(unmap_mapping_range);
-
 int filemap_fault(struct vm_fault *vmf)
 {
        BUG();
 
 truncate_cleanup_page(struct address_space *mapping, struct page *page)
 {
        if (page_mapped(page)) {
-               loff_t holelen;
-
-               holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
-               unmap_mapping_range(mapping,
-                                  (loff_t)page->index << PAGE_SHIFT,
-                                  holelen, 0);
+               pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
+               unmap_mapping_pages(mapping, page->index, nr, false);
        }
 
        if (page_has_private(page))
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
-                                       unmap_mapping_range(mapping,
-                                          (loff_t)index << PAGE_SHIFT,
-                                          (loff_t)(1 + end - index)
-                                                        << PAGE_SHIFT,
-                                                        0);
+                                       unmap_mapping_pages(mapping, index,
+                                               (1 + end - index), false);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
-                                       unmap_mapping_range(mapping,
-                                          (loff_t)index << PAGE_SHIFT,
-                                          PAGE_SIZE, 0);
+                                       unmap_mapping_pages(mapping, index,
+                                                               1, false);
                                }
                        }
                        BUG_ON(page_mapped(page));
         * get remapped later.
         */
        if (dax_mapping(mapping)) {
-               unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
-                                   (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+               unmap_mapping_pages(mapping, start, end - start + 1, false);
        }
 out:
        cleancache_invalidate_inode(mapping);