 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                       unsigned long address, pte_t pte);
 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
-extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
-                                 bool only_end);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
 extern bool
                might_sleep();
 
        if (mm_has_notifiers(range->mm))
-               __mmu_notifier_invalidate_range_end(range, false);
-}
-
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-       if (mm_has_notifiers(range->mm))
-               __mmu_notifier_invalidate_range_end(range, true);
+               __mmu_notifier_invalidate_range_end(range);
 }
 
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
        __young;                                                        \
 })
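
With the only_end variant gone, the header exposes a single
mmu_notifier_invalidate_range_end() entry point, and range_end no longer
forwards to the ->invalidate_range() callback, so a subscriber working off
the start/end pair sees just those two calls during an invalidation. As a
rough illustration of the subscriber side (a minimal sketch, not part of the
patch: the demo_* names and the embedding struct are invented; only
struct mmu_notifier_ops and mmu_notifier_register() are real API):

#include <linux/mmu_notifier.h>

/* Illustrative only: a toy secondary-MMU subscriber. */
struct demo_mirror {
        struct mmu_notifier mn;         /* embedded subscription */
        /* device page-table state would live here */
};

static int demo_invalidate_range_start(struct mmu_notifier *mn,
                                       const struct mmu_notifier_range *range)
{
        /*
         * Stop the device from using [range->start, range->end) and drop
         * any mirrored translations; new faults must wait for range_end.
         */
        return 0;
}

static void demo_invalidate_range_end(struct mmu_notifier *mn,
                                      const struct mmu_notifier_range *range)
{
        /* The primary page tables are stable again; allow refaulting. */
}

static const struct mmu_notifier_ops demo_ops = {
        .invalidate_range_start = demo_invalidate_range_start,
        .invalidate_range_end   = demo_invalidate_range_end,
};

static int demo_mirror_register(struct demo_mirror *m, struct mm_struct *mm)
{
        m->mn.ops = &demo_ops;
        return mmu_notifier_register(&m->mn, mm);
}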
 
-#define        ptep_clear_flush_notify(__vma, __address, __ptep)               \
-({                                                                     \
-       unsigned long ___addr = __address & PAGE_MASK;                  \
-       struct mm_struct *___mm = (__vma)->vm_mm;                       \
-       pte_t ___pte;                                                   \
-                                                                       \
-       ___pte = ptep_clear_flush(__vma, __address, __ptep);            \
-       mmu_notifier_invalidate_range(___mm, ___addr,                   \
-                                       ___addr + PAGE_SIZE);           \
-                                                                       \
-       ___pte;                                                         \
-})
-
-#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)            \
-({                                                                     \
-       unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
-       struct mm_struct *___mm = (__vma)->vm_mm;                       \
-       pmd_t ___pmd;                                                   \
-                                                                       \
-       ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);          \
-       mmu_notifier_invalidate_range(___mm, ___haddr,                  \
-                                     ___haddr + HPAGE_PMD_SIZE);       \
-                                                                       \
-       ___pmd;                                                         \
-})
-
-#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)            \
-({                                                                     \
-       unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;              \
-       struct mm_struct *___mm = (__vma)->vm_mm;                       \
-       pud_t ___pud;                                                   \
-                                                                       \
-       ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);          \
-       mmu_notifier_invalidate_range(___mm, ___haddr,                  \
-                                     ___haddr + HPAGE_PUD_SIZE);       \
-                                                                       \
-       ___pud;                                                         \
-})
-
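
With the *_clear_flush_notify() wrappers removed, a call site clears the
entry with the plain flush primitive and relies on the enclosing
invalidate_range_start()/end() pair. Roughly (a sketch only, not from the
patch: the helper name is invented, page-table locking is omitted, and it
assumes the six-argument mmu_notifier_range_init() form used in this tree):

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Illustrative only: clear one PTE under the notifier bracket. */
static pte_t demo_clear_pte(struct vm_area_struct *vma, unsigned long addr,
                            pte_t *ptep)
{
        struct mmu_notifier_range range;
        unsigned long start = addr & PAGE_MASK;
        pte_t old;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                start, start + PAGE_SIZE);
        mmu_notifier_invalidate_range_start(&range);

        /* Primary MMU: clear the PTE and flush the CPU TLB. */
        old = ptep_clear_flush(vma, addr, ptep);

        /*
         * No separate mmu_notifier_invalidate_range() call here any more;
         * the single range_end is all the caller issues.
         */
        mmu_notifier_invalidate_range_end(&range);
        return old;
}
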
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
 {
 }
 
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-}
-
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
 
        }
 
        flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
-       ptep_clear_flush_notify(vma, addr, pvmw.pte);
+       ptep_clear_flush(vma, addr, pvmw.pte);
        if (new_page)
                set_pte_at_notify(mm, addr, pvmw.pte,
                                  mk_pte(new_page, vma->vm_page_prot));
 
 
        count_vm_event(THP_SPLIT_PUD);
 
-       pudp_huge_clear_flush_notify(vma, haddr, pud);
+       pudp_huge_clear_flush(vma, haddr, pud);
 }
 
 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 
 out:
        spin_unlock(ptl);
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback as
-        * the above pudp_huge_clear_flush_notify() did already call it.
-        */
-       mmu_notifier_invalidate_range_only_end(&range);
+       mmu_notifier_invalidate_range_end(&range);
 }
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
        count_vm_event(THP_SPLIT_PMD);
 
        if (!vma_is_anonymous(vma)) {
-               old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+               old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
                /*
                 * We are going to unmap this huge page. So
                 * just go ahead and zap it
 
 out:
        spin_unlock(ptl);
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback.
-        * They are 3 cases to consider inside __split_huge_pmd_locked():
-        *  1) pmdp_huge_clear_flush_notify() call invalidate_range() obvious
-        *  2) __split_huge_zero_page_pmd() read only zero page and any write
-        *    fault will trigger a flush_notify before pointing to a new page
-        *    (it is fine if the secondary mmu keeps pointing to the old zero
-        *    page in the meantime)
-        *  3) Split a huge pmd into pte pointing to the same page. No need
-        *     to invalidate secondary tlb entry they are all still valid.
-        *     any further changes to individual pte will notify. So no need
-        *     to call mmu_notifier->invalidate_range()
-        */
-       mmu_notifier_invalidate_range_only_end(&range);
+       mmu_notifier_invalidate_range_end(&range);
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
 
 
                /* Break COW or unshare */
                huge_ptep_clear_flush(vma, haddr, ptep);
-               mmu_notifier_invalidate_range(mm, range.start, range.end);
                page_remove_rmap(&old_folio->page, vma, true);
                hugepage_add_new_anon_rmap(new_folio, vma, haddr);
                if (huge_pte_uffd_wp(pte))
 
                 * that left a window where the new PTE could be loaded into
                 * some TLBs while the old PTE remains in others.
                 */
-               ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
+               ptep_clear_flush(vma, vmf->address, vmf->pte);
                folio_add_new_anon_rmap(new_folio, vma, vmf->address);
                folio_add_lru_vma(new_folio, vma);
                /*
                pte_unmap_unlock(vmf->pte, vmf->ptl);
        }
 
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback as
-        * the above ptep_clear_flush_notify() did already call it.
-        */
-       mmu_notifier_invalidate_range_only_end(&range);
+       mmu_notifier_invalidate_range_end(&range);
 
        if (new_folio)
                folio_put(new_folio);
 
 
        if (flush) {
                flush_cache_page(vma, addr, pte_pfn(orig_pte));
-               ptep_clear_flush_notify(vma, addr, ptep);
+               ptep_clear_flush(vma, addr, ptep);
                set_pte_at_notify(mm, addr, ptep, entry);
                update_mmu_cache(vma, addr, ptep);
        } else {
                        src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
        }
 
-       /*
-        * No need to double call mmu_notifier->invalidate_range() callback as
-        * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
-        * did already call it.
-        */
        if (notified)
-               mmu_notifier_invalidate_range_only_end(&range);
+               mmu_notifier_invalidate_range_end(&range);
 }
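
The notified flag above follows the usual lazy-bracket pattern: range_start
is issued only once the walk finds an entry that needs new PTEs, and
range_end is then paired with it. In outline (a sketch with invented names,
again assuming the six-argument mmu_notifier_range_init()):

#include <linux/migrate.h>
#include <linux/mmu_notifier.h>

/* Illustrative only: notify lazily, and pair range_end with range_start. */
static void demo_finalise_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long *src_pfns,
                                unsigned long npages)
{
        struct mmu_notifier_range range;
        bool notified = false;
        unsigned long i;

        for (i = 0; i < npages; i++) {
                if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
                        continue;
                if (!notified) {
                        notified = true;
                        mmu_notifier_range_init(&range, MMU_NOTIFY_MIGRATE, 0,
                                                mm, start, end);
                        mmu_notifier_invalidate_range_start(&range);
                }
                /* ... install the destination PTE for entry i ... */
        }

        /* Only end the invalidation if it was actually started. */
        if (notified)
                mmu_notifier_invalidate_range_end(&range);
}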
 
 /**
 
 
 static void
 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
-                       struct mmu_notifier_range *range, bool only_end)
+                       struct mmu_notifier_range *range)
 {
        struct mmu_notifier *subscription;
        int id;
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
-               /*
-                * Call invalidate_range here too to avoid the need for the
-                * subsystem of having to register an invalidate_range_end
-                * call-back when there is invalidate_range already. Usually a
-                * subsystem registers either invalidate_range_start()/end() or
-                * invalidate_range(), so this will be no additional overhead
-                * (besides the pointer check).
-                *
-                * We skip call to invalidate_range() if we know it is safe ie
-                * call site use mmu_notifier_invalidate_range_only_end() which
-                * is safe to do when we know that a call to invalidate_range()
-                * already happen under page table lock.
-                */
-               if (!only_end && subscription->ops->invalidate_range)
-                       subscription->ops->invalidate_range(subscription,
-                                                           range->mm,
-                                                           range->start,
-                                                           range->end);
                if (subscription->ops->invalidate_range_end) {
                        if (!mmu_notifier_range_blockable(range))
                                non_block_start();
        srcu_read_unlock(&srcu, id);
 }
 
-void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
-                                        bool only_end)
+void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
 {
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;
                mn_itree_inv_end(subscriptions);
 
        if (!hlist_empty(&subscriptions->list))
-               mn_hlist_invalidate_end(subscriptions, range, only_end);
+               mn_hlist_invalidate_end(subscriptions, range);
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
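
Besides the hlist walk, this end path is also what releases interval-notifier
readers: mn_itree_inv_end() lets anything blocked in mmu_interval_read_begin()
proceed again. A consumer loop looks roughly like the following (a sketch; the
function name is invented and the driver lock that normally protects the retry
check is omitted):

#include <linux/mmu_notifier.h>

/* Illustrative only: retry until no invalidation raced with the rebuild. */
static void demo_mirror_rebuild(struct mmu_interval_notifier *sub,
                                unsigned long start, unsigned long end)
{
        unsigned long seq;

        do {
                /* Blocks while an invalidation of the range is in flight. */
                seq = mmu_interval_read_begin(sub);

                /* ... walk the page tables and program the device ... */

                /*
                 * Retry if an overlapping invalidation started after
                 * read_begin; the snapshot may be stale.
                 */
        } while (mmu_interval_read_retry(sub, seq));
}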
 
 
 #endif
                }
 
-               /*
-                * No need to call mmu_notifier_invalidate_range() as we are
-                * downgrading page table protection not changing it to point
-                * to a new page.
-                *
-                * See Documentation/mm/mmu_notifier.rst
-                */
                if (ret)
                        cleaned++;
        }
                                        hugetlb_vma_unlock_write(vma);
                                        flush_tlb_range(vma,
                                                range.start, range.end);
-                                       mmu_notifier_invalidate_range(mm,
-                                               range.start, range.end);
                                        /*
                                         * The ref count of the PMD page was
                                         * dropped which is part of the way map
                         * copied pages.
                         */
                        dec_mm_counter(mm, mm_counter(&folio->page));
-                       /* We have to invalidate as we cleared the pte */
-                       mmu_notifier_invalidate_range(mm, address,
-                                                     address + PAGE_SIZE);
                } else if (folio_test_anon(folio)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
                                        folio_test_swapcache(folio))) {
                                WARN_ON_ONCE(1);
                                ret = false;
-                               /* We have to invalidate as we cleared the pte */
-                               mmu_notifier_invalidate_range(mm, address,
-                                                       address + PAGE_SIZE);
                                page_vma_mapped_walk_done(&pvmw);
                                break;
                        }
                                 */
                                if (ref_count == 1 + map_count &&
                                    !folio_test_dirty(folio)) {
-                                       /* Invalidate as we cleared the pte */
-                                       mmu_notifier_invalidate_range(mm,
-                                               address, address + PAGE_SIZE);
                                        dec_mm_counter(mm, MM_ANONPAGES);
                                        goto discard;
                                }
                        if (pte_uffd_wp(pteval))
                                swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        set_pte_at(mm, address, pvmw.pte, swp_pte);
-                       /* Invalidate as we cleared the pte */
-                       mmu_notifier_invalidate_range(mm, address,
-                                                     address + PAGE_SIZE);
                } else {
                        /*
                         * This is a locked file-backed folio,
                        dec_mm_counter(mm, mm_counter_file(&folio->page));
                }
 discard:
-               /*
-                * No need to call mmu_notifier_invalidate_range() it has be
-                * done above for all cases requiring it to happen under page
-                * table lock before mmu_notifier_invalidate_range_end()
-                *
-                * See Documentation/mm/mmu_notifier.rst
-                */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
                        mlock_drain_local();
                                        hugetlb_vma_unlock_write(vma);
                                        flush_tlb_range(vma,
                                                range.start, range.end);
-                                       mmu_notifier_invalidate_range(mm,
-                                               range.start, range.end);
 
                                        /*
                                         * The ref count of the PMD page was
                         * copied pages.
                         */
                        dec_mm_counter(mm, mm_counter(&folio->page));
-                       /* We have to invalidate as we cleared the pte */
-                       mmu_notifier_invalidate_range(mm, address,
-                                                     address + PAGE_SIZE);
                } else {
                        swp_entry_t entry;
                        pte_t swp_pte;
                         */
                }
 
-               /*
-                * No need to call mmu_notifier_invalidate_range() it has be
-                * done above for all cases requiring it to happen under page
-                * table lock before mmu_notifier_invalidate_range_end()
-                *
-                * See Documentation/mm/mmu_notifier.rst
-                */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
                        mlock_drain_local();