 #ifndef _LINUX_HUGETLB_H
 #define _LINUX_HUGETLB_H
 
+#include <linux/mm.h>
 #include <linux/mm_types.h>
 #include <linux/mmdebug.h>
 #include <linux/fs.h>
  * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
  * Returns the pte_t* if found, or NULL if the address is not mapped.
  *
+ * IMPORTANT: this function should normally not be called directly; it is
+ * only the common interface that the arch-specific walkers implement.
+ * Please use hugetlb_walk() instead, because it will attempt to verify
+ * the locking for you.
+ *
  * Since this function will walk all the pgtable pages (including not only
  * high-level pgtable page, but also PUD entry that can be unshared
  * concurrently for VM_SHARED), the caller of this function should be
 #define flush_hugetlb_tlb_range(vma, addr, end)        flush_tlb_range(vma, addr, end)
 #endif
 
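+/*
+ * Check whether the hugetlb vma lock is present: it is only allocated
+ * (into vm_private_data) for shareable mappings, where pmd sharing is
+ * possible.
+ */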
+static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+       return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
+}
+
+/*
+ * Safe version of huge_pte_offset(), which verifies the locking.  See the
+ * comment above huge_pte_offset().
+ */
+static inline pte_t *
+hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
+{
+#if defined(CONFIG_HUGETLB_PAGE) && \
+       defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+       struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+       /*
+        * If pmd sharing is possible, locking is needed to safely walk
+        * the hugetlb pgtables.  More information can be found in the
+        * comment above huge_pte_offset() in the same file.
+        *
+        * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
+        */
+       if (__vma_shareable_lock(vma))
+               WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
+                            !lockdep_is_held(
+                                &vma->vm_file->f_mapping->i_mmap_rwsem));
+#endif
+       return huge_pte_offset(vma->vm_mm, addr, sz);
+}
+
 #endif /* _LINUX_HUGETLB_H */
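
A minimal usage sketch of the new helper (the function below is hypothetical,
not part of the patch; the locking pattern follows the comments above):

static bool example_is_mapped(struct vm_area_struct *vma, unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);
	pte_t *ptep;
	bool mapped = false;

	/* Hold the vma lock so a shared PMD cannot be unshared under us. */
	hugetlb_vma_lock_read(vma);
	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
	/*
	 * Racy read: a caller needing a stable value would also take
	 * huge_pte_lock() before reading the pte.
	 */
	if (ptep)
		mapped = !huge_pte_none(huge_ptep_get(ptep));
	hugetlb_vma_unlock_read(vma);

	return mapped;
}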
 
 /*
  * hugetlb vma_lock helper routines
  */
-static bool __vma_shareable_lock(struct vm_area_struct *vma)
-{
-       return vma->vm_flags & VM_MAYSHARE && vma->vm_private_data;
-}
-
 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
 {
        if (__vma_shareable_lock(vma)) {
        } else {
                /*
                 * For shared mappings the vma lock must be held before
-                * calling huge_pte_offset in the src vma. Otherwise, the
+                * calling hugetlb_walk() in the src vma. Otherwise, the
                 * returned ptep could go away if part of a shared pmd and
                 * another thread calls huge_pmd_unshare.
                 */
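
The interleaving that holding the vma lock prevents, as a rough sketch
(thread labels are illustrative):

	/*
	 *   walking thread                  unsharing thread
	 *   --------------                  ----------------
	 *   ptep = hugetlb_walk(vma, ...);
	 *                                   huge_pmd_unshare(mm, vma, addr, ptep);
	 *                                   (shared PMD page table page freed)
	 *   huge_ptep_get(ptep);            <- use-after-free
	 */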
        last_addr_mask = hugetlb_mask_last_page(h);
        for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
                spinlock_t *src_ptl, *dst_ptl;
-               src_pte = huge_pte_offset(src, addr, sz);
+               src_pte = hugetlb_walk(src_vma, addr, sz);
                if (!src_pte) {
                        addr |= last_addr_mask;
                        continue;
        hugetlb_vma_lock_write(vma);
        i_mmap_lock_write(mapping);
        for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
-               src_pte = huge_pte_offset(mm, old_addr, sz);
+               src_pte = hugetlb_walk(vma, old_addr, sz);
                if (!src_pte) {
                        old_addr |= last_addr_mask;
                        new_addr |= last_addr_mask;
        last_addr_mask = hugetlb_mask_last_page(h);
        address = start;
        for (; address < end; address += sz) {
-               ptep = huge_pte_offset(mm, address, sz);
+               ptep = hugetlb_walk(vma, address, sz);
                if (!ptep) {
                        address |= last_addr_mask;
                        continue;
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);
                        hugetlb_vma_lock_read(vma);
                        spin_lock(ptl);
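			/*
			 * The fault mutex, vma lock and ptl were all dropped
			 * and re-taken above, so the old ptep may be stale:
			 * re-walk and check pte_same() before retrying.
			 */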
-                       ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
+                       ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
                        if (likely(ptep &&
                                   pte_same(huge_ptep_get(ptep), pte)))
                                goto retry_avoidcopy;
         * before the page tables are altered
         */
        spin_lock(ptl);
-       ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
+       ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
        if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW or unshare */
                huge_ptep_clear_flush(vma, haddr, ptep);
                return NULL;
 
        hugetlb_vma_lock_read(vma);
-       pte = huge_pte_offset(mm, haddr, huge_page_size(h));
+       pte = hugetlb_walk(vma, haddr, huge_page_size(h));
        if (!pte)
                goto out_unlock;
 
                 *
                 * Note that page table lock is not held when pte is null.
                 */
-               pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
-                                     huge_page_size(h));
+               pte = hugetlb_walk(vma, vaddr & huge_page_mask(h),
+                                  huge_page_size(h));
                if (pte)
                        ptl = huge_pte_lock(h, mm, pte);
                absent = !pte || huge_pte_none(huge_ptep_get(pte));
        last_addr_mask = hugetlb_mask_last_page(h);
        for (; address < end; address += psize) {
                spinlock_t *ptl;
-               ptep = huge_pte_offset(mm, address, psize);
+               ptep = hugetlb_walk(vma, address, psize);
                if (!ptep) {
                        if (!uffd_wp) {
                                address |= last_addr_mask;
 
                saddr = page_table_shareable(svma, vma, addr, idx);
                if (saddr) {
-                       spte = huge_pte_offset(svma->vm_mm, saddr,
-                                              vma_mmu_pagesize(svma));
+                       spte = hugetlb_walk(svma, saddr,
+                                           vma_mmu_pagesize(svma));
                        if (spte) {
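				/*
				 * Pin the page table page that backs the
				 * shared PMD so it cannot be freed before it
				 * is installed in this mm.
				 */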
                                get_page(virt_to_page(spte));
                                break;
        hugetlb_vma_lock_write(vma);
        i_mmap_lock_write(vma->vm_file->f_mapping);
        for (address = start; address < end; address += PUD_SIZE) {
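		/*
		 * A shared PMD page table page covers PUD_SIZE of address
		 * space, hence the PUD_SIZE stride.
		 */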
-               ptep = huge_pte_offset(mm, address, sz);
+               ptep = hugetlb_walk(vma, address, sz);
                if (!ptep)
                        continue;
                ptl = huge_pte_lock(h, mm, ptep);