static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                spinlock_t **ptl)
 {
-       VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+       VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pmd_trans_huge(*pmd))
                return __pmd_trans_huge_lock(pmd, vma, ptl);
        else
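
For context, VM_BUG_ON_VMA() is the VMA-flavoured counterpart of
VM_BUG_ON_PAGE(): with CONFIG_DEBUG_VM enabled, a failing assertion
dumps the offending vm_area_struct via dump_vma() before calling
BUG(), so the crash log carries the VMA state needed to debug the
report. A minimal sketch of the definition, assuming the usual
mmdebug.h structure (exact wording may differ):

	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON_VMA(cond, vma)				\
		do {							\
			if (unlikely(cond)) {				\
				dump_vma(vma);				\
				BUG();					\
			}						\
		} while (0)
	#else
	/* compiles away, like VM_BUG_ON(), when CONFIG_DEBUG_VM is off */
	#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
	#endif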
 
 static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
 {
-       VM_BUG_ON(vma->anon_vma != next->anon_vma);
+       VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
        unlink_anon_vmas(next);
 }
 
        unsigned long mmun_end;         /* For mmu_notifiers */
 
        ptl = pmd_lockptr(mm, pmd);
-       VM_BUG_ON(!vma->anon_vma);
+       VM_BUG_ON_VMA(!vma->anon_vma, vma);
        haddr = address & HPAGE_PMD_MASK;
        if (is_huge_zero_pmd(orig_pmd))
                goto alloc;
        if (vma->vm_ops)
                /* khugepaged not yet working on file or special mappings */
                return 0;
-       VM_BUG_ON(vma->vm_flags & VM_NO_THP);
+       VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)

                return false;
        if (is_vma_temporary_stack(vma))
                return false;
-       VM_BUG_ON(vma->vm_flags & VM_NO_THP);
+       VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
        return true;
 }
 
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
-       VM_BUG_ON(!is_vm_hugetlb_page(vma));
+       VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (vma->vm_flags & VM_MAYSHARE) {
                struct address_space *mapping = vma->vm_file->f_mapping;
                struct inode *inode = mapping->host;
 
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
-       VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
+       VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+       VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 
        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
-       VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
+       VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+       VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 
        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
 
 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 {
-       VM_BUG_ON(!is_vm_hugetlb_page(vma));
+       VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 
        return (get_vma_private_data(vma) & flag) != 0;
 }

 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
-       VM_BUG_ON(!is_vm_hugetlb_page(vma));
+       VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
 }
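
Each of the hugetlb reservation helpers above asserts
is_vm_hugetlb_page() on entry, so they gain the most from having the
VMA in the report. The dump_vma() helper that VM_BUG_ON_VMA() relies
on prints the fields most useful for triage; roughly as below (the
field list here is illustrative, mm/debug.c has the authoritative
version):

	void dump_vma(const struct vm_area_struct *vma)
	{
		pr_emerg("vma %p start %p end %p\n"
			 "next %p prev %p mm %p\n"
			 "prot %lx anon_vma %p vm_ops %p\n"
			 "pgoff %lx file %p private_data %p\n",
			 vma, (void *)vma->vm_start, (void *)vma->vm_end,
			 vma->vm_next, vma->vm_prev, vma->vm_mm,
			 (unsigned long)pgprot_val(vma->vm_page_prot),
			 vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
			 vma->vm_file, vma->vm_private_data);
	}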
 
        struct vm_area_struct *parent;
        unsigned long last = vma_last_pgoff(node);
 
-       VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev));
+       VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);
 
        if (!prev->shared.linear.rb.rb_right) {
                parent = prev;
 
        VM_BUG_ON(start & ~PAGE_MASK);
        VM_BUG_ON(end   & ~PAGE_MASK);
-       VM_BUG_ON(start < vma->vm_start);
-       VM_BUG_ON(end   > vma->vm_end);
+       VM_BUG_ON_VMA(start < vma->vm_start, vma);
+       VM_BUG_ON_VMA(end   > vma->vm_end, vma);
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
        gup_flags = FOLL_TOUCH | FOLL_MLOCK;
 
        if (!anon_vma && adjust_next)
                anon_vma = next->anon_vma;
        if (anon_vma) {
-               VM_BUG_ON(adjust_next && next->anon_vma &&
-                         anon_vma != next->anon_vma);
+               VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
+                         anon_vma != next->anon_vma, next);
                anon_vma_lock_write(anon_vma);
                anon_vma_interval_tree_pre_update_vma(vma);
                if (adjust_next)

                         * safe. It is only safe to keep the vm_pgoff
                         * linear if there are no pages mapped yet.
                         */
-                       VM_BUG_ON(faulted_in_anon_vma);
+                       VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
                        *vmap = vma = new_vma;
                }
                *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
 
                if (pmd_trans_huge(*old_pmd)) {
                        int err = 0;
                        if (extent == HPAGE_PMD_SIZE) {
-                               VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+                               VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
+                                             vma);
                                /* See comment in move_ptes() */
                                if (need_rmap_locks)
                                        anon_vma_lock_write(vma->anon_vma);
 
        unsigned long address = __vma_address(page, vma);
 
        /* page should be within @vma mapping range */
-       VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+       VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 
        return address;
 }

         struct anon_vma *anon_vma = vma->anon_vma;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       VM_BUG_ON(!anon_vma);
+       VM_BUG_ON_VMA(!anon_vma, vma);
        VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;

 void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
-       VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+       VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        SetPageSwapBacked(page);
        atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
        if (PageTransHuge(page))

          * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_mutex.
         */
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
 
        if (!mapping)
                return ret;
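
Note that this last hunk converts to VM_BUG_ON_PAGE() rather than
VM_BUG_ON_VMA(): the invariant being checked is about the page (its
locked state), so the page is the object worth dumping. The pattern
is the same, roughly:

	#define VM_BUG_ON_PAGE(cond, page)				\
		do {							\
			if (unlikely(cond)) {				\
				dump_page(page,				\
				    "VM_BUG_ON_PAGE(" __stringify(cond) ")"); \
				BUG();					\
			}						\
		} while (0)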