static inline void get_head_page_multiple(struct page *page, int nr)
 {
-       VM_BUG_ON(page != compound_head(page));
-       VM_BUG_ON(page_count(page) == 0);
+       VM_BUG_ON_PAGE(page != compound_head(page), page);
+       VM_BUG_ON_PAGE(page_count(page) == 0, page);
        atomic_add(nr, &page->_count);
        SetPageReferenced(page);
 }
        head = pte_page(pte);
        page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        do {
-               VM_BUG_ON(compound_head(page) != head);
+               VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                if (PageTail(page))
                        get_huge_page_tail(page);
        head = pte_page(pte);
        page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
        do {
-               VM_BUG_ON(compound_head(page) != head);
+               VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                if (PageTail(page))
                        get_huge_page_tail(page);
 
 #ifndef __LINUX_GFP_H
 #define __LINUX_GFP_H
 
+#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
 #include <linux/linkage.h>
 
 #define _LINUX_HUGETLB_H
 
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/cgroup.h>
 
 static inline struct hstate *page_hstate(struct page *page)
 {
-       VM_BUG_ON(!PageHuge(page));
+       VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(PAGE_SIZE << compound_order(page));
 }
 
 
 #ifndef _LINUX_HUGETLB_CGROUP_H
 #define _LINUX_HUGETLB_CGROUP_H
 
+#include <linux/mmdebug.h>
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
 
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 {
-       VM_BUG_ON(!PageHuge(page));
+       VM_BUG_ON_PAGE(!PageHuge(page), page);
 
        if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                return NULL;
 static inline
 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 {
-       VM_BUG_ON(!PageHuge(page));
+       VM_BUG_ON_PAGE(!PageHuge(page), page);
 
        if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                return -1;
 
 
 #ifdef __KERNEL__
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
  */
 static inline int put_page_testzero(struct page *page)
 {
-       VM_BUG_ON(atomic_read(&page->_count) == 0);
+       VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
        return atomic_dec_and_test(&page->_count);
 }
 
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       VM_BUG_ON(PageSlab(page));
+       VM_BUG_ON_PAGE(PageSlab(page), page);
        bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       VM_BUG_ON(PageSlab(page));
+       VM_BUG_ON_PAGE(PageSlab(page), page);
        bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
  */
 static inline bool compound_tail_refcounted(struct page *page)
 {
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        return __compound_tail_refcounted(page);
 }
 
        /*
         * __split_huge_page_refcount() cannot run from under us.
         */
-       VM_BUG_ON(!PageTail(page));
-       VM_BUG_ON(page_mapcount(page) < 0);
-       VM_BUG_ON(atomic_read(&page->_count) != 0);
+       VM_BUG_ON_PAGE(!PageTail(page), page);
+       VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+       VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
        if (compound_tail_refcounted(page->first_page))
                atomic_inc(&page->_mapcount);
 }
         * Getting a normal page or the head of a compound page
         * requires to already have an elevated page->_count.
         */
-       VM_BUG_ON(atomic_read(&page->_count) <= 0);
+       VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
        atomic_inc(&page->_count);
 }
 
 
 static inline void __SetPageBuddy(struct page *page)
 {
-       VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+       VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
        atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
 {
-       VM_BUG_ON(!PageBuddy(page));
+       VM_BUG_ON_PAGE(!PageBuddy(page), page);
        atomic_set(&page->_mapcount, -1);
 }
 
         * slab code uses page->slab_cache and page->first_page (for tail
         * pages), which share storage with page->ptl.
         */
-       VM_BUG_ON(*(unsigned long *)&page->ptl);
+       VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
        if (!ptlock_alloc(page))
                return false;
        spin_lock_init(ptlock_ptr(page));
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       VM_BUG_ON(page->pmd_huge_pte);
+       VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
        ptlock_free(page);
 }
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
-                              unsigned long badflags);
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
                            unsigned long addr,
 
 #ifndef LINUX_MM_DEBUG_H
 #define LINUX_MM_DEBUG_H 1
 
+struct page;
+
+extern void dump_page(struct page *page, char *reason);
+extern void dump_page_badflags(struct page *page, char *reason,
+                              unsigned long badflags);
+
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
+#define VM_BUG_ON_PAGE(cond, page) \
+       do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
 #endif
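
For reference, a minimal sketch of how a call site changes once the macro above is available; the helper name lock_sanity_check() is illustrative and not part of the patch:

#include <linux/mm.h>        /* struct page, PageLocked() */
#include <linux/mmdebug.h>   /* VM_BUG_ON(), VM_BUG_ON_PAGE() */

/* Illustrative only; not a function touched by this patch. */
static inline void lock_sanity_check(struct page *page)
{
        /* Old form: a failure reports only the BUG() file/line. */
        VM_BUG_ON(!PageLocked(page));

        /* New form: the offending struct page is dumped before BUG(). */
        VM_BUG_ON_PAGE(!PageLocked(page), page);
}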
 
 #ifdef CONFIG_DEBUG_VIRTUAL
 
  */
 static inline int PageTransHuge(struct page *page)
 {
-       VM_BUG_ON(PageTail(page));
+       VM_BUG_ON_PAGE(PageTail(page), page);
        return PageHead(page);
 }
 
  */
 static inline int PageSlabPfmemalloc(struct page *page)
 {
-       VM_BUG_ON(!PageSlab(page));
+       VM_BUG_ON_PAGE(!PageSlab(page), page);
        return PageActive(page);
 }
 
 static inline void SetPageSlabPfmemalloc(struct page *page)
 {
-       VM_BUG_ON(!PageSlab(page));
+       VM_BUG_ON_PAGE(!PageSlab(page), page);
        SetPageActive(page);
 }
 
 static inline void __ClearPageSlabPfmemalloc(struct page *page)
 {
-       VM_BUG_ON(!PageSlab(page));
+       VM_BUG_ON_PAGE(!PageSlab(page), page);
        __ClearPageActive(page);
 }
 
 static inline void ClearPageSlabPfmemalloc(struct page *page)
 {
-       VM_BUG_ON(!PageSlab(page));
+       VM_BUG_ON_PAGE(!PageSlab(page), page);
        ClearPageActive(page);
 }
 
 
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
-       VM_BUG_ON(page_count(page) == 0);
+       VM_BUG_ON_PAGE(page_count(page) == 0, page);
        atomic_inc(&page->_count);
 
 #else
                return 0;
        }
 #endif
-       VM_BUG_ON(PageTail(page));
+       VM_BUG_ON_PAGE(PageTail(page), page);
 
        return 1;
 }
 # ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
 # endif
-       VM_BUG_ON(page_count(page) == 0);
+       VM_BUG_ON_PAGE(page_count(page) == 0, page);
        atomic_add(count, &page->_count);
 
 #else
        if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                return 0;
 #endif
-       VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+       VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
 
        return 1;
 }
 
 static inline void page_unfreeze_refs(struct page *page, int count)
 {
-       VM_BUG_ON(page_count(page) != 0);
+       VM_BUG_ON_PAGE(page_count(page) != 0, page);
        VM_BUG_ON(count == 0);
 
        atomic_set(&page->_count, count);
 
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 
                goto out;
        }
 
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
        fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
        if (fake_pool_id < 0)
                goto out;
                return;
        }
 
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
        fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
        if (fake_pool_id < 0)
                return;
                if (pool_id < 0)
                        return;
 
-               VM_BUG_ON(!PageLocked(page));
+               VM_BUG_ON_PAGE(!PageLocked(page), page);
                if (cleancache_get_key(mapping->host, &key) >= 0) {
                        cleancache_ops->invalidate_page(pool_id,
                                        key, page->index);
 
                if (__isolate_lru_page(page, mode) != 0)
                        continue;
 
-               VM_BUG_ON(PageTransCompound(page));
+               VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
                /* Successfully isolated */
                cc->finished_update_migrate = true;
 
 {
        int error;
 
-       VM_BUG_ON(!PageLocked(old));
-       VM_BUG_ON(!PageLocked(new));
-       VM_BUG_ON(new->mapping);
+       VM_BUG_ON_PAGE(!PageLocked(old), old);
+       VM_BUG_ON_PAGE(!PageLocked(new), new);
+       VM_BUG_ON_PAGE(new->mapping, new);
 
        error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (!error) {
 {
        int error;
 
-       VM_BUG_ON(!PageLocked(page));
-       VM_BUG_ON(PageSwapBacked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(PageSwapBacked(page), page);
 
        error = mem_cgroup_cache_charge(page, current->mm,
                                        gfp_mask & GFP_RECLAIM_MASK);
  */
 void unlock_page(struct page *page)
 {
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
        clear_bit_unlock(PG_locked, &page->flags);
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_locked);
                        page_cache_release(page);
                        goto repeat;
                }
-               VM_BUG_ON(page->index != offset);
+               VM_BUG_ON_PAGE(page->index != offset, page);
        }
        return page;
 }
                put_page(page);
                goto retry_find;
        }
-       VM_BUG_ON(page->index != offset);
+       VM_BUG_ON_PAGE(page->index != offset, page);
 
        /*
         * We have a locked page in the page cache, now we need to check
 
        pgtable_t pgtable;
        spinlock_t *ptl;
 
-       VM_BUG_ON(!PageCompound(page));
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable))
                return VM_FAULT_OOM;
                goto out;
        }
        src_page = pmd_page(pmd);
-       VM_BUG_ON(!PageHead(src_page));
+       VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
        get_page(src_page);
        page_dup_rmap(src_page);
        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_pages;
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
 
        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
                goto out_unlock;
 
        page = pmd_page(orig_pmd);
-       VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+       VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                        put_huge_zero_page();
                } else {
-                       VM_BUG_ON(!PageHead(page));
+                       VM_BUG_ON_PAGE(!PageHead(page), page);
                        page_remove_rmap(page);
                        put_page(page);
                }
                goto out;
 
        page = pmd_page(*pmd);
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        if (flags & FOLL_TOUCH) {
                pmd_t _pmd;
                /*
                }
        }
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-       VM_BUG_ON(!PageCompound(page));
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
        if (flags & FOLL_GET)
                get_page_foll(page);
 
                } else {
                        page = pmd_page(orig_pmd);
                        page_remove_rmap(page);
-                       VM_BUG_ON(page_mapcount(page) < 0);
+                       VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-                       VM_BUG_ON(!PageHead(page));
+                       VM_BUG_ON_PAGE(!PageHead(page), page);
                        atomic_long_dec(&tlb->mm->nr_ptes);
                        spin_unlock(ptl);
                        tlb_remove_page(tlb, page);
                if (unlikely(!page))
                        goto out;
 
-               VM_BUG_ON(PageCompound(page));
-               BUG_ON(!PageAnon(page));
-               VM_BUG_ON(!PageSwapBacked(page));
+               VM_BUG_ON_PAGE(PageCompound(page), page);
+               VM_BUG_ON_PAGE(!PageAnon(page), page);
+               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
                /* cannot use mapcount: can't collapse if there's a gup pin */
                if (page_count(page) != 1)
                }
                /* 0 stands for page_is_file_cache(page) == false */
                inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
-               VM_BUG_ON(!PageLocked(page));
-               VM_BUG_ON(PageLRU(page));
+               VM_BUG_ON_PAGE(!PageLocked(page), page);
+               VM_BUG_ON_PAGE(PageLRU(page), page);
 
                /* If there is no mapped pte young don't collapse the page */
                if (pte_young(pteval) || PageReferenced(page) ||
                } else {
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
-                       VM_BUG_ON(page_mapcount(src_page) != 1);
+                       VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
                        release_pte_page(src_page);
                        /*
                         * ptl mostly unnecessary, but preempt has to
                       struct vm_area_struct *vma, unsigned long address,
                       int node)
 {
-       VM_BUG_ON(*hpage);
+       VM_BUG_ON_PAGE(*hpage, *hpage);
        /*
         * Allocate the page while the vma is still valid and under
         * the mmap_sem read mode so there is no memory allocation
                 */
                node = page_to_nid(page);
                khugepaged_node_load[node]++;
-               VM_BUG_ON(PageCompound(page));
+               VM_BUG_ON_PAGE(PageCompound(page), page);
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                        goto out_unmap;
                /* cannot use mapcount: can't collapse if there's a gup pin */
                return;
        }
        page = pmd_page(*pmd);
-       VM_BUG_ON(!page_count(page));
+       VM_BUG_ON_PAGE(!page_count(page), page);
        get_page(page);
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
-       VM_BUG_ON(hugetlb_cgroup_from_page(page));
+       VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
-               VM_BUG_ON(page_count(page));
+               VM_BUG_ON_PAGE(page_count(page), page);
                enqueue_huge_page(h, page);
        }
 free:
 
 bool isolate_huge_page(struct page *page, struct list_head *list)
 {
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        if (!get_page_unless_zero(page))
                return false;
        spin_lock(&hugetlb_lock);
 
 void putback_active_hugepage(struct page *page)
 {
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
 
 bool is_hugepage_active(struct page *page)
 {
-       VM_BUG_ON(!PageHuge(page));
+       VM_BUG_ON_PAGE(!PageHuge(page), page);
        /*
         * This function can be called for a tail page because the caller,
         * scan_movable_pages, scans through a given pfn-range which typically
 
        if (hugetlb_cgroup_disabled())
                return;
 
-       VM_BUG_ON(!PageHuge(oldhpage));
+       VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
        spin_lock(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_page(oldhpage);
        set_hugetlb_cgroup(oldhpage, NULL);
 
  */
 static inline void set_page_refcounted(struct page *page)
 {
-       VM_BUG_ON(PageTail(page));
-       VM_BUG_ON(atomic_read(&page->_count));
+       VM_BUG_ON_PAGE(PageTail(page), page);
+       VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
        set_page_count(page, 1);
 }
 
         * speculative page access (like in
         * page_cache_get_speculative()) on tail pages.
         */
-       VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+       VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
        if (get_page_head)
                atomic_inc(&page->first_page->_count);
        get_huge_page_tail(page);
                 * Getting a normal page or the head of a compound page
                 * requires to already have an elevated page->_count.
                 */
-               VM_BUG_ON(atomic_read(&page->_count) <= 0);
+               VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
                atomic_inc(&page->_count);
        }
 }
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
                                    struct page *page)
 {
-       VM_BUG_ON(PageLRU(page));
+       VM_BUG_ON_PAGE(PageLRU(page), page);
 
        if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
                return 0;
 
        int ret = SWAP_AGAIN;
        int search_new_forks = 0;
 
-       VM_BUG_ON(!PageKsm(page));
+       VM_BUG_ON_PAGE(!PageKsm(page), page);
 
        /*
         * Rely on the page lock to protect against concurrent modifications
         * to that page's node of the stable tree.
         */
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
 
        stable_node = page_stable_node(page);
        if (!stable_node)
 {
        struct stable_node *stable_node;
 
-       VM_BUG_ON(!PageLocked(oldpage));
-       VM_BUG_ON(!PageLocked(newpage));
-       VM_BUG_ON(newpage->mapping != oldpage->mapping);
+       VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
+       VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+       VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
 
        stable_node = page_stable_node(newpage);
        if (stable_node) {
-               VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+               VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
                stable_node->kpfn = page_to_pfn(newpage);
                /*
                 * newpage->mapping was set in advance; now we need smp_wmb()
 
        unsigned short id;
        swp_entry_t ent;
 
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
 
        pc = lookup_page_cgroup(page);
        lock_page_cgroup(pc);
        bool anon;
 
        lock_page_cgroup(pc);
-       VM_BUG_ON(PageCgroupUsed(pc));
+       VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
        /*
         * we don't need page_cgroup_lock about tail pages, becase they are not
         * accessed by any other context at this point.
        if (lrucare) {
                if (was_on_lru) {
                        lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
-                       VM_BUG_ON(PageLRU(page));
+                       VM_BUG_ON_PAGE(PageLRU(page), page);
                        SetPageLRU(page);
                        add_page_to_lru_list(page, lruvec, page_lru(page));
                }
        if (!memcg)
                return;
 
-       VM_BUG_ON(mem_cgroup_is_root(memcg));
+       VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
        memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
 }
 #else
        bool anon = PageAnon(page);
 
        VM_BUG_ON(from == to);
-       VM_BUG_ON(PageLRU(page));
+       VM_BUG_ON_PAGE(PageLRU(page), page);
        /*
         * The page is isolated from LRU. So, collapse function
         * will not handle this page. But page splitting can happen.
                parent = root_mem_cgroup;
 
        if (nr_pages > 1) {
-               VM_BUG_ON(!PageTransHuge(page));
+               VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                flags = compound_lock_irqsave(page);
        }
 
 
        if (PageTransHuge(page)) {
                nr_pages <<= compound_order(page);
-               VM_BUG_ON(!PageTransHuge(page));
+               VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                /*
                 * Never OOM-kill a process for a huge page.  The
                 * fault handler will fall back to regular pages.
 {
        if (mem_cgroup_disabled())
                return 0;
-       VM_BUG_ON(page_mapped(page));
-       VM_BUG_ON(page->mapping && !PageAnon(page));
+       VM_BUG_ON_PAGE(page_mapped(page), page);
+       VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
        VM_BUG_ON(!mm);
        return mem_cgroup_charge_common(page, mm, gfp_mask,
                                        MEM_CGROUP_CHARGE_TYPE_ANON);
 
        if (PageTransHuge(page)) {
                nr_pages <<= compound_order(page);
-               VM_BUG_ON(!PageTransHuge(page));
+               VM_BUG_ON_PAGE(!PageTransHuge(page), page);
        }
        /*
         * Check if our page_cgroup is valid
        /* early check. */
        if (page_mapped(page))
                return;
-       VM_BUG_ON(page->mapping && !PageAnon(page));
+       VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
        /*
         * If the page is in swap cache, uncharge should be deferred
         * to the swap path, which also properly accounts swap usage
 
 void mem_cgroup_uncharge_cache_page(struct page *page)
 {
-       VM_BUG_ON(page_mapped(page));
-       VM_BUG_ON(page->mapping);
+       VM_BUG_ON_PAGE(page_mapped(page), page);
+       VM_BUG_ON_PAGE(page->mapping, page);
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
 }
 
        enum mc_target_type ret = MC_TARGET_NONE;
 
        page = pmd_page(pmd);
-       VM_BUG_ON(!page || !PageHead(page));
+       VM_BUG_ON_PAGE(!page || !PageHead(page), page);
        if (!move_anon())
                return ret;
        pc = lookup_page_cgroup(page);
 
                        return 0;
                batch = tlb->active;
        }
-       VM_BUG_ON(batch->nr > batch->max);
+       VM_BUG_ON_PAGE(batch->nr > batch->max, page);
 
        return batch->max - batch->nr;
 }
                                        goto unwritable_page;
                                }
                        } else
-                               VM_BUG_ON(!PageLocked(old_page));
+                               VM_BUG_ON_PAGE(!PageLocked(old_page), old_page);
 
                        /*
                         * Since we dropped the lock we need to revalidate
        if (unlikely(!(ret & VM_FAULT_LOCKED)))
                lock_page(vmf.page);
        else
-               VM_BUG_ON(!PageLocked(vmf.page));
+               VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
        /*
         * Should we do an early C-O-W break?
                                                goto unwritable_page;
                                        }
                                } else
-                                       VM_BUG_ON(!PageLocked(page));
+                                       VM_BUG_ON_PAGE(!PageLocked(page), page);
                                page_mkwrite = 1;
                        }
                }
 
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
-               VM_BUG_ON(PageUnevictable(page));
+               VM_BUG_ON_PAGE(PageUnevictable(page), page);
                SetPageActive(newpage);
        } else if (TestClearPageUnevictable(page))
                SetPageUnevictable(newpage);
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
-               VM_BUG_ON(PageAnon(page));
+               VM_BUG_ON_PAGE(PageAnon(page), page);
                if (page_has_private(page)) {
                        try_to_free_buffers(page);
                        goto uncharge;
 {
        int page_lru;
 
-       VM_BUG_ON(compound_order(page) && !PageTransHuge(page));
+       VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
 
        /* Avoid migrating to a node that is nearly full */
        if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
 
 static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
                int *pgrescued)
 {
-       VM_BUG_ON(PageLRU(page));
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(PageLRU(page), page);
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
 
        if (page_mapcount(page) <= 1 && page_evictable(page)) {
                pagevec_add(pvec, page);
 
                return 0;
 
        if (page_is_guard(buddy) && page_order(buddy) == order) {
-               VM_BUG_ON(page_count(buddy) != 0);
+               VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
                return 1;
        }
 
        if (PageBuddy(buddy) && page_order(buddy) == order) {
-               VM_BUG_ON(page_count(buddy) != 0);
+               VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
                return 1;
        }
        return 0;
 
        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-       VM_BUG_ON(page_idx & ((1 << order) - 1));
-       VM_BUG_ON(bad_range(zone, page));
+       VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+       VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
        while (order < MAX_ORDER-1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                area--;
                high--;
                size >>= 1;
-               VM_BUG_ON(bad_range(zone, &page[size]));
+               VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if (high < debug_guardpage_minorder()) {
 
        for (page = start_page; page <= end_page;) {
                /* Make sure we are not inadvertently changing nodes */
-               VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
 
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
 {
        int i;
 
-       VM_BUG_ON(PageCompound(page));
-       VM_BUG_ON(!page_count(page));
+       VM_BUG_ON_PAGE(PageCompound(page), page);
+       VM_BUG_ON_PAGE(!page_count(page), page);
 
 #ifdef CONFIG_KMEMCHECK
        /*
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);
 
-       VM_BUG_ON(bad_range(zone, page));
+       VM_BUG_ON_PAGE(bad_range(zone, page), page);
        if (prep_new_page(page, order, gfp_flags))
                goto again;
        return page;
        pfn = page_to_pfn(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
-       VM_BUG_ON(!zone_spans_pfn(zone, pfn));
+       VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
 
        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                if (flags & value)
 {
        dump_page_badflags(page, reason, 0);
 }
+EXPORT_SYMBOL_GPL(dump_page);
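
The export above is presumably needed because VM_BUG_ON_PAGE() now expands inside inline helpers in headers (get_page(), put_page_testzero(), ...) that modular code compiles as well; a hedged sketch of such a module-side user, where my_grab_page() is a made-up name:

#include <linux/mm.h>

/*
 * get_page() is a static inline; with CONFIG_DEBUG_VM it expands
 * VM_BUG_ON_PAGE(), whose failure path calls dump_page(), hence the
 * EXPORT_SYMBOL_GPL() above.
 */
static inline void my_grab_page(struct page *page)
{
        get_page(page);
}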
 
        int ret = 0;
        struct swap_info_struct *sis = page_swap_info(page);
 
-       VM_BUG_ON(!PageLocked(page));
-       VM_BUG_ON(PageUptodate(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(PageUptodate(page), page);
        if (frontswap_load(page) == 0) {
                SetPageUptodate(page);
                unlock_page(page);
 
 {
        struct anon_vma *anon_vma = vma->anon_vma;
 
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON(!anon_vma);
-       VM_BUG_ON(page->index != linear_page_index(vma, address));
+       VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;
        if (unlikely(PageKsm(page)))
                return;
 
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
        /* address might be in next vma when migration races vma_adjust */
        if (first)
                __page_set_anon_rmap(page, vma, address, exclusive);
                .anon_lock = page_lock_anon_vma_read,
        };
 
-       VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
+       VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
 
        /*
         * During exec, a temporary VMA is setup and later moved.
 
        };
 
-       VM_BUG_ON(!PageLocked(page) || PageLRU(page));
+       VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
 
        ret = rmap_walk(page, &rwc);
        return ret;
 
 {
        int error;
 
-       VM_BUG_ON(!PageLocked(page));
-       VM_BUG_ON(!PageSwapBacked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
        page_cache_get(page);
        page->mapping = mapping;
                                continue;
                        if (!unfalloc || !PageUptodate(page)) {
                                if (page->mapping == mapping) {
-                                       VM_BUG_ON(PageWriteback(page));
+                                       VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                }
                        }
                        lock_page(page);
                        if (!unfalloc || !PageUptodate(page)) {
                                if (page->mapping == mapping) {
-                                       VM_BUG_ON(PageWriteback(page));
+                                       VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                }
                        }
 
                new.freelist = freelist;
        }
 
        VM_BUG_ON(new.frozen);
        new.frozen = 1;
 
        if (!__cmpxchg_double_slab(s, page,
                        set_freepointer(s, freelist, prior);
                        new.counters = counters;
                        new.inuse--;
                        VM_BUG_ON(!new.frozen);
 
                } while (!__cmpxchg_double_slab(s, page,
                        prior, counters,
 
        old.freelist = page->freelist;
        old.counters = page->counters;
        VM_BUG_ON(!old.frozen);
 
        /* Determine target state of the slab */
        new.counters = old.counters;
 
                        old.freelist = page->freelist;
                        old.counters = page->counters;
                        VM_BUG_ON(!old.frozen);
 
                        new.counters = old.counters;
                        new.freelist = old.freelist;
                counters = page->counters;
 
                new.counters = counters;
                VM_BUG_ON(!new.frozen);
 
                new.inuse = page->objects;
                new.frozen = freelist != NULL;
         * page is pointing to the page from which the objects are obtained.
         * That page must be frozen for per cpu allocations to work.
         */
-       VM_BUG_ON(!c->page->frozen);
+       VM_BUG_ON_PAGE(!c->page->frozen, c->page);
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
        local_irq_restore(flags);
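
The mm/slub.c assertions on new/old above stay in the plain VM_BUG_ON() form: new and old are temporary on-stack copies of struct page used for cmpxchg_double, so dump_page() on their address would decode a bogus memmap entry; only the real c->page gains VM_BUG_ON_PAGE(). A hedged sketch of the pattern, with frozen_check_sketch() as a made-up name:

#include <linux/mm.h>

/* Sketch of the acquire_slab()/get_freelist() style check, heavily abridged. */
static inline void frozen_check_sketch(struct page *page)
{
        struct page new;                        /* on-stack scratch copy */

        new.counters = page->counters;          /* snapshot the real page */
        VM_BUG_ON(new.frozen);                  /* stack copy: plain form */
        VM_BUG_ON_PAGE(!page->frozen, page);    /* real page: dump is useful */
}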
 
 
                spin_lock_irqsave(&zone->lru_lock, flags);
                lruvec = mem_cgroup_page_lruvec(page, zone);
-               VM_BUG_ON(!PageLRU(page));
+               VM_BUG_ON_PAGE(!PageLRU(page), page);
                __ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
                         * __split_huge_page_refcount cannot race
                         * here.
                         */
-                       VM_BUG_ON(!PageHead(page_head));
-                       VM_BUG_ON(page_mapcount(page) != 0);
+                       VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
+                       VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
                        if (put_page_testzero(page_head)) {
                                /*
                                 * If this is the tail of a slab
                                 * the compound page enters the buddy
                                 * allocator.
                                 */
-                               VM_BUG_ON(PageSlab(page_head));
+                               VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
                                __put_compound_page(page_head);
                        }
                        return;
                                __put_single_page(page);
                        return;
                }
-               VM_BUG_ON(page_head != page->first_page);
+               VM_BUG_ON_PAGE(page_head != page->first_page, page);
                /*
                 * We can release the refcount taken by
                 * get_page_unless_zero() now that
                 * compound_lock.
                 */
                if (put_page_testzero(page_head))
-                       VM_BUG_ON(1);
+                       VM_BUG_ON_PAGE(1, page_head);
                /* __split_huge_page_refcount will wait now */
-               VM_BUG_ON(page_mapcount(page) <= 0);
+               VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
                atomic_dec(&page->_mapcount);
-               VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
-               VM_BUG_ON(atomic_read(&page->_count) != 0);
+               VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
+               VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
                compound_unlock_irqrestore(page_head, flags);
 
                if (put_page_testzero(page_head)) {
                }
        } else {
                /* page_head is a dangling pointer */
-               VM_BUG_ON(PageTail(page));
+               VM_BUG_ON_PAGE(PageTail(page), page);
                goto out_put_single;
        }
 }
                         * page. __split_huge_page_refcount
                         * cannot race here.
                         */
-                       VM_BUG_ON(!PageHead(page_head));
+                       VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
                        __get_page_tail_foll(page, true);
                        return true;
                } else {
  */
 void lru_cache_add(struct page *page)
 {
-       VM_BUG_ON(PageActive(page) && PageUnevictable(page));
-       VM_BUG_ON(PageLRU(page));
+       VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+       VM_BUG_ON_PAGE(PageLRU(page), page);
        __lru_cache_add(page);
 }
 
                        }
 
                        lruvec = mem_cgroup_page_lruvec(page, zone);
-                       VM_BUG_ON(!PageLRU(page));
+                       VM_BUG_ON_PAGE(!PageLRU(page), page);
                        __ClearPageLRU(page);
                        del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }
 {
        const int file = 0;
 
-       VM_BUG_ON(!PageHead(page));
-       VM_BUG_ON(PageCompound(page_tail));
-       VM_BUG_ON(PageLRU(page_tail));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
+       VM_BUG_ON_PAGE(PageCompound(page_tail), page);
+       VM_BUG_ON_PAGE(PageLRU(page_tail), page);
        VM_BUG_ON(NR_CPUS != 1 &&
                  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
        int active = PageActive(page);
        enum lru_list lru = page_lru(page);
 
-       VM_BUG_ON(PageLRU(page));
+       VM_BUG_ON_PAGE(PageLRU(page), page);
 
        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, lru);
 
        int error;
        struct address_space *address_space;
 
-       VM_BUG_ON(!PageLocked(page));
-       VM_BUG_ON(PageSwapCache(page));
-       VM_BUG_ON(!PageSwapBacked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(PageSwapCache(page), page);
+       VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
        page_cache_get(page);
        SetPageSwapCache(page);
        swp_entry_t entry;
        struct address_space *address_space;
 
-       VM_BUG_ON(!PageLocked(page));
-       VM_BUG_ON(!PageSwapCache(page));
-       VM_BUG_ON(PageWriteback(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+       VM_BUG_ON_PAGE(PageWriteback(page), page);
 
        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        swp_entry_t entry;
        int err;
 
-       VM_BUG_ON(!PageLocked(page));
-       VM_BUG_ON(!PageUptodate(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(!PageUptodate(page), page);
 
        entry = get_swap_page();
        if (!entry.val)
 
 {
        int count;
 
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
        if (unlikely(PageKsm(page)))
                return 0;
        count = page_mapcount(page);
  */
 int try_to_free_swap(struct page *page)
 {
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
 
        if (!PageSwapCache(page))
                return 0;
  */
 struct address_space *__page_file_mapping(struct page *page)
 {
-       VM_BUG_ON(!PageSwapCache(page));
+       VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        return page_swap_info(page)->swap_file->f_mapping;
 }
 EXPORT_SYMBOL_GPL(__page_file_mapping);
 pgoff_t __page_file_index(struct page *page)
 {
        swp_entry_t swap = { .val = page_private(page) };
-       VM_BUG_ON(!PageSwapCache(page));
+       VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        return swp_offset(swap);
 }
 EXPORT_SYMBOL_GPL(__page_file_index);
 
        bool is_unevictable;
        int was_unevictable = PageUnevictable(page);
 
-       VM_BUG_ON(PageLRU(page));
+       VM_BUG_ON_PAGE(PageLRU(page), page);
 
 redo:
        ClearPageUnevictable(page);
                if (!trylock_page(page))
                        goto keep;
 
-               VM_BUG_ON(PageActive(page));
-               VM_BUG_ON(page_zone(page) != zone);
+               VM_BUG_ON_PAGE(PageActive(page), page);
+               VM_BUG_ON_PAGE(page_zone(page) != zone, page);
 
                sc->nr_scanned++;
 
                /* Not a candidate for swapping, so reclaim swap space. */
                if (PageSwapCache(page) && vm_swap_full())
                        try_to_free_swap(page);
-               VM_BUG_ON(PageActive(page));
+               VM_BUG_ON_PAGE(PageActive(page), page);
                SetPageActive(page);
                pgactivate++;
 keep_locked:
                unlock_page(page);
 keep:
                list_add(&page->lru, &ret_pages);
-               VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
+               VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
        }
 
        free_hot_cold_page_list(&free_pages, 1);
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
-               VM_BUG_ON(!PageLRU(page));
+               VM_BUG_ON_PAGE(!PageLRU(page), page);
 
                switch (__isolate_lru_page(page, mode)) {
                case 0:
 {
        int ret = -EBUSY;
 
-       VM_BUG_ON(!page_count(page));
+       VM_BUG_ON_PAGE(!page_count(page), page);
 
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
                struct page *page = lru_to_page(page_list);
                int lru;
 
-               VM_BUG_ON(PageLRU(page));
+               VM_BUG_ON_PAGE(PageLRU(page), page);
                list_del(&page->lru);
                if (unlikely(!page_evictable(page))) {
                        spin_unlock_irq(&zone->lru_lock);
                page = lru_to_page(list);
                lruvec = mem_cgroup_page_lruvec(page, zone);
 
-               VM_BUG_ON(PageLRU(page));
+               VM_BUG_ON_PAGE(PageLRU(page), page);
                SetPageLRU(page);
 
                nr_pages = hpage_nr_pages(page);
                if (page_evictable(page)) {
                        enum lru_list lru = page_lru_base_type(page);
 
-                       VM_BUG_ON(PageActive(page));
+                       VM_BUG_ON_PAGE(PageActive(page), page);
                        ClearPageUnevictable(page);
                        del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
                        add_page_to_lru_list(page, lruvec, lru);