return __basepage_index(page);
 }
 
+extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);
 static inline bool hugepage_migration_supported(struct hstate *h)
 {
        return 1;
 }
-#define hstate_index_to_shift(index) 0
-#define hstate_index(h) 0
+
+/*
+ * Stub replacing the old `#define hstate_index_to_shift(index) 0` macro with
+ * a type-checked static inline. Always returns shift 0. Presumably this is
+ * the !CONFIG_HUGETLB_PAGE branch — NOTE(review): confirm the enclosing
+ * #ifdef, which lies outside this hunk.
+ */
+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+       return 0;
+}
+
+/*
+ * Stub replacing the old `#define hstate_index(h) 0` macro with a
+ * type-checked static inline. Always returns index 0; the hstate argument
+ * is intentionally unused in this configuration.
+ */
+static inline int hstate_index(struct hstate *h)
+{
+       return 0;
+}
 
 static inline pgoff_t basepage_index(struct page *page)
 {
        return page->index;
 }
-#define dissolve_free_huge_pages(s, e) 0
-#define hugepage_migration_supported(h)        false
+
+/*
+ * Stub counterpart of the real dissolve_free_huge_page() (declared extern
+ * above and un-staticed in mm/hugetlb.c by this patch). Reports success (0)
+ * without doing any work, so callers need no config-specific handling.
+ */
+static inline int dissolve_free_huge_page(struct page *page)
+{
+       return 0;
+}
+
+/*
+ * Stub replacing the old `#define dissolve_free_huge_pages(s, e) 0` macro
+ * with a static inline: same always-0 result, but the pfn-range arguments
+ * are now type-checked and evaluated exactly once.
+ */
+static inline int dissolve_free_huge_pages(unsigned long start_pfn,
+                                          unsigned long end_pfn)
+{
+       return 0;
+}
+
+/*
+ * Stub replacing `#define hugepage_migration_supported(h) false`: hugepage
+ * migration is never supported in this configuration, so always return
+ * false. Mirrors the signature of the real variant defined earlier in this
+ * header, which returns 1.
+ */
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+       return false;
+}
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
 
  * number of free hugepages would be reduced below the number of reserved
  * hugepages.
  */
-static int dissolve_free_huge_page(struct page *page)
+int dissolve_free_huge_page(struct page *page)
 {
        int rc = 0;
 
                        rc = -EBUSY;
                        goto out;
                }
+               /*
+                * Move PageHWPoison flag from head page to the raw error page,
+                * which makes any subpages rather than the error page reusable.
+                */
+               if (PageHWPoison(head) && page != head) {
+                       SetPageHWPoison(page);
+                       ClearPageHWPoison(head);
+               }
                list_del(&head->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
 
                if (ret > 0)
                        ret = -EIO;
        } else {
-               /* overcommit hugetlb page will be freed to buddy */
-               SetPageHWPoison(page);
                if (PageHuge(page))
-                       dequeue_hwpoisoned_huge_page(hpage);
-               num_poisoned_pages_inc();
+                       dissolve_free_huge_page(page);
        }
        return ret;
 }
 
 out:
        if (rc != -EAGAIN)
                putback_active_hugepage(hpage);
+       if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
+               num_poisoned_pages_inc();
 
        /*
         * If migration was not successful and there's a freeing callback, use