#ifdef CONFIG_MIGRATION
 
-extern void putback_lru_pages(struct list_head *l);
 extern void putback_movable_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
                        struct page *, struct page *, enum migrate_mode);
 #else
 
-static inline void putback_lru_pages(struct list_head *l) {}
 static inline void putback_movable_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t new,
                unsigned long private, enum migrate_mode mode, int reason)
 
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
                                        MIGRATE_SYNC, MR_MEMORY_FAILURE);
                if (ret) {
-                       putback_lru_pages(&pagelist);
+                       if (!list_empty(&pagelist)) {
+                               list_del(&page->lru);
+                               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                               page_is_file_cache(page));
+                               putback_lru_page(page);
+                       }
+
                        pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
                                pfn, ret, page->flags);
                        if (ret > 0)
 
        return 0;
 }
 
-/*
- * Add isolated pages on the list back to the LRU under page lock
- * to avoid leaking evictable pages back onto unevictable list.
- */
-void putback_lru_pages(struct list_head *l)
-{
-       struct page *page;
-       struct page *page2;
-
-       list_for_each_entry_safe(page, page2, l, lru) {
-               list_del(&page->lru);
-               dec_zone_page_state(page, NR_ISOLATED_ANON +
-                               page_is_file_cache(page));
-                       putback_lru_page(page);
-       }
-}
-
 /*
  * Put previously isolated pages back onto the appropriate lists
  * from where they were once taken off for compaction/migration.
  *
- * This function shall be used instead of putback_lru_pages(),
- * whenever the isolated pageset has been built by isolate_migratepages_range()
+ * This function shall be used whenever the isolated pageset has been
+ * built from LRU, balloon, or hugetlbfs pages. See
+ * isolate_migratepages_range() and isolate_huge_page().
  */
 void putback_movable_pages(struct list_head *l)
 {
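For context, a minimal caller-side sketch of the rule the updated comment states: pages isolated onto a private list are fed to migrate_pages(), and whatever could not be migrated is handed back to putback_movable_pages(). The helper and allocation callback names below are illustrative only, not part of this patch; the signatures follow the migrate_pages() interface as it stands at this point in the series (new_page_t callback, private data, migrate mode, migrate reason).

#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>

/* Illustrative allocation callback for migrate_pages(); not from this patch. */
static struct page *alloc_target_page(struct page *page, unsigned long private,
				      int **resultp)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

/* Hypothetical helper: migrate an already-isolated set of pages. */
static int migrate_isolated_set(struct list_head *pagelist)
{
	int ret;

	ret = migrate_pages(pagelist, alloc_target_page, 0,
			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
	if (ret) {
		/*
		 * Anything migrate_pages() failed to move is still on the
		 * list; putback_movable_pages() returns LRU, balloon and
		 * hugetlbfs pages to their respective lists.
		 */
		putback_movable_pages(pagelist);
	}
	return ret;
}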
        nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
                                     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
        if (nr_remaining) {
-               putback_lru_pages(&migratepages);
+               if (!list_empty(&migratepages)) {
+                       list_del(&page->lru);
+                       dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                       page_is_file_cache(page));
+                       putback_lru_page(page);
+               }
                isolated = 0;
        } else
                count_vm_numa_event(NUMA_PAGE_MIGRATE);
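Both replacement hunks above open-code the same single-page cleanup rather than calling a list-walking helper: each caller isolated exactly one known page, so on migration failure it drops the NR_ISOLATED accounting and puts that page back directly. A hedged restatement of that pattern, using a hypothetical helper name:

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

/*
 * Hypothetical helper mirroring the error paths patched above: valid only
 * when "page" is the single page the caller isolated onto "pagelist".
 * If migrate_pages() already consumed it, the list is empty and there is
 * nothing to undo.
 */
static void putback_single_isolated_page(struct page *page,
					 struct list_head *pagelist)
{
	if (!list_empty(pagelist)) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}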