free_page_t put_new_page,
                                   unsigned long private, struct page *page,
                                   int force, enum migrate_mode mode,
-                                  enum migrate_reason reason)
+                                  enum migrate_reason reason,
+                                  struct list_head *ret)
 {
        int rc = MIGRATEPAGE_SUCCESS;
        struct page *newpage = NULL;
                 * migrated will have kept its references and be restored.
                 */
                list_del(&page->lru);
+       }
 
+       /*
+        * If migration is successful, release the reference grabbed during
+        * isolation. Otherwise, restore the page to the right list unless
+        * we want to retry.
+        */
+       if (rc == MIGRATEPAGE_SUCCESS) {
                /*
                 * Compaction can migrate also non-LRU pages which are
                 * not accounted to NR_ISOLATED_*. They can be recognized
                if (likely(!__PageMovable(page)))
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
                                        page_is_file_lru(page), -thp_nr_pages(page));
-       }
 
-       /*
-        * If migration is successful, releases reference grabbed during
-        * isolation. Otherwise, restore the page to right list unless
-        * we want to retry.
-        */
-       if (rc == MIGRATEPAGE_SUCCESS) {
                if (reason != MR_MEMORY_FAILURE)
                        /*
                         * We release the page in page_handle_poison.
                         */
                        put_page(page);
        } else {
-               if (rc != -EAGAIN) {
-                       if (likely(!__PageMovable(page))) {
-                               putback_lru_page(page);
-                               goto put_new;
-                       }
+               if (rc != -EAGAIN)
+                       list_add_tail(&page->lru, ret);
 
-                       lock_page(page);
-                       if (PageMovable(page))
-                               putback_movable_page(page);
-                       else
-                               __ClearPageIsolated(page);
-                       unlock_page(page);
-                       put_page(page);
-               }
-put_new:
                if (put_new_page)
                        put_new_page(newpage, private);
                else
 static int unmap_and_move_huge_page(new_page_t get_new_page,
                                free_page_t put_new_page, unsigned long private,
                                struct page *hpage, int force,
-                               enum migrate_mode mode, int reason)
+                               enum migrate_mode mode, int reason,
+                               struct list_head *ret)
 {
        int rc = -EAGAIN;
        int page_was_mapped = 0;
         * kicking migration.
         */
        if (!hugepage_migration_supported(page_hstate(hpage))) {
-               putback_active_hugepage(hpage);
+               list_move_tail(&hpage->lru, ret);
                return -ENOSYS;
        }
 
 out_unlock:
        unlock_page(hpage);
 out:
-       if (rc != -EAGAIN)
+       if (rc == MIGRATEPAGE_SUCCESS)
                putback_active_hugepage(hpage);
+       else if (rc != -EAGAIN)
+               list_move_tail(&hpage->lru, ret);
 
        /*
         * If migration was not successful and there's a freeing callback, use
  *
  * The function returns after 10 attempts or if no pages are movable any more
  * because the list has become empty or no retryable pages exist any more.
- * The caller should call putback_movable_pages() to return pages to the LRU
- * or free list only if ret != 0.
+ * It is the caller's responsibility to call putback_movable_pages() to return
+ * pages to the LRU or free list only if ret != 0.
  *
  * Returns the number of pages that were not migrated, or an error code.
  */
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc, nr_subpages;
+       LIST_HEAD(ret_pages);
 
        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;
                        if (PageHuge(page))
                                rc = unmap_and_move_huge_page(get_new_page,
                                                put_new_page, private, page,
-                                               pass > 2, mode, reason);
+                                               pass > 2, mode, reason,
+                                               &ret_pages);
                        else
                                rc = unmap_and_move(get_new_page, put_new_page,
                                                private, page, pass > 2, mode,
-                                               reason);
-
+                                               reason, &ret_pages);
+                       /*
+                        * The rules are:
+                        *      Success: non-hugetlb page will be freed, hugetlb
+                        *               page will be put back
+                        *      -EAGAIN: stay on the from list
+                        *      -ENOMEM: stay on the from list
+                        *      Other errno: put on the ret_pages list, then
+                        *                   spliced to the from list
+                        */
                        switch(rc) {
                        case -ENOMEM:
                                /*
        nr_thp_failed += thp_retry;
        rc = nr_failed;
 out:
+       /*
+        * Put the permanently failed pages back on the migration list; the
+        * caller will put them back on the right list.
+        */
+       list_splice(&ret_pages, from);
+
        count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
        count_vm_events(PGMIGRATE_FAIL, nr_failed);
        count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
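
The kernel-doc change above shifts responsibility to the caller: when migrate_pages()
returns non-zero, the permanently failed pages have already been spliced back onto the
'from' list and the caller must return them to their proper lists itself. The following
is a minimal caller-side sketch of that contract, modelled on existing users such as the
memory-hotplug path; it assumes mm-internal context (struct migration_target_control is
declared in mm/internal.h), and the helper name, node id, and gfp flags are illustrative
rather than taken from this patch.

/*
 * Sketch only: demonstrates the documented contract that a non-zero
 * return from migrate_pages() leaves the failed pages on 'pagelist',
 * which the caller must hand back via putback_movable_pages().
 */
static void migrate_list_or_putback(struct list_head *pagelist, int nid)
{
        struct migration_target_control mtc = {
                .nid = nid,
                .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
        };
        int ret;

        ret = migrate_pages(pagelist, alloc_migration_target, NULL,
                            (unsigned long)&mtc, MIGRATE_SYNC,
                            MR_MEMORY_HOTPLUG);
        if (ret)
                /* Failed pages were spliced back; restore them to LRU/free lists. */
                putback_movable_pages(pagelist);
}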