#ifdef CONFIG_MIGRATION
 extern int isolate_lru_page(struct page *p);
 extern int putback_lru_pages(struct list_head *l);
-extern int migrate_pages(struct list_head *l, struct list_head *t);
+extern int migrate_pages(struct list_head *l, struct list_head *t,
+               struct list_head *moved, struct list_head *failed);
 #endif
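
With the widened signature, migrate_pages() no longer splices failing pages
back onto the source list; the caller now owns two result lists and must
drain both back to the LRU itself. A minimal caller sketch, assuming a
pagelist already filled via isolate_lru_page() (it mirrors the swap_pages()
helper added below):

	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int nr_failed;

	nr_failed = migrate_pages(&pagelist, NULL, &moved, &failed);
	/* the caller is responsible for returning every page to the LRU */
	putback_lru_pages(&failed);
	putback_lru_pages(&moved);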
 
 #ifdef CONFIG_MMU
 
        return mpol_check_policy(mode, nodes);
 }
 
+/*
+ * Swap out everything on @pagelist and put the pages back on the LRU.
+ * Returns the number of pages that could not be swapped out.
+ */
+static int swap_pages(struct list_head *pagelist)
+{
+       LIST_HEAD(moved);
+       LIST_HEAD(failed);
+       int n;
+
+       n = migrate_pages(pagelist, NULL, &moved, &failed);
+       putback_lru_pages(&failed);
+       putback_lru_pages(&moved);
+
+       return n;
+}
+
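Note that swap_pages() returns the 'moved' pages to the LRU as well: this
simplified migrate_pages() only writes pages out to swap, so a "moved" page
is still resident and stays on the LRU until reclaim drops it. The return
value counts the pages that could not be swapped out, which a caller can map
onto its own error policy, for example (sketch only):

	if (swap_pages(&pagelist) && (flags & MPOL_MF_STRICT))
		err = -EIO;	/* some pages could not be moved */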
 long do_mbind(unsigned long start, unsigned long len,
                unsigned long mode, nodemask_t *nmask, unsigned long flags)
 {
 	vma = check_range(mm, start, end, nmask, flags,
 		      (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ? &pagelist : NULL);
        err = PTR_ERR(vma);
        if (!IS_ERR(vma)) {
+               int nr_failed = 0;
+
                err = mbind_range(vma, start, end, new);
                if (!list_empty(&pagelist))
-                       migrate_pages(&pagelist, NULL);
-               if (!err && !list_empty(&pagelist) && (flags & MPOL_MF_STRICT))
+                       nr_failed = swap_pages(&pagelist);
+
+               if (!err && nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
        }
 	if (!list_empty(&pagelist))
 		putback_lru_pages(&pagelist);
        down_read(&mm->mmap_sem);
        check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+
        if (!list_empty(&pagelist)) {
-               migrate_pages(&pagelist, NULL);
-               if (!list_empty(&pagelist))
-                       count = putback_lru_pages(&pagelist);
+               count = swap_pages(&pagelist);
+               putback_lru_pages(&pagelist);
        }
+
        up_read(&mm->mmap_sem);
        return count;
 }
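
The second putback_lru_pages(&pagelist) above is not redundant: after its
retry passes, migrate_pages() leaves still-retryable pages on the source
list, so anything swap_pages() did not shift onto its internal moved/failed
lists is returned to the LRU here. The invariant, restated (sketch, not new
code):

	count = swap_pages(&pagelist);	/* drains the moved and failed lists */
	putback_lru_pages(&pagelist);	/* drains pages that kept being retried */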
 
  * SIMPLIFIED VERSION: this implementation of migrate_pages only swaps
  * pages out to swap space and never touches the second (target) page
  * list. The direct migration patchset
  * extends this function to avoid the use of swap.
  */
-int migrate_pages(struct list_head *l, struct list_head *t)
+int migrate_pages(struct list_head *from, struct list_head *to,
+                 struct list_head *moved, struct list_head *failed)
 {
        int retry;
-       LIST_HEAD(failed);
        int nr_failed = 0;
        int pass = 0;
 	struct page *page;
 	struct page *page2;
 	int swapwrite = current->flags & PF_SWAPWRITE;
 
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 redo:
        retry = 0;
 
-       list_for_each_entry_safe(page, page2, l, lru) {
+       list_for_each_entry_safe(page, page2, from, lru) {
                cond_resched();
 
                if (page_count(page) == 1) {
                        /* page was freed from under us. So we are done. */
-                       move_to_lru(page);
+                       list_move(&page->lru, moved);
                        continue;
                }
 		/*
 		 * Anonymous pages must have a swap entry before they can be
 		 * written out, so allocate swap space here if needed.
 		 */
                if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!add_to_swap(page, GFP_KERNEL)) {
                                unlock_page(page);
-                               list_move(&page->lru, &failed);
+                               list_move(&page->lru, failed);
                                nr_failed++;
                                continue;
                        }
                 * Page is properly locked and writeback is complete.
                 * Try to migrate the page.
                 */
-               if (!swap_page(page))
+               if (!swap_page(page)) {
+                       list_move(&page->lru, moved);
                        continue;
+               }
 retry_later:
                retry++;
        }
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;
 
-       if (!list_empty(&failed))
-               list_splice(&failed, l);
-
        return nr_failed + retry;
 }
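
For orientation, the control flow of migrate_pages() above reduces to the
following sketch. It is illustrative only: try_to_swap_page() stands in for
the real lock/writeback/add_to_swap/swap_page sequence shown in the hunk and
is not an actual kernel function, and the pass limit lives in retry logic
elided from this excerpt:

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;
		list_for_each_entry_safe(page, page2, from, lru) {
			if (try_to_swap_page(page)) {		/* hypothetical */
				list_move(&page->lru, moved);
			} else if (failed_permanently(page)) {	/* hypothetical */
				list_move(&page->lru, failed);
				nr_failed++;
			} else {
				retry++;	/* stays on 'from' for the next pass */
			}
		}
	}
	return nr_failed + retry;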