int may_enter_fs;
                enum page_references references = PAGEREF_RECLAIM_CLEAN;
                bool dirty, writeback;
+               unsigned int nr_pages;
 
                cond_resched();
 
                VM_BUG_ON_PAGE(PageActive(page), page);
 
-               sc->nr_scanned++;
+               nr_pages = 1 << compound_order(page);
+
+               /* Account the number of base pages, even for a THP */
+               sc->nr_scanned += nr_pages;
 
                if (unlikely(!page_evictable(page)))
                        goto activate_locked;
                case PAGEREF_ACTIVATE:
                        goto activate_locked;
                case PAGEREF_KEEP:
-                       stat->nr_ref_keep++;
+                       stat->nr_ref_keep += nr_pages;
                        goto keep_locked;
                case PAGEREF_RECLAIM:
                case PAGEREF_RECLAIM_CLEAN:
                                }
                                if (!add_to_swap(page)) {
                                        if (!PageTransHuge(page))
-                                               goto activate_locked;
+                                               goto activate_locked_split;
                                        /* Fallback to swap normal pages */
                                        if (split_huge_page_to_list(page,
                                                                    page_list))
                                        count_vm_event(THP_SWPOUT_FALLBACK);
 #endif
                                        if (!add_to_swap(page))
-                                               goto activate_locked;
+                                               goto activate_locked_split;
                                }
 
                                may_enter_fs = 1;
                                goto keep_locked;
                }
 
+               /*
+                * The THP may have been split above.  If so, subtract the
+                * tail pages from nr_scanned and update nr_pages so the tail
+                * pages are not accounted twice.
+                *
+                * Tail pages that were successfully added to the swap cache
+                * reach here.
+                */
+               if ((nr_pages > 1) && !PageTransHuge(page)) {
+                       sc->nr_scanned -= (nr_pages - 1);
+                       nr_pages = 1;
+               }
+
                /*
                 * The page is mapped into the page tables of one or more
                 * processes. Try to unmap it here.
                        if (unlikely(PageTransHuge(page)))
                                flags |= TTU_SPLIT_HUGE_PMD;
                        if (!try_to_unmap(page, flags)) {
-                               stat->nr_unmap_fail++;
+                               stat->nr_unmap_fail += nr_pages;
                                goto activate_locked;
                        }
                }
 
                unlock_page(page);
 free_it:
-               nr_reclaimed++;
+               /*
+                * A THP may be swapped out as a whole, so account all of
+                * its base pages.
+                */
+               nr_reclaimed += nr_pages;
 
                /*
                 * Is there need to periodically free_page_list? It would
                        list_add(&page->lru, &free_pages);
                continue;
 
+activate_locked_split:
+               /*
+                * Tail pages that failed to be added to the swap cache
+                * reach here.  Fix up nr_scanned and nr_pages.
+                */
+               if (nr_pages > 1) {
+                       sc->nr_scanned -= (nr_pages - 1);
+                       nr_pages = 1;
+               }
 activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
                if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
                if (!PageMlocked(page)) {
                        int type = page_is_file_cache(page);
                        SetPageActive(page);
-                       pgactivate++;
-                       stat->nr_activate[type] += hpage_nr_pages(page);
+                       stat->nr_activate[type] += nr_pages;
                        count_memcg_page_event(page, PGACTIVATE);
                }
 keep_locked:
                VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
        }
 
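+       /*
+        * nr_activate[] is now accounted in base pages (an activated THP
+        * contributes all of its base pages), so derive pgactivate from it
+        * instead of incrementing it per page in the loop above.
+        */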
+       pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
+
        mem_cgroup_uncharge_list(&free_pages);
        try_to_unmap_flush();
        free_unref_page_list(&free_pages);
        LIST_HEAD(pages_skipped);
        isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
 
+       total_scan = 0;
        scan = 0;
-       for (total_scan = 0;
-            scan < nr_to_scan && nr_taken < nr_to_scan && !list_empty(src);
-            total_scan++) {
+       while (scan < nr_to_scan && !list_empty(src)) {
                struct page *page;
 
                page = lru_to_page(src);
 
                VM_BUG_ON_PAGE(!PageLRU(page), page);
 
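+               /*
+                * A compound page contributes all of its base pages to
+                * total_scan, so the skip and scan counters below are in
+                * units of base pages too.
+                */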
+               nr_pages = 1 << compound_order(page);
+               total_scan += nr_pages;
+
                if (page_zonenum(page) > sc->reclaim_idx) {
                        list_move(&page->lru, &pages_skipped);
-                       nr_skipped[page_zonenum(page)]++;
+                       nr_skipped[page_zonenum(page)] += nr_pages;
                        continue;
                }
 
                 * return with no isolated pages if the LRU mostly contains
                 * ineligible pages.  This causes the VM to not reclaim any
                 * pages, triggering a premature OOM.
+                *
+                * Account all the tail pages of a THP as well.  This does
+                * not cause a premature OOM since __isolate_lru_page()
+                * returns -EBUSY only when the page is being freed
+                * somewhere else.
                 */
-               scan++;
+               scan += nr_pages;
                switch (__isolate_lru_page(page, mode)) {
                case 0:
-                       nr_pages = hpage_nr_pages(page);
                        nr_taken += nr_pages;
                        nr_zone_taken[page_zonenum(page)] += nr_pages;
                        list_move(&page->lru, dst);