extern int handle_pte_fault(struct mm_struct *mm,
                            struct vm_area_struct *vma, unsigned long address,
                            pte_t *pte, pmd_t *pmd, unsigned int flags);
-extern int split_huge_page(struct page *page);
+extern int split_huge_page_to_list(struct page *page, struct list_head *list);
+static inline int split_huge_page(struct page *page)
+{
+       return split_huge_page_to_list(page, NULL);
+}
 extern void __split_huge_page_pmd(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd);
 #define split_huge_page_pmd(__vma, __address, __pmd)                   \
 #define transparent_hugepage_enabled(__vma) 0
 
 #define transparent_hugepage_flags 0UL
+static inline int
+split_huge_page_to_list(struct page *page, struct list_head *list)
+{
+       return 0;
+}
 static inline int split_huge_page(struct page *page)
 {
        return 0;
 
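Taken together, the huge_mm.h hunks keep split_huge_page() source-compatible as a NULL-list wrapper (with a matching no-op stub when transparent hugepages are disabled) while letting new callers capture the tail pages. A minimal caller sketch, not part of the patch (the list name and error value are illustrative):

        LIST_HEAD(tail_pages);  /* tail pages are linked here via page->lru */

        if (split_huge_page_to_list(page, &tail_pages))
                return -EBUSY;  /* returns 1 on failure: page was not split */
        /* on success the tail pages sit on @tail_pages, not on the LRU */
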
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
-                             struct lruvec *lruvec);
+                        struct lruvec *lruvec, struct list_head *head);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
 #define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *);
+extern int add_to_swap(struct page *, struct list_head *list);
 extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
 extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
 extern void __delete_from_swap_cache(struct page *);
        return NULL;
 }
 
-static inline int add_to_swap(struct page *page)
+static inline int add_to_swap(struct page *page, struct list_head *list)
 {
        return 0;
 }
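
The !CONFIG_SWAP stub of add_to_swap() changes in lockstep with the declaration above, so callers compile either way. A caller that does not want to collect tail pages can pass NULL, which is forwarded to split_huge_page_to_list() and so reproduces the old behavior (a sketch; shrink_page_list() at the end of this patch is the only updated caller shown in these hunks):

        if (!add_to_swap(page, NULL))   /* NULL list: split tails go back to the LRU */
                goto activate_locked;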
 
        return ret;
 }
 
-static void __split_huge_page_refcount(struct page *page)
+static void __split_huge_page_refcount(struct page *page,
+                                      struct list_head *list)
 {
        int i;
        struct zone *zone = page_zone(page);
                BUG_ON(!PageDirty(page_tail));
                BUG_ON(!PageSwapBacked(page_tail));
 
-               lru_add_page_tail(page, page_tail, lruvec);
+               lru_add_page_tail(page, page_tail, lruvec, list);
        }
        atomic_sub(tail_count, &page->_count);
        BUG_ON(atomic_read(&page->_count) <= 0);
 
 /* must be called with anon_vma->root->rwsem held */
 static void __split_huge_page(struct page *page,
-                             struct anon_vma *anon_vma)
+                             struct anon_vma *anon_vma,
+                             struct list_head *list)
 {
        int mapcount, mapcount2;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
                       mapcount, page_mapcount(page));
        BUG_ON(mapcount != page_mapcount(page));
 
-       __split_huge_page_refcount(page);
+       __split_huge_page_refcount(page, list);
 
        mapcount2 = 0;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
        BUG_ON(mapcount != mapcount2);
 }
 
-int split_huge_page(struct page *page)
+/*
+ * Split a hugepage into normal pages. This doesn't change the position of the
+ * head page. If @list is NULL, tail pages will be added to the LRU list;
+ * otherwise they are added to @list. Both the head page and the tail pages
+ * will inherit mapping, flags, and so on from the hugepage.
+ * Return 0 if the hugepage is split successfully, otherwise return 1.
+ */
+int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct anon_vma *anon_vma;
        int ret = 1;
                goto out_unlock;
 
        BUG_ON(!PageSwapBacked(page));
-       __split_huge_page(page, anon_vma);
+       __split_huge_page(page, anon_vma, list);
        count_vm_event(THP_SPLIT);
 
        BUG_ON(PageCompound(page));
 
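@list is merely threaded through the middle of mm/huge_memory.c; only the final LRU placement consults it. The call chain this patch modifies, as a sketch:

        split_huge_page_to_list(page, list)
            -> __split_huge_page(page, anon_vma, list)
                -> __split_huge_page_refcount(page, list)
                    -> lru_add_page_tail(page, page_tail, lruvec, list)  /* once per tail page */
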
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
 void lru_add_page_tail(struct page *page, struct page *page_tail,
-                      struct lruvec *lruvec)
+                      struct lruvec *lruvec, struct list_head *list)
 {
        int uninitialized_var(active);
        enum lru_list lru;
        VM_BUG_ON(NR_CPUS != 1 &&
                  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
-       SetPageLRU(page_tail);
+       if (!list)
+               SetPageLRU(page_tail);
 
        if (page_evictable(page_tail)) {
                if (PageActive(page)) {
 
        if (likely(PageLRU(page)))
                list_add_tail(&page_tail->lru, &page->lru);
-       else {
+       else if (list) {
+               /*
+                * page reclaim is reclaiming a huge page: take a
+                * reference on the tail and move it to the caller's
+                * private list instead of the LRU
+                */
+               get_page(page_tail);
+               list_add_tail(&page_tail->lru, list);
+       } else {
                struct list_head *list_head;
                /*
                 * Head page has not yet been counted, as an hpage,
 
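The new branch in lru_add_page_tail() (mm/swap.c) is the one place @list is consumed. In the reclaim path the head page has already been isolated from the LRU, so PageLRU(page) is false; PageLRU is deliberately left clear on the tail as well, and the tail is pinned with get_page() before being parked on the caller's list, mirroring the reference that isolating a page from the LRU would hold. Condensed placement logic (a sketch of the branches above):

        if (likely(PageLRU(page)))
                /* chain the tail behind its head on the LRU */;
        else if (list)
                /* reclaim: get_page(page_tail), then list_add_tail() onto @list */;
        else
                /* head not yet counted: account the tail onto the lruvec list */;
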
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock. 
  */
-int add_to_swap(struct page *page)
+int add_to_swap(struct page *page, struct list_head *list)
 {
        swp_entry_t entry;
        int err;
                return 0;
 
        if (unlikely(PageTransHuge(page)))
-               if (unlikely(split_huge_page(page))) {
+               if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry, NULL);
                        return 0;
                }
 
                if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!(sc->gfp_mask & __GFP_IO))
                                goto keep_locked;
-                       if (!add_to_swap(page))
+                       if (!add_to_swap(page, page_list))
                                goto activate_locked;
                        may_enter_fs = 1;
                }
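
End to end, then: shrink_page_list() in mm/vmscan.c hands its local page_list to add_to_swap(), which forwards it when a THP must be split, so freshly created tail pages land on the very list the reclaim pass is scanning instead of being rotated back through the LRU. Condensed from the hunks above:

        shrink_page_list(page_list, ...)
            add_to_swap(page, page_list)
                split_huge_page_to_list(page, page_list)
                    __split_huge_page() -> __split_huge_page_refcount()
                        lru_add_page_tail(page, page_tail, lruvec, page_list)
                            get_page(page_tail);
                            list_add_tail(&page_tail->lru, page_list);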