* that isolated the page, the page cache radix tree and
         * optional buffer heads at page->private.
         */
-       return page_count(page) - page_has_private(page) == 2;
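+       /*
+        * The swap cache takes one page reference per subpage of a
+        * THP, so a compound page in the swap cache is pinned
+        * HPAGE_PMD_NR times by the radix tree rather than once.
+        */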
+       int radix_pins = PageTransHuge(page) && PageSwapCache(page) ?
+               HPAGE_PMD_NR : 1;
+       return page_count(page) - page_has_private(page) == 1 + radix_pins;
 }
 
 static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
@@ ... @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                            bool reclaimed)
 {
        unsigned long flags;
+       int refcount;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
@@ ... @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
         * Note that if SetPageDirty is always performed via set_page_dirty,
         * and thus under tree_lock, then this ordering is not required.
         */
-       if (!page_ref_freeze(page, 2))
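+       /*
+        * A freeable base page is pinned twice: by the isolating
+        * caller and by the page cache. A THP in the swap cache
+        * instead holds one swap cache reference per subpage, so
+        * 1 + HPAGE_PMD_NR references must be frozen.
+        */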
+       if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
+               refcount = 1 + HPAGE_PMD_NR;
+       else
+               refcount = 2;
+       if (!page_ref_freeze(page, refcount))
                goto cannot_free;
        /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
        if (unlikely(PageDirty(page))) {
-               page_ref_unfreeze(page, 2);
+               page_ref_unfreeze(page, refcount);
                goto cannot_free;
        }

@@ ... @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * Try to allocate it some swap space here.
                 * Lazyfree page could be freed directly
                 */
-               if (PageAnon(page) && PageSwapBacked(page) &&
-                   !PageSwapCache(page)) {
-                       if (!(sc->gfp_mask & __GFP_IO))
-                               goto keep_locked;
-                       if (PageTransHuge(page)) {
-                               /* cannot split THP, skip it */
-                               if (!can_split_huge_page(page, NULL))
-                                       goto activate_locked;
-                               /*
-                                * Split pages without a PMD map right
-                                * away. Chances are some or all of the
-                                * tail pages can be freed without IO.
-                                */
-                               if (!compound_mapcount(page) &&
-                                   split_huge_page_to_list(page, page_list))
-                                       goto activate_locked;
-                       }
-                       if (!add_to_swap(page)) {
-                               if (!PageTransHuge(page))
-                                       goto activate_locked;
-                               /* Split THP and swap individual base pages */
-                               if (split_huge_page_to_list(page, page_list))
-                                       goto activate_locked;
-                               if (!add_to_swap(page))
-                                       goto activate_locked;
-                       }
-
-                       /* XXX: We don't support THP writes */
-                       if (PageTransHuge(page) &&
-                                 split_huge_page_to_list(page, page_list)) {
-                               delete_from_swap_cache(page);
-                               goto activate_locked;
-                       }
+               if (PageAnon(page) && PageSwapBacked(page)) {
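+                       /*
+                        * Pages already in the swap cache (including a
+                        * THP swapped out as a whole) need no new swap
+                        * allocation.
+                        */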
+                       if (!PageSwapCache(page)) {
+                               if (!(sc->gfp_mask & __GFP_IO))
+                                       goto keep_locked;
+                               if (PageTransHuge(page)) {
+                                       /* cannot split THP, skip it */
+                                       if (!can_split_huge_page(page, NULL))
+                                               goto activate_locked;
+                                       /*
+                                        * Split pages without a PMD map right
+                                        * away. Chances are some or all of the
+                                        * tail pages can be freed without IO.
+                                        */
+                                       if (!compound_mapcount(page) &&
+                                           split_huge_page_to_list(page,
+                                                                   page_list))
+                                               goto activate_locked;
+                               }
+                               if (!add_to_swap(page)) {
+                                       if (!PageTransHuge(page))
+                                               goto activate_locked;
+                                       /* Fall back to swapping normal pages */
+                                       if (split_huge_page_to_list(page,
+                                                                   page_list))
+                                               goto activate_locked;
+                                       if (!add_to_swap(page))
+                                               goto activate_locked;
+                               }
 
-                       may_enter_fs = 1;
+                               may_enter_fs = 1;
 
-                       /* Adding to swap updated mapping */
-                       mapping = page_mapping(page);
+                               /* Adding to swap updated mapping */
+                               mapping = page_mapping(page);
+                       }
                } else if (unlikely(PageTransHuge(page))) {
                        /* Split file THP */
                        if (split_huge_page_to_list(page, page_list))
                                goto keep_locked;
                }
 
-               VM_BUG_ON_PAGE(PageTransHuge(page), page);
-
                /*
                 * The page is mapped into the page tables of one or more
                 * processes. Try to unmap it here.
                 */
                if (page_mapped(page)) {
-                       if (!try_to_unmap(page, ttu_flags | TTU_BATCH_FLUSH)) {
+                       enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
+
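+                       /*
+                        * A THP here is still compound and backed by
+                        * swap entries; split only its PMD mappings so
+                        * the page itself can stay whole.
+                        */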
+                       if (unlikely(PageTransHuge(page)))
+                               flags |= TTU_SPLIT_HUGE_PMD;
+                       if (!try_to_unmap(page, flags)) {
                                nr_unmap_fail++;
                                goto activate_locked;
                }
@@ ... @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * Is there need to periodically free_page_list? It would
                 * appear not as the counts should be low
                 */
-               list_add(&page->lru, &free_pages);
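+               /*
+                * The bulk free path below handles only base pages;
+                * uncharge a THP and free it through its compound
+                * page destructor instead.
+                */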
+               if (unlikely(PageTransHuge(page))) {
+                       mem_cgroup_uncharge(page);
+                       (*get_compound_page_dtor(page))(page);
+               } else
+                       list_add(&page->lru, &free_pages);
                continue;
 
 activate_locked: