put_swap_token(swap_token_mm);
 }
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+extern int mem_cgroup_cache_charge_swapin(struct page *page,
+                               struct mm_struct *mm, gfp_t mask, bool locked);
+extern void mem_cgroup_uncharge_swapcache(struct page *page);
+#else
+static inline
+int mem_cgroup_cache_charge_swapin(struct page *page,
+                               struct mm_struct *mm, gfp_t mask, bool locked)
+{
+       return 0;
+}
+static inline void mem_cgroup_uncharge_swapcache(struct page *page)
+{
+}
+#endif
+
 #else /* CONFIG_SWAP */
 
 #define nr_swap_pages                          0L
 #define has_swap_token(x) 0
 #define disable_swap_token() do { } while(0)
 
+static inline int mem_cgroup_cache_charge_swapin(struct page *page,
+                       struct mm_struct *mm, gfp_t mask, bool locked)
+{
+       return 0;
+}
+
 #endif /* CONFIG_SWAP */
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
 
 #include <linux/memcontrol.h>
 #include <linux/cgroup.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/smp.h>
 #include <linux/page-flags.h>
 #include <linux/backing-dev.h>
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
        MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
        MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
+       MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
        NR_CHARGE_TYPE,
 };
 
                                MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }
 
+#ifdef CONFIG_SWAP
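+/*
+ * Charge a page that sits in the swap cache at swapin time.  @locked tells
+ * whether the caller already holds the page lock; if not, it is taken here
+ * so PageSwapCache() can be tested safely.  The charge is accounted as
+ * page cache (MEM_CGROUP_CHARGE_TYPE_SHMEM).
+ */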
+int mem_cgroup_cache_charge_swapin(struct page *page,
+                       struct mm_struct *mm, gfp_t mask, bool locked)
+{
+       int ret = 0;
+
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+       if (unlikely(!mm))
+               mm = &init_mm;
+       if (!locked)
+               lock_page(page);
+       /*
+        * If the caller did not hold the page lock, the page may have been
+        * dropped from the swap cache before we took the lock above, so
+        * only charge it if it is still a swapcache page.
+        */
+       if (PageSwapCache(page)) {
+               ret = mem_cgroup_charge_common(page, mm, mask,
+                               MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+       }
+       if (!locked)
+               unlock_page(page);
+
+       return ret;
+}
+#endif
+
 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 {
        struct page_cgroup *pc;
        if (mem_cgroup_subsys.disabled)
                return;
 
+       if (PageSwapCache(page))
+               return;
+
        /*
         * Check if our page_cgroup is valid
         */
                return;
 
        lock_page_cgroup(pc);
-       if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
-            || !PageCgroupUsed(pc)) {
-               /* This happens at race in zap_pte_range() and do_swap_page()*/
-               unlock_page_cgroup(pc);
-               return;
+
+       if (!PageCgroupUsed(pc))
+               goto unlock_out;
+
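+       /*
+        * Do not uncharge a page that is still in use: a mapped page can
+        * race with zap_pte_range()/do_swap_page(), and a swap-backed page
+        * still on its mapping's radix tree has not finished swap-out yet.
+        */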
+       switch (ctype) {
+       case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+               if (page_mapped(page))
+                       goto unlock_out;
+               break;
+       case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
+               if (!PageAnon(page)) {          /* Shared memory */
+                       if (page->mapping && !page_is_file_cache(page))
+                               goto unlock_out;
+               } else if (page_mapped(page))   /* Anon */
+                       goto unlock_out;
+               break;
+       default:
+               break;
        }
+
        ClearPageCgroupUsed(pc);
        mem = pc->mem_cgroup;
 
        css_put(&mem->css);
 
        return;
+
+unlock_out:
+       unlock_page_cgroup(pc);
+       return;
 }
 
 void mem_cgroup_uncharge_page(struct page *page)
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+void mem_cgroup_uncharge_swapcache(struct page *page)
+{
+       __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
+}
+
 /*
  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
  * page belongs to.
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 
        /* unused page is not on radix-tree now. */
-       if (unused && ctype != MEM_CGROUP_CHARGE_TYPE_MAPPED)
+       if (unused)
                __mem_cgroup_uncharge_common(unused, ctype);
 
        pc = lookup_page_cgroup(target);
 
        error = 1;
        if (!inode)
                goto out;
-       /* Charge page using GFP_HIGHUSER_MOVABLE while we can wait */
-       error = mem_cgroup_cache_charge(page, current->mm, GFP_HIGHUSER_MOVABLE);
+       /*
+        * Charge the page using GFP_HIGHUSER_MOVABLE while we can wait.
+        * The charge goes to the user (not to the caller) when swap
+        * accounting is used.
+        */
+       error = mem_cgroup_cache_charge_swapin(page,
+                       current->mm, GFP_HIGHUSER_MOVABLE, true);
        if (error)
                goto out;
        error = radix_tree_preload(GFP_KERNEL);
                                goto repeat;
                        }
                        wait_on_page_locked(swappage);
+                       /*
+                        * We want to avoid charging at add_to_page_cache(),
+                        * so charge against this swap cache here instead.
+                        */
+                       if (mem_cgroup_cache_charge_swapin(swappage,
+                                               current->mm, gfp, false)) {
+                               page_cache_release(swappage);
+                               error = -ENOMEM;
+                               goto failed;
+                       }
                        page_cache_release(swappage);
                        goto repeat;
                }