static int __add_to_page_cache_locked(struct page *page,
                                      struct address_space *mapping,
                                      pgoff_t offset, gfp_t gfp_mask,
                                      void **shadowp)
{
        XA_STATE(xas, &mapping->i_pages, offset);
        int huge = PageHuge(page);
-       struct mem_cgroup *memcg;
        int error;
        void *old;
 
        VM_BUG_ON_PAGE(PageSwapBacked(page), page);
        mapping_set_update(&xas, mapping);
 
-       if (!huge) {
-               error = mem_cgroup_try_charge(page, current->mm,
-                                             gfp_mask, &memcg);
-               if (error)
-                       return error;
-       }
-
        get_page(page);
        page->mapping = mapping;
        page->index = offset;
 
+       if (!huge) {
+               error = mem_cgroup_charge(page, current->mm, gfp_mask, false);
+               if (error)
+                       goto error;
+       }
+
        do {
                xas_lock_irq(&xas);
                old = xas_load(&xas);
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
 
-       if (xas_error(&xas))
+       if (xas_error(&xas)) {
+               error = xas_error(&xas);
                goto error;
+       }
 
-       if (!huge)
-               mem_cgroup_commit_charge(page, memcg, false);
        trace_mm_filemap_add_to_page_cache(page);
        return 0;
 error:
        page->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
-       if (!huge)
-               mem_cgroup_cancel_charge(page, memcg);
        put_page(page);
-       return xas_error(&xas);
+       return error;
 }
 ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
 
 
        cancel_charge(memcg, nr_pages);
 }
 
+/**
+ * mem_cgroup_charge - charge a newly allocated page to a cgroup
+ * @page: page to charge
+ * @mm: mm context of the victim
+ * @gfp_mask: reclaim mode
+ * @lrucare: page might be on the LRU already
+ *
+ * Try to charge @page to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp_mask if necessary.
+ *
+ * Returns 0 on success. Otherwise, an error code is returned.
+ */
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
+                     bool lrucare)
+{
+       struct mem_cgroup *memcg;
+       int ret;
+
+       VM_BUG_ON_PAGE(!page->mapping, page);
+
+       ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg);
+       if (ret)
+               return ret;
+       mem_cgroup_commit_charge(page, memcg, lrucare);
+       return 0;
+}
+
 struct uncharge_gather {
        struct mem_cgroup *memcg;
        unsigned long pgpgout;
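
/*
 * Illustrative only, not part of the patch: a minimal sketch of the new
 * calling convention introduced by mem_cgroup_charge() above.
 * example_cache_add() is a hypothetical caller used purely for
 * illustration; the pattern simply mirrors __add_to_page_cache_locked().
 * Note that page->mapping must be set up before charging (see the
 * VM_BUG_ON_PAGE in mem_cgroup_charge()), and that a failed insertion no
 * longer needs an explicit cancel step.
 */
static int example_cache_add(struct page *page, struct address_space *mapping,
                             pgoff_t index, gfp_t gfp)
{
        int error;

        get_page(page);
        page->mapping = mapping;        /* required before charging */
        page->index = index;

        /* one call replaces try_charge + commit_charge/cancel_charge */
        error = mem_cgroup_charge(page, current->mm, gfp, false);
        if (error)
                goto err;

        /* ... insert the page into the cache data structure here ... */

        return 0;
err:
        /* error path mirrors __add_to_page_cache_locked(): no cancel step */
        page->mapping = NULL;
        put_page(page);
        return error;
}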
 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 {
        VM_BUG_ON_PAGE(PageLRU(page), page);
-       VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
-                       !PageHWPoison(page) , page);
 
        if (!page->mem_cgroup)
                return;
 
  */
 static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
-                                  pgoff_t index, void *expected, gfp_t gfp)
+                                  pgoff_t index, void *expected, gfp_t gfp,
+                                  struct mm_struct *charge_mm)
 {
        XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
        unsigned long i = 0;
        unsigned long nr = compound_nr(page);
+       int error;
 
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
        page->mapping = mapping;
        page->index = index;
 
+       error = mem_cgroup_charge(page, charge_mm, gfp, PageSwapCache(page));
+       if (error) {
+               if (!PageSwapCache(page) && PageTransHuge(page)) {
+                       count_vm_event(THP_FILE_FALLBACK);
+                       count_vm_event(THP_FILE_FALLBACK_CHARGE);
+               }
+               goto error;
+       }
+       cgroup_throttle_swaprate(page, gfp);
+
        do {
                void *entry;
                xas_lock_irq(&xas);
        } while (xas_nomem(&xas, gfp));
 
        if (xas_error(&xas)) {
-               page->mapping = NULL;
-               page_ref_sub(page, nr);
-               return xas_error(&xas);
+               error = xas_error(&xas);
+               goto error;
        }
 
        return 0;
+error:
+       page->mapping = NULL;
+       page_ref_sub(page, nr);
+       return error;
 }
 
 /*
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
-       struct mem_cgroup *memcg;
        struct page *page;
        swp_entry_t swap;
        int error;
                        goto failed;
        }
 
-       error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg);
-       if (error)
-               goto failed;
-
        error = shmem_add_to_page_cache(page, mapping, index,
-                                       swp_to_radix_entry(swap), gfp);
-       if (error) {
-               mem_cgroup_cancel_charge(page, memcg);
+                                       swp_to_radix_entry(swap), gfp,
+                                       charge_mm);
+       if (error)
                goto failed;
-       }
-
-       mem_cgroup_commit_charge(page, memcg, true);
 
        spin_lock_irq(&info->lock);
        info->swapped--;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct mm_struct *charge_mm;
-       struct mem_cgroup *memcg;
        struct page *page;
        enum sgp_type sgp_huge = sgp;
        pgoff_t hindex = index;
        if (sgp == SGP_WRITE)
                __SetPageReferenced(page);
 
-       error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg);
-       if (error) {
-               if (PageTransHuge(page)) {
-                       count_vm_event(THP_FILE_FALLBACK);
-                       count_vm_event(THP_FILE_FALLBACK_CHARGE);
-               }
-               goto unacct;
-       }
        error = shmem_add_to_page_cache(page, mapping, hindex,
-                                       NULL, gfp & GFP_RECLAIM_MASK);
-       if (error) {
-               mem_cgroup_cancel_charge(page, memcg);
+                                       NULL, gfp & GFP_RECLAIM_MASK,
+                                       charge_mm);
+       if (error)
                goto unacct;
-       }
-       mem_cgroup_commit_charge(page, memcg, false);
        lru_cache_add_anon(page);
 
        spin_lock_irq(&info->lock);
        struct address_space *mapping = inode->i_mapping;
        gfp_t gfp = mapping_gfp_mask(mapping);
        pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
-       struct mem_cgroup *memcg;
        spinlock_t *ptl;
        void *page_kaddr;
        struct page *page;
        if (unlikely(offset >= max_off))
                goto out_release;
 
-       ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg);
-       if (ret)
-               goto out_release;
-
        ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
-                                               gfp & GFP_RECLAIM_MASK);
+                                     gfp & GFP_RECLAIM_MASK, dst_mm);
        if (ret)
-               goto out_release_uncharge;
-
-       mem_cgroup_commit_charge(page, memcg, false);
+               goto out_release;
 
        _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
        if (dst_vma->vm_flags & VM_WRITE)
        ret = -EFAULT;
        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (unlikely(offset >= max_off))
-               goto out_release_uncharge_unlock;
+               goto out_release_unlock;
 
        ret = -EEXIST;
        if (!pte_none(*dst_pte))
-               goto out_release_uncharge_unlock;
+               goto out_release_unlock;
 
        lru_cache_add_anon(page);
 
        ret = 0;
 out:
        return ret;
-out_release_uncharge_unlock:
+out_release_unlock:
        pte_unmap_unlock(dst_pte, ptl);
        ClearPageDirty(page);
        delete_from_page_cache(page);
-out_release_uncharge:
-       mem_cgroup_cancel_charge(page, memcg);
 out_release:
        unlock_page(page);
        put_page(page);