 /* Cgroup-specific page state, on top of universal node page state */
 enum memcg_stat_item {
-       MEMCG_RSS = NR_VM_NODE_STAT_ITEMS,
-       MEMCG_RSS_HUGE,
+       MEMCG_RSS_HUGE = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SWAP,
        MEMCG_SOCK,
        /* XXX: why are these zone and not node counters? */
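
With MEMCG_RSS gone, anonymous memory is read from the generic NR_ANON_MAPPED node counter, and only items that still have no node-level equivalent remain in the memcg-private enum. The per-memcg stat array is indexed by one flat space that appends these items after the node stats, which is why the first surviving item inherits the = NR_VM_NODE_STAT_ITEMS initializer. A toy model of that layout (not kernel code, names made up):

    /* Node-level items come first ... */
    enum toy_node_stat_item  { TOY_NR_ANON_MAPPED, TOY_NR_FILE_PAGES, TOY_NR_NODE_ITEMS };

    /* ... and the memcg-only items continue the same index space. */
    enum toy_memcg_stat_item {
            TOY_MEMCG_RSS_HUGE = TOY_NR_NODE_ITEMS,
            TOY_MEMCG_SWAP,
            TOY_MEMCG_SOCK,
            TOY_MEMCG_NR_STAT,
    };

    /* One array, indexed by either enum, holds both kinds of counter. */
    static long toy_vmstats[TOY_MEMCG_NR_STAT];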
 
 
        if (new_page) {
                get_page(new_page);
-               page_add_new_anon_rmap(new_page, vma, addr, false);
                mem_cgroup_commit_charge(new_page, memcg, false);
+               page_add_new_anon_rmap(new_page, vma, addr, false);
                lru_cache_add_active_or_unevictable(new_page, vma);
        } else
                /* no new page, just dec_mm_counter for old_page */
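
Every call-site hunk in this patch makes the same swap: mem_cgroup_commit_charge() now runs before page_add_new_anon_rmap() / do_page_add_anon_rmap(). The rmap call is what accounts NR_ANON_MAPPED, and since that counter now lives in the lruvec it needs page->mem_cgroup to be committed first; the THP, khugepaged, COW, swap-in, migration and userfaultfd hunks below all follow the same order. A condensed sketch of the pattern, using only the calls shown in the hunks (error handling and locking of the real call sites omitted):

    mem_cgroup_commit_charge(page, memcg, false);    /* 1. bind page->mem_cgroup           */
    page_add_new_anon_rmap(page, vma, addr, false);  /* 2. account NR_ANON_MAPPED against  */
                                                     /*    that memcg via the lruvec state */
    lru_cache_add_active_or_unevictable(page, vma);  /* 3. only then put the page on a LRU */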
 
 
                entry = mk_huge_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               page_add_new_anon_rmap(page, vma, haddr, true);
                mem_cgroup_commit_charge(page, memcg, false);
+               page_add_new_anon_rmap(page, vma, haddr, true);
                lru_cache_add_active_or_unevictable(page, vma);
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
 
 
        spin_lock(pmd_ptl);
        BUG_ON(!pmd_none(*pmd));
-       page_add_new_anon_rmap(new_page, vma, address, true);
        mem_cgroup_commit_charge(new_page, memcg, false);
+       page_add_new_anon_rmap(new_page, vma, address, true);
        count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
 
                                         struct page *page,
                                         int nr_pages)
 {
-       /*
-        * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
-        * counted as CACHE even if it's on ANON LRU.
-        */
-       if (PageAnon(page))
-               __mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
-
        if (abs(nr_pages) > 1) {
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
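
With the PageAnon() branch removed, mem_cgroup_charge_statistics() no longer accounts anon at all; only the THP counter stays here, and the per-memcg anon count is maintained from the rmap side via __mod_lruvec_page_state(page, NR_ANON_MAPPED, ...) further down. A toy illustration (not kernel code) of what the lruvec helpers provide over the old split accounting, namely that one update keeps the node-level and cgroup-level views of the same item in sync:

    struct toy_counts { long node; long memcg; };

    static void toy_mod_lruvec_state(struct toy_counts *c, long nr)
    {
            c->node  += nr;   /* previously the only update, via __mod_node_page_state() */
            c->memcg += nr;   /* previously a separate, easy-to-miss MEMCG_RSS update    */
    }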


         */
 
        seq_buf_printf(&s, "anon %llu\n",
-                      (u64)memcg_page_state(memcg, MEMCG_RSS) *
+                      (u64)memcg_page_state(memcg, NR_ANON_MAPPED) *
                       PAGE_SIZE);
        seq_buf_printf(&s, "file %llu\n",
                       (u64)memcg_page_state(memcg, NR_FILE_PAGES) *
 
        if (mem_cgroup_is_root(memcg)) {
                val = memcg_page_state(memcg, NR_FILE_PAGES) +
-                       memcg_page_state(memcg, MEMCG_RSS);
+                       memcg_page_state(memcg, NR_ANON_MAPPED);
                if (swap)
                        val += memcg_page_state(memcg, MEMCG_SWAP);
        } else {
 
 static const unsigned int memcg1_stats[] = {
        NR_FILE_PAGES,
-       MEMCG_RSS,
+       NR_ANON_MAPPED,
        MEMCG_RSS_HUGE,
        NR_SHMEM,
        NR_FILE_MAPPED,
 
        lock_page_memcg(page);
 
-       if (!PageAnon(page)) {
+       if (PageAnon(page)) {
+               if (page_mapped(page)) {
+                       __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
+                       __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
+               }
+       } else {
                __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
                __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
 
 {
        unsigned int nr_pages = hpage_nr_pages(page);
 
-       VM_BUG_ON_PAGE(!page->mapping, page);
        VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
 
        if (mem_cgroup_disabled())
        struct mem_cgroup *memcg;
        int ret;
 
-       VM_BUG_ON_PAGE(!page->mapping, page);
-
        ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg);
        if (ret)
                return ret;
        struct mem_cgroup *memcg;
        unsigned long nr_pages;
        unsigned long pgpgout;
-       unsigned long nr_anon;
        unsigned long nr_kmem;
        unsigned long nr_huge;
        struct page *dummy_page;
        }
 
        local_irq_save(flags);
-       __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
        __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
        __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
        __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
        if (!PageKmemcg(page)) {
                if (PageTransHuge(page))
                        ug->nr_huge += nr_pages;
-               if (PageAnon(page))
-                       ug->nr_anon += nr_pages;
                ug->pgpgout++;
        } else {
                ug->nr_kmem += nr_pages;
 
                 * thread doing COW.
                 */
                ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
-               page_add_new_anon_rmap(new_page, vma, vmf->address, false);
                mem_cgroup_commit_charge(new_page, memcg, false);
+               page_add_new_anon_rmap(new_page, vma, vmf->address, false);
                lru_cache_add_active_or_unevictable(new_page, vma);
                /*
                 * We call the notify macro here because, when using secondary
 
        /* ksm created a completely new copy */
        if (unlikely(page != swapcache && swapcache)) {
-               page_add_new_anon_rmap(page, vma, vmf->address, false);
                mem_cgroup_commit_charge(page, memcg, false);
+               page_add_new_anon_rmap(page, vma, vmf->address, false);
                lru_cache_add_active_or_unevictable(page, vma);
        } else {
-               do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
                mem_cgroup_commit_charge(page, memcg, true);
+               do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
                activate_page(page);
        }
 
        }
 
        inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-       page_add_new_anon_rmap(page, vma, vmf->address, false);
        mem_cgroup_commit_charge(page, memcg, false);
+       page_add_new_anon_rmap(page, vma, vmf->address, false);
        lru_cache_add_active_or_unevictable(page, vma);
 setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
        /* copy-on-write page */
        if (write && !(vma->vm_flags & VM_SHARED)) {
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-               page_add_new_anon_rmap(page, vma, vmf->address, false);
                mem_cgroup_commit_charge(page, memcg, false);
+               page_add_new_anon_rmap(page, vma, vmf->address, false);
                lru_cache_add_active_or_unevictable(page, vma);
        } else {
                inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 
                goto unlock_abort;
 
        inc_mm_counter(mm, MM_ANONPAGES);
-       page_add_new_anon_rmap(page, vma, addr, false);
        mem_cgroup_commit_charge(page, memcg, false);
+       page_add_new_anon_rmap(page, vma, addr, false);
        if (!is_zone_device_page(page))
                lru_cache_add_active_or_unevictable(page, vma);
        get_page(page);
 
        bool compound = flags & RMAP_COMPOUND;
        bool first;
 
+       if (unlikely(PageKsm(page)))
+               lock_page_memcg(page);
+       else
+               VM_BUG_ON_PAGE(!PageLocked(page), page);
+
        if (compound) {
                atomic_t *mapcount;
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                 */
                if (compound)
                        __inc_node_page_state(page, NR_ANON_THPS);
-               __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
+               __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
        }
-       if (unlikely(PageKsm(page)))
-               return;
 
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       if (unlikely(PageKsm(page))) {
+               unlock_page_memcg(page);
+               return;
+       }
 
        /* address might be in next vma when migration races vma_adjust */
        if (first)
                /* increment count (starts at -1) */
                atomic_set(&page->_mapcount, 0);
        }
-       __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
+       __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
        __page_set_anon_rmap(page, vma, address, 1);
 }
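
The lock_page_memcg()/unlock_page_memcg() pair added above exists because the NR_ANON_MAPPED update now charges the page's memcg: mapped KSM pages reach page_add_anon_rmap() without the page lock, so the memcg binding has to be pinned explicitly around the stat update, while regular anon pages are page-locked here (as the new VM_BUG_ON asserts), which already keeps page->mem_cgroup stable against charge moving. Condensed shape of the add side, with the mapcount and THP details elided:

    if (unlikely(PageKsm(page)))
            lock_page_memcg(page);                   /* pin page->mem_cgroup      */
    else
            VM_BUG_ON_PAGE(!PageLocked(page), page); /* page lock already pins it */
    /* ... mapcount handling ... */
    __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
    /* ... */
    if (unlikely(PageKsm(page))) {
            unlock_page_memcg(page);
            return;
    }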
 
        int i, nr = 1;
 
        VM_BUG_ON_PAGE(compound && !PageHead(page), page);
-       lock_page_memcg(page);
 
        /* Hugepages are not counted in NR_FILE_MAPPED for now. */
        if (unlikely(PageHuge(page))) {
                /* hugetlb pages are always mapped with pmds */
                atomic_dec(compound_mapcount_ptr(page));
-               goto out;
+               return;
        }
 
        /* page still mapped by someone else? */
                                nr++;
                }
                if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
-                       goto out;
+                       return;
                if (PageSwapBacked(page))
                        __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
                else
                        __dec_node_page_state(page, NR_FILE_PMDMAPPED);
        } else {
                if (!atomic_add_negative(-1, &page->_mapcount))
-                       goto out;
+                       return;
        }
 
        /*
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
-out:
-       unlock_page_memcg(page);
 }
 
 static void page_remove_anon_compound_rmap(struct page *page)
                clear_page_mlock(page);
 
        if (nr)
-               __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
+               __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
 }
 
 /**
  */
 void page_remove_rmap(struct page *page, bool compound)
 {
-       if (!PageAnon(page))
-               return page_remove_file_rmap(page, compound);
+       lock_page_memcg(page);
 
-       if (compound)
-               return page_remove_anon_compound_rmap(page);
+       if (!PageAnon(page)) {
+               page_remove_file_rmap(page, compound);
+               goto out;
+       }
+
+       if (compound) {
+               page_remove_anon_compound_rmap(page);
+               goto out;
+       }
 
        /* page still mapped by someone else? */
        if (!atomic_add_negative(-1, &page->_mapcount))
-               return;
+               goto out;
 
        /*
         * We use the irq-unsafe __{inc|mod}_zone_page_stat because
         * these counters are not modified in interrupt context, and
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
-       __dec_node_page_state(page, NR_ANON_MAPPED);
+       __dec_lruvec_page_state(page, NR_ANON_MAPPED);
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
         * Leaving it set also helps swapoff to reinstate ptes
         * faster for those pages still in swapcache.
         */
+out:
+       unlock_page_memcg(page);
 }
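
Condensed from the hunks above: page_remove_rmap() now brackets every path in lock_page_memcg(), so the file counters and the new NR_ANON_MAPPED decrement are all applied against a stable page->mem_cgroup, which is also why page_remove_file_rmap() could drop its own lock/unlock and the out: label. Sketch of the resulting flow (mlock clearing and the trailing comments omitted):

    void page_remove_rmap(struct page *page, bool compound)
    {
            lock_page_memcg(page);

            if (!PageAnon(page))
                    page_remove_file_rmap(page, compound);
            else if (compound)
                    page_remove_anon_compound_rmap(page);
            else if (atomic_add_negative(-1, &page->_mapcount))
                    __dec_lruvec_page_state(page, NR_ANON_MAPPED);

            unlock_page_memcg(page);
    }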
 
 /*
 
        set_pte_at(vma->vm_mm, addr, pte,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        if (page == swapcache) {
-               page_add_anon_rmap(page, vma, addr, false);
                mem_cgroup_commit_charge(page, memcg, true);
+               page_add_anon_rmap(page, vma, addr, false);
        } else { /* ksm created a completely new copy */
-               page_add_new_anon_rmap(page, vma, addr, false);
                mem_cgroup_commit_charge(page, memcg, false);
+               page_add_new_anon_rmap(page, vma, addr, false);
                lru_cache_add_active_or_unevictable(page, vma);
        }
        swap_free(entry);
 
                goto out_release_uncharge_unlock;
 
        inc_mm_counter(dst_mm, MM_ANONPAGES);
-       page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
        mem_cgroup_commit_charge(page, memcg, false);
+       page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
        lru_cache_add_active_or_unevictable(page, dst_vma);
 
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);