{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
+       int page_size = PAGE_SIZE;
+
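+       /* A THP covers a whole compound page; account all of it. */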
+       if (PageTransHuge(page))
+               page_size <<= compound_order(page);
 
        if (mem_cgroup_disabled())
                return NULL;
  * oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
-               gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
+                                  gfp_t gfp_mask,
+                                  struct mem_cgroup **memcg, bool oom,
+                                  int page_size)
 {
        int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup *mem = NULL;
        int ret;
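+       /* Charge in batches, but never less than the page itself. */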
-       int csize = CHARGE_SIZE;
+       int csize = max(CHARGE_SIZE, (unsigned long) page_size);
 
        /*
         * Unlike the global VM's OOM-kill, we're not in memory shortage
                VM_BUG_ON(css_is_removed(&mem->css));
                if (mem_cgroup_is_root(mem))
                        goto done;
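+               /* The per-cpu stock holds PAGE_SIZE units only. */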
-               if (consume_stock(mem))
+               if (page_size == PAGE_SIZE && consume_stock(mem))
                        goto done;
                css_get(&mem->css);
        } else {
                        rcu_read_unlock();
                        goto done;
                }
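+               /* As above: the stock cannot satisfy a hugepage. */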
-               if (consume_stock(mem)) {
+               if (page_size == PAGE_SIZE && consume_stock(mem)) {
                        /*
                         * It seems dangerous to access memcg without css_get().
                         * But considering how consume_stock works, it's not
                case CHARGE_OK:
                        break;
                case CHARGE_RETRY: /* not in OOM situation but retry */
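+                       /* Retry with just this page's worth, not a batch. */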
-                       csize = PAGE_SIZE;
+                       csize = page_size;
                        css_put(&mem->css);
                        mem = NULL;
                        goto again;
                }
        } while (ret != CHARGE_OK);
 
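+       /* Return any unused surplus of the batch to the stock. */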
-       if (csize > PAGE_SIZE)
-               refill_stock(mem, csize - PAGE_SIZE);
+       if (csize > page_size)
+               refill_stock(mem, csize - page_size);
        css_put(&mem->css);
 done:
        *memcg = mem;
        }
 }
 
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
+                                    int page_size)
 {
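+       /* __mem_cgroup_cancel_charge() takes pages, not bytes. */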
-       __mem_cgroup_cancel_charge(mem, 1);
+       __mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
 }
 
 /*
  */
 
 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
-                                    struct page_cgroup *pc,
-                                    enum charge_type ctype)
+                                      struct page_cgroup *pc,
+                                      enum charge_type ctype,
+                                      int page_size)
 {
        /* try_charge() can return NULL to *memcg, taking care of it. */
        if (!mem)
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
-               mem_cgroup_cancel_charge(mem);
+               mem_cgroup_cancel_charge(mem, page_size);
                return;
        }
 
        mem_cgroup_charge_statistics(from, pc, false);
        if (uncharge)
                /* This is not "cancel", but cancel_charge does all we need. */
-               mem_cgroup_cancel_charge(from);
+               mem_cgroup_cancel_charge(from, PAGE_SIZE);
 
        /* caller should have done css_get */
        pc->mem_cgroup = to;
                goto put;
 
        parent = mem_cgroup_from_cont(pcg);
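+       /* Charges move to the parent one base page at a time. */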
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false,
+                                     PAGE_SIZE);
        if (ret || !parent)
                goto put_back;
 
        ret = mem_cgroup_move_account(pc, child, parent, true);
        if (ret)
-               mem_cgroup_cancel_charge(parent);
+               mem_cgroup_cancel_charge(parent, PAGE_SIZE);
 put_back:
        putback_lru_page(page);
 put:
        struct mem_cgroup *mem = NULL;
        struct page_cgroup *pc;
        int ret;
+       int page_size = PAGE_SIZE;
+
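+       /* Charge the full compound size when the page is a THP. */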
+       if (PageTransHuge(page))
+               page_size <<= compound_order(page);
 
        pc = lookup_page_cgroup(page);
        /* can happen at boot */
                return 0;
        prefetchw(pc);
 
-       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
        if (ret || !mem)
                return ret;
 
-       __mem_cgroup_commit_charge(mem, pc, ctype);
+       __mem_cgroup_commit_charge(mem, pc, ctype, page_size);
        return 0;
 }
 
 {
        if (mem_cgroup_disabled())
                return 0;
-       if (PageCompound(page))
-               return 0;
        /*
         * If already mapped, we don't have to account.
         * If page cache, page->mapping has address_space.
        if (!mem)
                goto charge_cur_mm;
        *ptr = mem;
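+       /* Swap entries always map base pages, hence PAGE_SIZE. */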
-       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
        css_put(&mem->css);
        return ret;
 charge_cur_mm:
        if (unlikely(!mm))
                mm = &init_mm;
-       return __mem_cgroup_try_charge(mm, mask, ptr, true);
+       return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
 }
 
 static void
        cgroup_exclude_rmdir(&ptr->css);
        pc = lookup_page_cgroup(page);
        mem_cgroup_lru_del_before_commit_swapcache(page);
-       __mem_cgroup_commit_charge(ptr, pc, ctype);
+       __mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
        mem_cgroup_lru_add_after_commit_swapcache(page);
        /*
         * Now the swap is in memory. This means this page may be
                return;
        if (!mem)
                return;
-       mem_cgroup_cancel_charge(mem);
+       mem_cgroup_cancel_charge(mem, PAGE_SIZE);
 }
 
 static void
-__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
+             int page_size)
 {
        struct memcg_batch_info *batch = NULL;
        bool uncharge_memsw = true;
        if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
                goto direct_uncharge;
 
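+       /* Batched uncharge works in PAGE_SIZE units; hugepages go direct. */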
+       if (page_size != PAGE_SIZE)
+               goto direct_uncharge;
+
        /*
         * In the typical case, batch->memcg == mem. This means we can
         * merge a series of uncharges to an uncharge of res_counter.
                batch->memsw_bytes += PAGE_SIZE;
        return;
 direct_uncharge:
-       res_counter_uncharge(&mem->res, PAGE_SIZE);
+       res_counter_uncharge(&mem->res, page_size);
        if (uncharge_memsw)
-               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+               res_counter_uncharge(&mem->memsw, page_size);
        if (unlikely(batch->memcg != mem))
                memcg_oom_recover(mem);
        return;
 {
        struct page_cgroup *pc;
        struct mem_cgroup *mem = NULL;
+       int page_size = PAGE_SIZE;
 
        if (mem_cgroup_disabled())
                return NULL;
        if (PageSwapCache(page))
                return NULL;
 
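+       /* An uncharged THP releases its whole compound size. */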
+       if (PageTransHuge(page))
+               page_size <<= compound_order(page);
+
        /*
         * Check if our page_cgroup is valid
         */
                mem_cgroup_get(mem);
        }
        if (!mem_cgroup_is_root(mem))
-               __do_uncharge(mem, ctype);
+               __do_uncharge(mem, ctype, page_size);
 
        return mem;
 
        enum charge_type ctype;
        int ret = 0;
 
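+       /* Hugepages are expected to be split before this path. */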
+       VM_BUG_ON(PageTransHuge(page));
        if (mem_cgroup_disabled())
                return 0;
 
                return 0;
 
        *ptr = mem;
-       ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
+       ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE);
        css_put(&mem->css); /* drop extra refcnt */
        if (ret || *ptr == NULL) {
                if (PageAnon(page)) {
                ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        else
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-       __mem_cgroup_commit_charge(mem, pc, ctype);
+       __mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
        return ret;
 }
 
                        batch_count = PRECHARGE_COUNT_AT_ONCE;
                        cond_resched();
                }
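+               /* Precharge for charge moving is counted per base page. */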
-               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+                                             PAGE_SIZE);
                if (ret || !mem)
                        /* mem_cgroup_clear_mc() will do uncharge later */
                        return -ENOMEM;
        pte_t *pte;
        spinlock_t *ptl;
 
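+       /* This PTE walk cannot handle a huge pmd; it must be split. */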
+       VM_BUG_ON(pmd_trans_huge(*pmd));
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                if (is_target_pte_for_mc(vma, addr, *pte, NULL))
        spinlock_t *ptl;
 
 retry:
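+       /* Same invariant as the precharge walk: no huge pmd here. */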
+       VM_BUG_ON(pmd_trans_huge(*pmd));
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; addr += PAGE_SIZE) {
                pte_t ptent = *(pte++);