unsigned long limit;
 
        count = page_counter_read(&memcg->memory);
-       limit = READ_ONCE(memcg->memory.limit);
+       limit = READ_ONCE(memcg->memory.max);
        if (count < limit)
                margin = limit - count;
 
        if (do_memsw_account()) {
                count = page_counter_read(&memcg->memsw);
-               limit = READ_ONCE(memcg->memsw.limit);
+               limit = READ_ONCE(memcg->memsw.max);
                if (count <= limit)
                        margin = min(margin, limit - count);
                else
                        margin = 0;
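
For reference, the margin computed above is the number of pages that can
still be charged before the tightest applicable limit is hit.  A minimal
self-contained sketch of the same logic (function and parameter names
here are illustrative, not the kernel's):

	static unsigned long margin_of(unsigned long usage, unsigned long max,
				       unsigned long sw_usage, unsigned long sw_max)
	{
		unsigned long margin = 0, sw_margin;

		if (usage < max)
			margin = max - usage;
		/* memory+swap can only tighten the margin, never widen it */
		if (sw_usage > sw_max)
			return 0;
		sw_margin = sw_max - sw_usage;
		return margin < sw_margin ? margin : sw_margin;
	}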
 
        pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
                K((u64)page_counter_read(&memcg->memory)),
-               K((u64)memcg->memory.limit), memcg->memory.failcnt);
+               K((u64)memcg->memory.max), memcg->memory.failcnt);
        pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
                K((u64)page_counter_read(&memcg->memsw)),
-               K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
+               K((u64)memcg->memsw.max), memcg->memsw.failcnt);
        pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
                K((u64)page_counter_read(&memcg->kmem)),
-               K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
+               K((u64)memcg->kmem.max), memcg->kmem.failcnt);
 
        for_each_mem_cgroup_tree(iter, memcg) {
                pr_info("Memory cgroup stats for ");
 /*
  * Return the memory (and swap, if configured) limit for a memcg.
  */
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 {
-       unsigned long limit;
+       unsigned long max;
 
-       limit = memcg->memory.limit;
+       max = memcg->memory.max;
        if (mem_cgroup_swappiness(memcg)) {
-               unsigned long memsw_limit;
-               unsigned long swap_limit;
+               unsigned long memsw_max;
+               unsigned long swap_max;
 
-               memsw_limit = memcg->memsw.limit;
-               swap_limit = memcg->swap.limit;
-               swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
-               limit = min(limit + swap_limit, memsw_limit);
+               memsw_max = memcg->memsw.max;
+               swap_max = memcg->swap.max;
+               swap_max = min(swap_max, (unsigned long)total_swap_pages);
+               max = min(max + swap_max, memsw_max);
        }
-       return limit;
+       return max;
 }
 
 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
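
A worked example of the clamping in mem_cgroup_get_max(), with
hypothetical numbers (all in pages): memory.max = 1000, swap.max = 200,
total_swap_pages = 500, memsw.max = 2000, swappiness non-zero:

	swap_max = min(200, 500)         =  200
	max      = min(1000 + 200, 2000) = 1200

so the OOM killer sizes its badness calculation against 1200 pages:
the memory+swap caps and the v1 memsw cap are both honoured, whichever
is tighter.
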
 }
 #endif
 
-static DEFINE_MUTEX(memcg_limit_mutex);
+static DEFINE_MUTEX(memcg_max_mutex);
 
-static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-                                  unsigned long limit, bool memsw)
+static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
+                                unsigned long max, bool memsw)
 {
        bool enlarge = false;
        int ret;
                        break;
                }
 
-               mutex_lock(&memcg_limit_mutex);
+               mutex_lock(&memcg_max_mutex);
                /*
                 * Make sure that the new limit (memsw or memory limit) doesn't
-                * break our basic invariant rule memory.limit <= memsw.limit.
+                * break our basic invariant rule memory.max <= memsw.max.
                 */
-               limits_invariant = memsw ? limit >= memcg->memory.limit :
-                                          limit <= memcg->memsw.limit;
+               limits_invariant = memsw ? max >= memcg->memory.max :
+                                          max <= memcg->memsw.max;
                if (!limits_invariant) {
-                       mutex_unlock(&memcg_limit_mutex);
+                       mutex_unlock(&memcg_max_mutex);
                        ret = -EINVAL;
                        break;
                }
-               if (limit > counter->limit)
+               if (max > counter->max)
                        enlarge = true;
-               ret = page_counter_limit(counter, limit);
-               mutex_unlock(&memcg_limit_mutex);
+               ret = page_counter_set_max(counter, max);
+               mutex_unlock(&memcg_max_mutex);
 
                if (!ret)
                        break;
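
The invariant being defended is memory.max <= memsw.max: since the memsw
counter also counts plain memory, its cap can never be lower than
memory.max.  Which side of the comparison applies depends on which
counter is being resized; a standalone restatement of the check above
(the function name is illustrative):

	static bool max_update_is_valid(bool memsw, unsigned long new_max,
					unsigned long memory_max,
					unsigned long memsw_max)
	{
		/* a new memsw cap must still cover memory.max; a new
		 * memory cap must still fit under memsw.max */
		return memsw ? new_max >= memory_max
			     : new_max <= memsw_max;
	}
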
                        return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
                return (u64)page_counter_read(counter) * PAGE_SIZE;
        case RES_LIMIT:
-               return (u64)counter->limit * PAGE_SIZE;
+               return (u64)counter->max * PAGE_SIZE;
        case RES_MAX_USAGE:
                return (u64)counter->watermark * PAGE_SIZE;
        case RES_FAILCNT:
 }
 #endif /* !CONFIG_SLOB */
 
-static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
-                                  unsigned long limit)
+static int memcg_update_kmem_max(struct mem_cgroup *memcg,
+                                unsigned long max)
 {
        int ret;
 
-       mutex_lock(&memcg_limit_mutex);
-       ret = page_counter_limit(&memcg->kmem, limit);
-       mutex_unlock(&memcg_limit_mutex);
+       mutex_lock(&memcg_max_mutex);
+       ret = page_counter_set_max(&memcg->kmem, max);
+       mutex_unlock(&memcg_max_mutex);
        return ret;
 }
 
-static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
+static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
 {
        int ret;
 
-       mutex_lock(&memcg_limit_mutex);
+       mutex_lock(&memcg_max_mutex);
 
-       ret = page_counter_limit(&memcg->tcpmem, limit);
+       ret = page_counter_set_max(&memcg->tcpmem, max);
        if (ret)
                goto out;
 
                memcg->tcpmem_active = true;
        }
 out:
-       mutex_unlock(&memcg_limit_mutex);
+       mutex_unlock(&memcg_max_mutex);
        return ret;
 }
 
                }
                switch (MEMFILE_TYPE(of_cft(of)->private)) {
                case _MEM:
-                       ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
+                       ret = mem_cgroup_resize_max(memcg, nr_pages, false);
                        break;
                case _MEMSWAP:
-                       ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
+                       ret = mem_cgroup_resize_max(memcg, nr_pages, true);
                        break;
                case _KMEM:
-                       ret = memcg_update_kmem_limit(memcg, nr_pages);
+                       ret = memcg_update_kmem_max(memcg, nr_pages);
                        break;
                case _TCP:
-                       ret = memcg_update_tcp_limit(memcg, nr_pages);
+                       ret = memcg_update_tcp_max(memcg, nr_pages);
                        break;
                }
                break;
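
These cases back the cgroup v1 control files (memory.limit_in_bytes,
memory.memsw.limit_in_bytes, memory.kmem.limit_in_bytes and
memory.kmem.tcp.limit_in_bytes).  A hypothetical user-space sketch of
driving one of them (the mount point is an assumption about where the
v1 hierarchy lives):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Write a new byte limit to a v1 memcg control file. */
	static int set_limit(const char *file, const char *bytes)
	{
		int fd = open(file, O_WRONLY);
		ssize_t ret;

		if (fd < 0)
			return -1;
		ret = write(fd, bytes, strlen(bytes));
		close(fd);
		return ret < 0 ? -1 : 0;
	}

	/* set_limit("/sys/fs/cgroup/memory/grp/memory.limit_in_bytes",
	 *           "536870912");   512M -> mem_cgroup_resize_max(.., false) */
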
        /* Hierarchical information */
        memory = memsw = PAGE_COUNTER_MAX;
        for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
-               memory = min(memory, mi->memory.limit);
-               memsw = min(memsw, mi->memsw.limit);
+               memory = min(memory, mi->memory.max);
+               memsw = min(memsw, mi->memsw.max);
        }
        seq_printf(m, "hierarchical_memory_limit %llu\n",
                   (u64)memory * PAGE_SIZE);
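
The walk takes the minimum over every ancestor because a charge must fit
under each level's cap.  Example: in a hierarchy root (max = 4G) -> A
(max = 1G) -> B (max = 2G), the loop reports hierarchical_memory_limit =
min(2G, 1G, 4G) = 1G for B, even though B's own cap is larger.
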
        *pheadroom = PAGE_COUNTER_MAX;
 
        while ((parent = parent_mem_cgroup(memcg))) {
-               unsigned long ceiling = min(memcg->memory.limit, memcg->high);
+               unsigned long ceiling = min(memcg->memory.max, memcg->high);
                unsigned long used = page_counter_read(&memcg->memory);
 
                *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
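
The "ceiling - min(ceiling, used)" form clamps the subtraction at zero:
memcg->high is a soft cap, so usage can transiently exceed the ceiling,
and these are unsigned longs.  With ceiling = 100 and used = 120, a
plain "ceiling - used" would wrap around to a huge bogus headroom,
while "ceiling - min(ceiling, used)" correctly yields 0.
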
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-       page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
-       page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
-       page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
-       page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
-       page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
+       page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
+       page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
+       page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
+       page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
+       page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
        memcg->low = 0;
        memcg->high = PAGE_COUNTER_MAX;
        memcg->soft_limit = PAGE_COUNTER_MAX;
        memcg_wb_domain_size_changed(memcg);
 static int memory_max_show(struct seq_file *m, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-       unsigned long max = READ_ONCE(memcg->memory.limit);
+       unsigned long max = READ_ONCE(memcg->memory.max);
 
        if (max == PAGE_COUNTER_MAX)
                seq_puts(m, "max\n");
        if (err)
                return err;
 
-       xchg(&memcg->memory.limit, max);
+       xchg(&memcg->memory.max, max);
 
        for (;;) {
                unsigned long nr_pages = page_counter_read(&memcg->memory);
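
Note that the cgroup v2 memory.max write installs the new cap with a
bare xchg() rather than page_counter_set_max(): v2 allows setting max
below the current usage, in which case page_counter_set_max() would
bail with -EBUSY, whereas here the loop that follows shrinks the cgroup
down to the new cap by reclaim and, as a last resort, the OOM killer.
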
                return nr_swap_pages;
        for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
                nr_swap_pages = min_t(long, nr_swap_pages,
-                                     READ_ONCE(memcg->swap.limit) -
+                                     READ_ONCE(memcg->swap.max) -
                                      page_counter_read(&memcg->swap));
        return nr_swap_pages;
 }
                return false;
 
        for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
-               if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
+               if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
                        return true;
 
        return false;
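
The check reads "this level's swap is at least half full" without a
division:

	usage * 2 >= max   <=>   usage >= max / 2

e.g. with swap.max = 1000 pages the cgroup reports full once usage
reaches 500; any level of the hierarchy crossing the halfway mark makes
the whole path report swap pressure.
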
 static int swap_max_show(struct seq_file *m, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-       unsigned long max = READ_ONCE(memcg->swap.limit);
+       unsigned long max = READ_ONCE(memcg->swap.max);
 
        if (max == PAGE_COUNTER_MAX)
                seq_puts(m, "max\n");
        if (err)
                return err;
 
-       mutex_lock(&memcg_limit_mutex);
-       err = page_counter_limit(&memcg->swap, max);
-       mutex_unlock(&memcg_limit_mutex);
+       mutex_lock(&memcg_max_mutex);
+       err = page_counter_set_max(&memcg->swap, max);
+       mutex_unlock(&memcg_max_mutex);
        if (err)
                return err;
 
 
 {
        long new;
 
-       new = atomic_long_sub_return(nr_pages, &counter->count);
+       new = atomic_long_sub_return(nr_pages, &counter->usage);
        /* More uncharges than charges? */
        WARN_ON_ONCE(new < 0);
 }
        for (c = counter; c; c = c->parent) {
                long new;
 
-               new = atomic_long_add_return(nr_pages, &c->count);
+               new = atomic_long_add_return(nr_pages, &c->usage);
                /*
                 * This is indeed racy, but we can live with some
                 * inaccuracy in the watermark.
                 * we either see the new limit or the setter sees the
                 * counter has changed and retries.
                 */
-               new = atomic_long_add_return(nr_pages, &c->count);
-               if (new > c->limit) {
-                       atomic_long_sub(nr_pages, &c->count);
+               new = atomic_long_add_return(nr_pages, &c->usage);
+               if (new > c->max) {
+                       atomic_long_sub(nr_pages, &c->usage);
                        /*
                         * This is racy, but we can live with some
                         * inaccuracy in the failcnt.
 }
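
Both charge paths add the pages first and, in the try_charge case,
subtract them again when the new total overshoots c->max.  A racing
charger can observe the transient overshoot, but the error is bounded
by the size of the failed charge, and charging speculatively this way
keeps an expensive compare-and-swap loop off the hot path.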
 
 /**
- * page_counter_limit - limit the number of pages allowed
+ * page_counter_set_max - set the maximum number of pages allowed
  * @counter: counter
- * @limit: limit to set
+ * @nr_pages: limit to set
  *
  * Returns 0 on success, -EBUSY if the current number of pages on the
  * counter already exceeds the specified limit.
  *
  * The caller must serialize invocations on the same counter.
  */
-int page_counter_limit(struct page_counter *counter, unsigned long limit)
+int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
 {
        for (;;) {
                unsigned long old;
-               long count;
+               long usage;
 
                /*
                 * Update the limit while making sure that it's not
                 * below the concurrently-changing counter value.
                 *
                 * The xchg implies full memory barriers before and
                 * after, so the read-swap-read is ordered against a
                 * racing page_counter_try_charge(): the charger either
                 * sees the new limit, or, if it sees the old limit, we
                 * see the modified counter and retry.
                 */
-               count = atomic_long_read(&counter->count);
+               usage = atomic_long_read(&counter->usage);
 
-               if (count > limit)
+               if (usage > nr_pages)
                        return -EBUSY;
 
-               old = xchg(&counter->limit, limit);
+               old = xchg(&counter->max, nr_pages);
 
-               if (atomic_long_read(&counter->count) <= count)
+               if (atomic_long_read(&counter->usage) <= usage)
                        return 0;
 
-               counter->limit = old;
+               counter->max = old;
                cond_resched();
        }
 }
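
The read -> xchg -> re-read protocol above can be modeled in user space
with C11 atomics; a minimal sketch (illustrative only, with the
kernel's barrier commentary and cond_resched() elided):

	#include <stdatomic.h>

	struct counter {
		atomic_long usage;
		_Atomic unsigned long max;
	};

	static int counter_set_max(struct counter *c, unsigned long nr_pages)
	{
		for (;;) {
			long usage = atomic_load(&c->usage);

			if (usage > (long)nr_pages)
				return -1;	/* the kernel returns -EBUSY */

			unsigned long old = atomic_exchange(&c->max, nr_pages);

			/* if no charge slipped in between the two reads,
			 * the new max is in place and coherent */
			if (atomic_load(&c->usage) <= usage)
				return 0;

			/* raced with a charge: restore the old max, retry */
			atomic_store(&c->max, old);
		}
	}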