 static __always_inline int memcg_charge_slab(struct page *page,
                                              gfp_t gfp, int order,
                                             struct kmem_cache *s)
 {
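+       /* cache the page count of this order-sized allocation: 2^order pages */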
+       unsigned int nr_pages = 1 << order;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
        int ret;
 
        rcu_read_lock();
        memcg = READ_ONCE(s->memcg_params.memcg);
        while (memcg && !css_tryget_online(&memcg->css))
                memcg = parent_mem_cgroup(memcg);
        rcu_read_unlock();

        if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
                mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-                                   (1 << order));
-               percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
+                                   nr_pages);
+               percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
                return 0;
        }
 
-       ret = memcg_kmem_charge_memcg(memcg, gfp, 1 << order);
+       ret = memcg_kmem_charge_memcg(memcg, gfp, nr_pages);
        if (ret)
                goto out;
 
        lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
-       mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
+       mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);
 
        /* transfer try_charge() page references to kmem_cache */
-       percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
-       css_put_many(&memcg->css, 1 << order);
+       percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
+       css_put_many(&memcg->css, nr_pages);
 out:
        css_put(&memcg->css);
        return ret;
 }

 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                struct kmem_cache *s)
 {
+       unsigned int nr_pages = 1 << order;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
 
        rcu_read_lock();
        memcg = READ_ONCE(s->memcg_params.memcg);
        if (likely(!mem_cgroup_is_root(memcg))) {
                lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
-               mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
-               memcg_kmem_uncharge_memcg(memcg, order);
+               mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
+               memcg_kmem_uncharge_memcg(memcg, nr_pages);
        } else {
                mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-                                   -(1 << order));
+                                   -nr_pages);
        }
        rcu_read_unlock();
 
-       percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
+       percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
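
A note for readers skimming the change: it is purely mechanical. An order-N
buddy allocation spans 2^N pages, and the patch simply caches that count in a
local nr_pages instead of re-evaluating 1 << order at every call site. A
minimal userspace sketch of the same arithmetic (illustrative only, not kernel
code; order_to_nr_pages() is a made-up helper):

#include <stdio.h>

/* Compute the page count of an order-N allocation once and reuse it,
 * mirroring the nr_pages caching in the patch above. */
static unsigned int order_to_nr_pages(int order)
{
        return 1u << order;     /* order 0 -> 1 page, order 3 -> 8 pages */
}

int main(void)
{
        for (int order = 0; order <= 3; order++)
                printf("order %d -> %u page(s)\n", order,
                       order_to_nr_pages(order));
        return 0;
}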