return swappiness;
 }
 
+static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
+{
+       int *val = data;
+       (*val)++;
+       return 0;
+}
+
+/*
+ * Returns the number of memcgs in the hierarchy tree rooted at @mem,
+ * counting @mem itself. Returns 1 (the self count) if there are no
+ * children.
+ */
+static int mem_cgroup_count_children(struct mem_cgroup *mem)
+{
+       int num = 0;
+       mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+       return num;
+}
+
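mem_cgroup_walk_tree() invokes the callback for @mem and for every
descendant in the hierarchy, so the counter above ends up with the self
count plus one per child. A minimal userspace sketch of the same
walk-plus-callback pattern (struct node and walk_tree() are hypothetical
stand-ins for the kernel's memcg tree and mem_cgroup_walk_tree(); only
the "return 0 to keep walking" convention is taken from the patch):

#include <stdio.h>

struct node {
	struct node *child;		/* first child */
	struct node *sibling;		/* next sibling */
};

/* Visit @n and all of its descendants; a non-zero return stops early. */
static int walk_tree(struct node *n, void *data,
		     int (*cb)(struct node *, void *))
{
	int ret = cb(n, data);

	if (ret)
		return ret;
	for (n = n->child; n; n = n->sibling) {
		ret = walk_tree(n, data, cb);
		if (ret)
			return ret;
	}
	return 0;
}

static int count_cb(struct node *n, void *data)
{
	int *val = data;

	(*val)++;
	return 0;			/* keep walking */
}

int main(void)
{
	struct node leaf1 = { 0 }, leaf2 = { 0 };
	struct node root = { .child = &leaf1 };
	int num = 0;

	leaf1.sibling = &leaf2;
	walk_tree(&root, &num, count_cb);
	printf("%d\n", num);		/* root + 2 children = 3 */
	return 0;
}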
 /*
  * Visit the first child (need not be the first child as per the ordering
  * of the cgroup list, since we track last_scanned_child) of @mem and use
  * that to reclaim free pages from. Returns 0 if nothing could be reclaimed.
  *
  * We give up and return to the caller when we visit root_mem twice.
  * (other groups can be removed while we're walking....)
+ *
+ * If shrink==true, this returns immediately to avoid freeing too much.
  */
 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
-                                               gfp_t gfp_mask, bool noswap)
+                                  gfp_t gfp_mask, bool noswap, bool shrink)
 {
        struct mem_cgroup *victim;
        int ret, total = 0;
                ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
                                                   get_swappiness(victim));
                css_put(&victim->css);
+               /*
+                * While shrinking usage, we can't decide here whether we
+                * should stop or reclaim more; that depends on the caller.
+                * last_scanned_child is enough to keep the scan fair
+                * across the tree.
+                */
+               if (shrink)
+                       return ret;
                total += ret;
                if (mem_cgroup_check_under_limit(root_mem))
                        return 1 + total;
                        goto nomem;
 
                ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
-                                                       noswap);
+                                                       noswap, false);
                if (ret)
                        continue;
 
                return 0;
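The shrink==true early return above leans on state kept outside any
single call: victim selection resumes from root_mem->last_scanned_child,
so a series of one-victim reclaim calls rotates through the hierarchy
instead of hammering the first child. A tiny userspace sketch of that
resume-where-we-stopped rotation (the names and the fixed child array
are illustrative, not the kernel's data structures):

#include <stdio.h>

#define NR_CHILDREN 3

struct root_group {
	int last_scanned_child;		/* resume point across calls */
};

/* Pick the next victim in round-robin order and remember the position. */
static int next_victim(struct root_group *r)
{
	r->last_scanned_child = (r->last_scanned_child + 1) % NR_CHILDREN;
	return r->last_scanned_child;
}

int main(void)
{
	struct root_group r = { .last_scanned_child = NR_CHILDREN - 1 };
	int i;

	/* Six one-victim "shrink" calls visit each child exactly twice. */
	for (i = 0; i < 6; i++)
		printf("reclaim from child %d\n", next_victim(&r));
	return 0;
}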
 
        do {
-               progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
+               progress = mem_cgroup_hierarchical_reclaim(mem,
+                                       gfp_mask, true, false);
                progress += mem_cgroup_check_under_limit(mem);
        } while (!progress && --retry);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                                unsigned long long val)
 {
-
-       int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+       int retry_count;
        int progress;
        u64 memswlimit;
        int ret = 0;
+       int children = mem_cgroup_count_children(memcg);
+       u64 curusage, oldusage;
+
+       /*
+        * To keep hierarchical_reclaim simple, how long we should retry
+        * is up to the caller. We set the retry count to be a function
+        * of the number of children we must visit in this loop.
+        */
+       retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
+
+       oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
 
        while (retry_count) {
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
 
                progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
-                                                          false);
-               if (!progress)                  retry_count--;
+                                                  false, true);
+               curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
+               /* Was usage reduced? */
+               if (curusage >= oldusage)
+                       retry_count--;
+               else
+                       oldusage = curusage;
        }
 
        return ret;
 }
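The retry policy above is worth seeing end to end: the budget is
MEM_CGROUP_RECLAIM_RETRIES per memcg in the hierarchy, and a retry is
only consumed when a reclaim pass fails to bring usage down. A
self-contained userspace model of that policy (struct group,
read_usage(), and reclaim_pass() are hypothetical stand-ins for the
memcg, res_counter_read_u64(), and the shrink==true reclaim call):

#include <stdio.h>

#define RECLAIM_RETRIES 5

struct group {
	unsigned long long usage;
	unsigned long long limit;
	int nr_children;		/* children excluding self */
};

static unsigned long long read_usage(struct group *g)
{
	return g->usage;
}

/* Pretend each pass frees 8 units; a real pass may free nothing. */
static void reclaim_pass(struct group *g)
{
	if (g->usage >= 8)
		g->usage -= 8;
}

static int resize_limit(struct group *g, unsigned long long val)
{
	/* One full round of retries per memcg in the hierarchy. */
	int retry_count = RECLAIM_RETRIES * (g->nr_children + 1);
	unsigned long long curusage, oldusage = read_usage(g);

	while (retry_count) {
		if (g->usage <= val) {
			g->limit = val;	/* new limit fits now */
			return 0;
		}
		reclaim_pass(g);
		curusage = read_usage(g);
		if (curusage >= oldusage)
			retry_count--;		/* no progress: burn a retry */
		else
			oldusage = curusage;	/* progress: retry for free */
	}
	return -1;			/* usage would not come down */
}

int main(void)
{
	struct group g = { .usage = 100, .limit = 200, .nr_children = 2 };

	printf("resize to 40 -> %d, usage now %llu\n",
	       resize_limit(&g, 40), g.usage);
	return 0;
}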
 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                                unsigned long long val)
 {
-       int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+       int retry_count;
        u64 memlimit, oldusage, curusage;
-       int ret;
+       int children = mem_cgroup_count_children(memcg);
+       int ret = -EBUSY;
 
        if (!do_swap_account)
                return -EINVAL;
-
+       /* see mem_cgroup_resize_limit */
+       retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+       oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
        while (retry_count) {
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                if (!ret)
                        break;
 
-               oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-               mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+               mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+               /* Was usage reduced? */
                if (curusage >= oldusage)
                        retry_count--;
+               else
+                       oldusage = curusage;
        }
        return ret;
 }