Use a helper function, oom_gfp_allowed(), to check whether we need to
deal with the OOM condition; the same test was previously open-coded in
both the memcg kmem charge path and the page allocator slowpath.
Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
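
To illustrate the helper's semantics (an informal sketch, not part of
the diff below): waiting for the OOM killer is allowed only when the
allocation may invoke filesystem callbacks (__GFP_FS) and has not opted
out of retrying (__GFP_NORETRY):

	oom_gfp_allowed(GFP_KERNEL);			/* true: __GFP_FS is set */
	oom_gfp_allowed(GFP_NOFS);			/* false: no __GFP_FS */
	oom_gfp_allowed(GFP_KERNEL | __GFP_NORETRY);	/* false: caller won't retry */
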
        oom_killer_disabled = false;
 }
 
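+/*
+ * Conditions under which we can wait for the oom_killer. Those are
+ * the same conditions tested by the core page allocator.
+ */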
+static inline bool oom_gfp_allowed(gfp_t gfp_mask)
+{
+       return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
+}
+
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
 /* sysctls */
 
        struct res_counter *fail_res;
        struct mem_cgroup *_memcg;
        int ret = 0;
-       bool may_oom;
 
        ret = res_counter_charge(&memcg->kmem, size, &fail_res);
        if (ret)
                return ret;
 
-       /*
-        * Conditions under which we can wait for the oom_killer. Those are
-        * the same conditions tested by the core page allocator
-        */
-       may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
        _memcg = memcg;
        ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-                                     &_memcg, may_oom);
+                                     &_memcg, oom_gfp_allowed(gfp));
 
        if (ret == -EINTR)  {
                /*
 
         * running out of options and have to consider going OOM
         */
        if (!did_some_progress) {
-               if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+               if (oom_gfp_allowed(gfp_mask)) {
                        if (oom_killer_disabled)
                                goto nopage;
                        /* Coredumps can quickly deplete all memory reserves */