        struct mem_cgroup *eviction_memcg;
        struct lruvec *eviction_lruvec;
        unsigned long refault_distance;
+       unsigned long workingset_size;
        struct pglist_data *pgdat;
-       unsigned long active_file;
        struct mem_cgroup *memcg;
        unsigned long eviction;
        struct lruvec *lruvec;
                goto out;
        eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
        refault = atomic_long_read(&eviction_lruvec->inactive_age);
-       active_file = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
 
        /*
         * Calculate the refault distance
 
        /*
         * Compare the distance to the existing workingset size. We
-        * don't act on pages that couldn't stay resident even if all
-        * the memory was available to the page cache.
+        * don't activate pages that couldn't stay resident even if
+        * all the memory was available to the page cache. Whether
+        * cache can compete with anon or not depends on having swap.
         */
-       if (refault_distance > active_file)
+       workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
+       if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
+               workingset_size += lruvec_page_state(eviction_lruvec,
+                                                    NR_INACTIVE_ANON);
+               workingset_size += lruvec_page_state(eviction_lruvec,
+                                                    NR_ACTIVE_ANON);
+       }
+       if (refault_distance > workingset_size)
                goto out;
 
        SetPageActive(page);
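
For reference, here is a minimal standalone sketch (not kernel code) of the activation decision above. It uses hypothetical plain counters (struct lru_counts, should_activate) in place of the real lruvec statistics and mem_cgroup_get_nr_swap_pages(), to show how swap availability widens the workingset that a refaulting cache page is allowed to challenge: with swap, the anon LRUs count toward the space the page could realistically claim; without swap, only the active file list does.

#include <stdbool.h>
#include <stdio.h>

struct lru_counts {			/* hypothetical stand-in for lruvec stats */
	unsigned long active_file;
	unsigned long inactive_anon;
	unsigned long active_anon;
	unsigned long nr_swap_pages;	/* swap space usable by this group */
};

/* Return true if a refault at this distance should be activated. */
static bool should_activate(unsigned long refault_distance,
			    const struct lru_counts *c)
{
	unsigned long workingset_size = c->active_file;

	/*
	 * Only when swap is available can file cache push out anon
	 * pages, so only then does anon memory count toward the
	 * workingset the refaulting page competes against.
	 */
	if (c->nr_swap_pages > 0)
		workingset_size += c->inactive_anon + c->active_anon;

	return refault_distance <= workingset_size;
}

int main(void)
{
	struct lru_counts no_swap   = { 1000, 4000, 4000, 0 };
	struct lru_counts with_swap = { 1000, 4000, 4000, 1UL << 20 };

	/* Distance 3000 exceeds active file alone, but fits once anon counts. */
	printf("no swap:   %d\n", should_activate(3000, &no_swap));	/* 0 */
	printf("with swap: %d\n", should_activate(3000, &with_swap));	/* 1 */
	return 0;
}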