mm: compaction: make isolate_lru_page() filter-aware
author Minchan Kim <minchan.kim@gmail.com>
Tue, 1 Nov 2011 00:06:51 +0000 (17:06 -0700)
committer Jerry Snitselaar <jerry.snitselaar@oracle.com>
Sun, 7 Oct 2012 06:00:52 +0000 (23:00 -0700)
commit 39deaf8585152f1a35c1676d3d7dc6ae0fb65967 upstream.

Stable note: Not tracked in Bugzilla. THP and compaction disrupt the LRU
list, leading to poor reclaim decisions and a variable performance impact.

In async mode, compaction does not migrate dirty or writeback pages, so it
is pointless to isolate such a page only to re-add it to the LRU list.

Of course, a page that is dirty or under writeback when compaction isolates
it may no longer be by the time migration is attempted, in which case it
could still be migrated.  But that is very unlikely, since the isolate and
migrate cycle is much faster than writeout.

So this patch reduces CPU overhead and prevents unnecessary LRU churn.
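
For readers skimming the change, a minimal userspace sketch of the idea; the
struct and helper below are illustrative stand-ins, not kernel code, and the
actual change is in the diff that follows:

#include <stdbool.h>

/* Stand-ins for the kernel's isolate modes and page flags, only so the
 * sketch is self-contained and compiles on its own. */
typedef unsigned int isolate_mode_t;
#define ISOLATE_INACTIVE	0x1
#define ISOLATE_ACTIVE		0x2
#define ISOLATE_CLEAN		0x4	/* new in this patch: clean pages only */

struct page_state {
	bool dirty;
	bool writeback;
};

/* Condensed form of the check added to __isolate_lru_page(): if the caller
 * asked for clean pages only, refuse pages that async migration would skip
 * anyway, so they never leave the LRU in the first place. */
static int isolate_filter(const struct page_state *page, isolate_mode_t mode)
{
	if ((mode & ISOLATE_CLEAN) && (page->dirty || page->writeback))
		return -1;	/* -EBUSY in the kernel: leave the page on the LRU */
	return 0;		/* page may be isolated */
}

int main(void)
{
	isolate_mode_t mode = ISOLATE_ACTIVE | ISOLATE_INACTIVE;
	struct page_state dirty_page = { .dirty = true, .writeback = false };

	/* Async compaction (cc->sync == 0) adds ISOLATE_CLEAN, so the dirty
	 * page above is rejected instead of being isolated and re-added. */
	mode |= ISOLATE_CLEAN;

	return isolate_filter(&dirty_page, mode) ? 0 : 1;
}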

Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 19faec0520b3b16dfd58cde30938a3c4d3dcdd5b)

Signed-off-by: Jerry Snitselaar <jerry.snitselaar@oracle.com>
include/linux/mmzone.h
mm/compaction.c
mm/vmscan.c

include/linux/mmzone.h
index 5a5286d240192ac7f934d61cf35de6b76bbc5b04..632107ec1a94d862336371b59d2deb6afc652456 100644
@@ -162,6 +162,8 @@ static inline int is_unevictable_lru(enum lru_list l)
 #define ISOLATE_INACTIVE       ((__force isolate_mode_t)0x1)
 /* Isolate active pages */
 #define ISOLATE_ACTIVE         ((__force isolate_mode_t)0x2)
+/* Isolate clean file */
+#define ISOLATE_CLEAN          ((__force isolate_mode_t)0x4)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
mm/compaction.c
index 4fbbbd040756b6f18394131b67d44f5d669e75c8..61e68a5a83e0e2ae550c7cc2203a874b940c65d1 100644
@@ -261,6 +261,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
+       isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
 
        /* Do not scan outside zone boundaries */
        low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
@@ -370,9 +371,11 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                        continue;
                }
 
+               if (!cc->sync)
+                       mode |= ISOLATE_CLEAN;
+
                /* Try isolate the page */
-               if (__isolate_lru_page(page,
-                               ISOLATE_ACTIVE|ISOLATE_INACTIVE, 0) != 0)
+               if (__isolate_lru_page(page, mode, 0) != 0)
                        continue;
 
                VM_BUG_ON(PageTransCompound(page));
mm/vmscan.c
index 5da367f3b69bf1391393e267d289f53d5795b3b5..9cbe312a678ae050a6d282ebeba2e2ef35484210 100644
@@ -1045,6 +1045,9 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
 
        ret = -EBUSY;
 
+       if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
+               return ret;
+
        if (likely(get_page_unless_zero(page))) {
                /*
                 * Be careful not to clear PageLRU until after we're