mm: optimization on page allocation when CMA enabled
author		Zhaoyang Huang <zhaoyang.huang@unisoc.com>
		Thu, 11 May 2023 05:22:30 +0000 (13:22 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Mon, 20 Nov 2023 21:16:45 +0000 (13:16 -0800)
According to the current CMA utilization policy, an alloc_pages(GFP_USER)
call can 'steal' UNMOVABLE & RECLAIMABLE page blocks with the help of CMA
(it passes zone_watermark_ok by counting CMA pages in, but then takes
UNMOVABLE & RECLAIMABLE pages in rmqueue), which can make a subsequent
alloc_pages(GFP_KERNEL) fail.  Solve this by introducing a second watermark
check for GFP_MOVABLE allocations, which lets the allocation use CMA when
appropriate.

-- Free_pages(30MB)
|
|
-- WMARK_LOW(25MB)
|
-- Free_CMA(12MB)
|
|
--
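To make the figure concrete, here is a minimal userspace sketch of the
decision the patch makes (a sketch only; the struct and helper names are
illustrative, not the kernel API).  In the figure, 30MB is free in total but
12MB of that is CMA, so only 18MB of non-CMA memory remains, which is below
the 25MB low watermark; a movable allocation should therefore be served from
CMA first:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative snapshot of the figure above; sizes in MB. */
struct zone_snapshot {
	unsigned long free_pages;	/* total free memory */
	unsigned long free_cma_pages;	/* free memory inside CMA */
	unsigned long wmark_low;	/* low watermark */
};

/* Re-check the watermark while ignoring CMA pages. */
static bool watermark_ok_without_cma(const struct zone_snapshot *z)
{
	return z->free_pages - z->free_cma_pages >= z->wmark_low;
}

static bool pick_cma_first(const struct zone_snapshot *z)
{
	/* UNMOVABLE & RECLAIMABLE blocks are scarce: spare them, take CMA first. */
	if (!watermark_ok_without_cma(z))
		return true;
	/* Otherwise keep the old 50% balancing heuristic. */
	return z->free_cma_pages > z->free_pages / 2;
}

int main(void)
{
	struct zone_snapshot z = {
		.free_pages	= 30,
		.free_cma_pages	= 12,
		.wmark_low	= 25,
	};

	printf("use CMA first: %s\n", pick_cma_first(&z) ? "yes" : "no");
	return 0;
}

This is the same two-branch decision implemented by use_cma_first() in the
hunk below: the 50% balancing heuristic is kept, but only when the watermark
still holds without counting CMA.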

Link: https://lkml.kernel.org/r/20231016071245.2865233-1-zhaoyang.huang@unisoc.com
Link: https://lkml.kernel.org/r/1683782550-25799-1-git-send-email-zhaoyang.huang@unisoc.com
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: ke.wang <ke.wang@unisoc.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Zhaoyang Huang <huangzhaoyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/page_alloc.c

index dd5e8a759d27e7961b46568ffe7b80b8eb097a90..7a8dac0c1c74be818cec8970de01a46f65cb6d9f 100644
@@ -2075,6 +2075,43 @@ do_steal:
 
 }
 
+#ifdef CONFIG_CMA
+/*
+ * A GFP_MOVABLE allocation could drain UNMOVABLE & RECLAIMABLE page blocks
+ * via CMA, which can make a later GFP_KERNEL allocation fail. Re-check
+ * zone_watermark_ok() without ALLOC_CMA to decide whether to use CMA first.
+ */
+static bool use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
+{
+       unsigned long watermark;
+       bool cma_first = false;
+
+       watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+       /* did GFP_MOVABLE pass the previous zone_watermark_ok() only with CMA's help? */
+       if (zone_watermark_ok(zone, order, watermark, 0, alloc_flags & (~ALLOC_CMA))) {
+               /*
+                * Balance movable allocations between regular and CMA areas by
+                * allocating from CMA when over half of the zone's free memory
+                * is in the CMA area.
+                */
+               cma_first = (zone_page_state(zone, NR_FREE_CMA_PAGES) >
+                               zone_page_state(zone, NR_FREE_PAGES) / 2);
+       } else {
+               /*
+                * The watermark check failed, so UNMOVABLE & RECLAIMABLE pages
+                * are running low; use CMA first to keep them above their
+                * corresponding watermark.
+                */
+               cma_first = true;
+       }
+       return cma_first;
+}
+#else
+static bool use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
+{
+       return false;
+}
+#endif
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -2088,12 +2125,11 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
        if (IS_ENABLED(CONFIG_CMA)) {
                /*
                 * Balance movable allocations between regular and CMA areas by
-                * allocating from CMA when over half of the zone's free memory
-                * is in the CMA area.
+                * allocating from CMA based on re-checking zone_watermark_ok()
+                * to see whether the last check passed only with the help of CMA.
                 */
                if (alloc_flags & ALLOC_CMA &&
-                   zone_page_state(zone, NR_FREE_CMA_PAGES) >
-                   zone_page_state(zone, NR_FREE_PAGES) / 2) {
+                       use_cma_first(zone, order, alloc_flags)) {
                        page = __rmqueue_cma_fallback(zone, order);
                        if (page)
                                return page;