]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/page_alloc: disassociate the pcp->high from pcp->batch -fix
author Mel Gorman <mgorman@techsingularity.net>
Wed, 2 Jun 2021 03:52:39 +0000 (13:52 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 2 Jun 2021 03:52:39 +0000 (13:52 +1000)
Vlastimil Babka noted that __setup_per_zone_wmarks updating pcp->high did
not protect watermark-related sysctl handlers from parallel memory
hotplug operations.  This patch moves the PCP update to
setup_per_zone_wmarks and updates the PCP high value while protected by
the pcp_batch_high_lock mutex.  As a side-effect, the zone_pcp_update
calls during memory hotplug operations become redundant and can be
removed.

This is a fix to the mmotm patch
mm-page_alloc-disassociate-the-pcp-high-from-pcp-batch.patch.  It'll cause
a conflict with
mm-page_alloc-adjust-pcp-high-after-cpu-hotplug-events.patch but the
resolution is simple as the zone_pcp_update callers in
setup_per_zone_wmarks no longer exist.

Link: https://lkml.kernel.org/r/20210528105925.GN30378@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/memory_hotplug.c
mm/page_alloc.c

index 70620d0dd923a23bd77bffef4dd8b68f6e33eb14..974a565797d86d8e7e3f06660b80414d3e2000f3 100644 (file)
@@ -961,7 +961,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
        node_states_set_node(nid, &arg);
        if (need_zonelists_rebuild)
                build_all_zonelists(NULL);
-       zone_pcp_update(zone);
 
        /* Basic onlining is complete, allow allocation of onlined pages. */
        undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
@@ -974,6 +973,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
         */
        shuffle_zone(zone);
 
+       /* reinitialise watermarks and update pcp limits */
        init_per_zone_wmark_min();
 
        kswapd_run(nid);
@@ -1829,13 +1829,13 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
        adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
        adjust_present_page_count(zone, -nr_pages);
 
+       /* reinitialise watermarks and update pcp limits */
        init_per_zone_wmark_min();
 
        if (!populated_zone(zone)) {
                zone_pcp_reset(zone);
                build_all_zonelists(NULL);
-       } else
-               zone_pcp_update(zone);
+       }
 
        node_states_clear_node(node, &arg);
        if (arg.status_change_nid >= 0) {
index a36c9739adf006678a9623b08386d3778342af08..ce1236a860df2dd16caee8de7a38d6240a4bf804 100644 (file)
@@ -8198,12 +8198,6 @@ static void __setup_per_zone_wmarks(void)
                zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
                zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
 
-               /*
-                * The watermark size have changed so update the pcpu batch
-                * and high limits or the limits may be inappropriate.
-                */
-               zone_set_pageset_high_and_batch(zone);
-
                spin_unlock_irqrestore(&zone->lock, flags);
        }
 
@@ -8220,11 +8214,19 @@ static void __setup_per_zone_wmarks(void)
  */
 void setup_per_zone_wmarks(void)
 {
+       struct zone *zone;
        static DEFINE_SPINLOCK(lock);
 
        spin_lock(&lock);
        __setup_per_zone_wmarks();
        spin_unlock(&lock);
+
+       /*
+        * The watermark size have changed so update the pcpu batch
+        * and high limits or the limits may be inappropriate.
+        */
+       for_each_zone(zone)
+               zone_pcp_update(zone);
 }
 
 /*