www.infradead.org Git - users/dwmw2/linux.git/commitdiff
percpu: fix data race with pcpu_nr_empty_pop_pages
author: Dennis Zhou <dennis@kernel.org>
Tue, 8 Oct 2024 00:19:42 +0000 (17:19 -0700)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 7 Nov 2024 04:11:11 +0000 (20:11 -0800)
Fixes the data race by moving the read to be behind the pcpu_lock. This
is okay because the code (initially) above it will not increase the
empty populated page count because it is populating backing pages that
already have allocations served out of them.

Link: https://lkml.kernel.org/r/20241008001942.8114-1-dennis@kernel.org
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202407191651.f24e499d-oliver.sang@intel.com
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/percpu.c

index da21680ff294cb53dfb42bf0d3b3bbd2654d2cfa..d1a73cf65c53242837bb97339a9debbc7d4ebf55 100644 (file)
@@ -1864,6 +1864,10 @@ restart:
 
 area_found:
        pcpu_stats_area_alloc(chunk, size);
+
+       if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
+               pcpu_schedule_balance_work();
+
        spin_unlock_irqrestore(&pcpu_lock, flags);
 
        /* populate if not all pages are already there */
@@ -1891,9 +1895,6 @@ area_found:
                mutex_unlock(&pcpu_alloc_mutex);
        }
 
-       if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
-               pcpu_schedule_balance_work();
-
        /* clear the areas and return address relative to base address */
        for_each_possible_cpu(cpu)
                memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);