From: Marcelo Tosatti
Date: Mon, 20 Mar 2023 18:03:34 +0000 (-0300)
Subject: this_cpu_cmpxchg: ARM64: switch this_cpu_cmpxchg to locked, add _local function
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=b605a5a8782f7ec0e2e5f5b17e238ed5c38c615a;p=users%2Fjedix%2Flinux-maple.git

this_cpu_cmpxchg: ARM64: switch this_cpu_cmpxchg to locked, add _local function

The goal is to have vmstat_shepherd transfer values from the per-CPU
counters to the global counters remotely.  For this, an atomic
this_cpu_cmpxchg is necessary.

Following the kernel convention for cmpxchg/cmpxchg_local, change ARM64's
this_cpu_cmpxchg_ helpers to be atomic, and add this_cpu_cmpxchg_local_
helpers which are not atomic.

Link: https://lkml.kernel.org/r/20230320180745.582248645@redhat.com
Signed-off-by: Marcelo Tosatti
Cc: Aaron Tomlin
Cc: Christoph Lameter
Cc: Frederic Weisbecker
Cc: Heiko Carstens
Cc: Huacai Chen
Cc: Michal Hocko
Cc: Peter Xu
Cc: "Russell King (Oracle)"
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---

diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index b9ba19dbdb694..6ff552e48007b 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -232,13 +232,23 @@ PERCPU_RET_OP(add, add, ldadd)
 	_pcp_protect_return(xchg_relaxed, pcp, val)
 
 #define this_cpu_cmpxchg_1(pcp, o, n)	\
-	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+	_pcp_protect_return(cmpxchg, pcp, o, n)
 #define this_cpu_cmpxchg_2(pcp, o, n)	\
-	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+	_pcp_protect_return(cmpxchg, pcp, o, n)
 #define this_cpu_cmpxchg_4(pcp, o, n)	\
-	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+	_pcp_protect_return(cmpxchg, pcp, o, n)
 #define this_cpu_cmpxchg_8(pcp, o, n)	\
+	_pcp_protect_return(cmpxchg, pcp, o, n)
+
+#define this_cpu_cmpxchg_local_1(pcp, o, n)	\
 	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_local_2(pcp, o, n)	\
+	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_local_4(pcp, o, n)	\
+	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_local_8(pcp, o, n)	\
+	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+
 
 #ifdef __KVM_NVHE_HYPERVISOR__
 extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
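
As context for the convention above: once this_cpu_cmpxchg() is fully
atomic, a housekeeping CPU can safely compare-and-swap another CPU's
per-CPU data (via per_cpu_ptr() and cmpxchg()) while the owning CPU
updates it through its fast path.  The following is a minimal,
hypothetical sketch of that usage pattern, not part of this patch;
demo_count, demo_local_add() and demo_drain_cpu() are illustrative
names only.

/*
 * Hypothetical example: a per-CPU counter whose fast path runs on the
 * owning CPU while a housekeeping CPU drains it remotely.  The remote
 * drain is only safe because the local fast path uses the fully
 * atomic this_cpu_cmpxchg().
 */
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/compiler.h>

static DEFINE_PER_CPU(long, demo_count);

/* Owning-CPU fast path: atomic, so it may race with the remote drain. */
static void demo_local_add(long delta)
{
	long old;

	do {
		old = this_cpu_read(demo_count);
	} while (this_cpu_cmpxchg(demo_count, old, old + delta) != old);
}

/* Remote drain, e.g. from a shepherd work item: steal the count. */
static long demo_drain_cpu(int cpu)
{
	long *pcp = per_cpu_ptr(&demo_count, cpu);
	long old;

	do {
		old = READ_ONCE(*pcp);
	} while (cmpxchg(pcp, old, 0) != old);

	return old;
}

Code that only needs protection against interrupts and preemption on
the local CPU (the historical behaviour of these helpers) would use
this_cpu_cmpxchg_local() instead, which maps to the relaxed form kept
above.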