this_cpu_cmpxchg: loongarch: switch this_cpu_cmpxchg to locked, add _local function

Author:    Marcelo Tosatti <mtosatti@redhat.com>
           Mon, 20 Mar 2023 18:03:35 +0000 (15:03 -0300)
Committer: Andrew Morton <akpm@linux-foundation.org>
           Tue, 28 Mar 2023 23:25:08 +0000 (16:25 -0700)
The goal is to have vmstat_shepherd transfer per-CPU counters to the
global counters remotely.  For this, an atomic this_cpu_cmpxchg is
necessary.

Following the kernel convention for cmpxchg/cmpxchg_local, add
this_cpu_cmpxchg_local helpers to LoongArch.
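
As a rough illustration of why the fully atomic flavour matters, below is a
minimal userspace model of the remote-drain pattern (pcpu_counter, drain_cpu
and NR_CPUS are invented for this sketch; the kernel uses per-CPU variables
and cmpxchg() rather than C11 atomics):

	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_CPUS 4

	/* stand-in for a per-CPU counter array */
	static _Atomic long pcpu_counter[NR_CPUS];
	static long global_counter;

	/* Remote drain, as vmstat_shepherd would do: atomically swap to 0. */
	static void drain_cpu(int cpu)
	{
		long old = atomic_load(&pcpu_counter[cpu]);

		/* retry until the observed value is replaced with 0 */
		while (!atomic_compare_exchange_weak(&pcpu_counter[cpu], &old, 0))
			;
		global_counter += old;
	}

	int main(void)
	{
		atomic_store(&pcpu_counter[1], 42);
		drain_cpu(1);	/* fold CPU 1's counter into the global one */
		printf("global_counter = %ld\n", global_counter);
		return 0;
	}

A remote drain like this is only safe if the owning CPU's own updates are
atomic as well, which is why this_cpu_cmpxchg is switched from preemption
protection to a locked cmpxchg below.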

Link: https://lkml.kernel.org/r/20230320180745.607294360@redhat.com
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Aaron Tomlin <atomlin@atomlin.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: "Russell King (Oracle)" <linux@armlinux.org.uk>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/loongarch/include/asm/percpu.h

index ad8d88494554a73951ac1c7b12c5b322c9750ab5..901727df7cbe48d28a585cc808103b359a7fa356 100644 (file)
@@ -150,6 +150,16 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 }
 
 /* this_cpu_cmpxchg */
+#define _protect_cmpxchg(pcp, o, n)                            \
+({                                                             \
+       typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
+       preempt_disable_notrace();                              \
+       __ret = cmpxchg(raw_cpu_ptr(&(pcp)), o, n);             \
+       preempt_enable_notrace();                               \
+       __ret;                                                  \
+})
+
+/* this_cpu_cmpxchg_local */
 #define _protect_cmpxchg_local(pcp, o, n)                      \
 ({                                                             \
        typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
@@ -222,10 +232,15 @@ do {                                                                      \
 #define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
 #define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
 
-#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_local_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_local_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_local_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_local_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg(ptr, o, n)
 
 #include <asm-generic/percpu.h>
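
For callers, the resulting convention mirrors cmpxchg()/cmpxchg_local(): the
full flavour is safe against a remote updater such as vmstat_shepherd, while
the _local flavour keeps the cheaper preemption-protected path for counters
that only the owning CPU writes.  A hedged sketch of the choice (my_counter
and both helpers are hypothetical, not part of this patch):

	#include <linux/percpu.h>

	/* hypothetical per-CPU counter, for illustration only */
	DEFINE_PER_CPU(long, my_counter);

	/* safe even if another CPU cmpxchg()es this counter remotely */
	long set_if_zero(long val)
	{
		return this_cpu_cmpxchg(my_counter, 0, val);
	}

	/* cheaper: assumes only the owning CPU ever writes my_counter */
	long set_if_zero_local(long val)
	{
		return this_cpu_cmpxchg_local(my_counter, 0, val);
	}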