www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
asm-generic: add __smp_xxx wrappers
author: Michael S. Tsirkin <mst@redhat.com>
Sun, 27 Dec 2015 11:50:07 +0000 (13:50 +0200)
committer: Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
Thu, 31 Aug 2017 17:26:20 +0000 (10:26 -0700)
Orabug: 26668113

On !SMP, most architectures define their
barriers as compiler barriers.
On SMP, most need an actual barrier.

Make it possible to remove the code duplication for
!SMP by defining low-level __smp_xxx barriers
which do not depend on the value of SMP, then
use them from asm-generic conditionally.

Besides reducing code duplication, these low level APIs will also be
useful for virtualization, where a barrier is sometimes needed even if
!SMP since we might be talking to another kernel on the same SMP system.

Both virtio and Xen drivers will benefit.

The smp_xxx variants should use __smp_xxx ones or barrier() depending on
SMP, identically for all architectures.

We keep ifndef guards around them for now - once/if all
architectures are converted to use the generic
code, we'll be able to remove these.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
(cherry picked from commit a9e4252a9b147043142282ebb65da94dcb951e2a)
Signed-off-by: Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
Reviewed-by: Jack Vogel <jack.vogel@oracle.com>
Conflicts:
include/asm-generic/barrier.h

include/asm-generic/barrier.h

index f5c40b0fadc2a50be563304727db2e7ad7fe6699..8680eb1591f1c244b5c1620330fcd262f77d591d 100644 (file)
 #define read_barrier_depends()         do { } while (0)
 #endif
 
+#ifndef __smp_mb
+#define __smp_mb()     mb()
+#endif
+
+#ifndef __smp_rmb
+#define __smp_rmb()    rmb()
+#endif
+
+#ifndef __smp_wmb
+#define __smp_wmb()    wmb()
+#endif
+
+#ifndef __smp_read_barrier_depends
+#define __smp_read_barrier_depends()   read_barrier_depends()
+#endif
+
 #ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
-#else
+
+#ifndef smp_mb
+#define smp_mb()       __smp_mb()
+#endif
+
+#ifndef smp_rmb
+#define smp_rmb()      __smp_rmb()
+#endif
+
+#ifndef smp_wmb
+#define smp_wmb()      __smp_wmb()
+#endif
+
+#ifndef smp_read_barrier_depends
+#define smp_read_barrier_depends()     __smp_read_barrier_depends()
+#endif
+
+#else  /* !CONFIG_SMP */
+
+#ifndef smp_mb
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
 #define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
 #endif
 
+#endif /* CONFIG_SMP */
+
+#ifndef __smp_store_mb
+#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
+#endif
+
+#ifndef __smp_mb__before_atomic
+#define __smp_mb__before_atomic()      __smp_mb()
+#endif
+
+#ifndef __smp_mb__after_atomic
+#define __smp_mb__after_atomic()       __smp_mb()
+#endif
+
+#ifndef __smp_store_release
+#define __smp_store_release(p, v)                                      \
+do {                                                                   \
+       compiletime_assert_atomic_type(*p);                             \
+       __smp_mb();                                                     \
+       WRITE_ONCE(*p, v);                                              \
+} while (0)
+#endif
+
+#ifndef __smp_load_acquire
+#define __smp_load_acquire(p)                                          \
+({                                                                     \
+       typeof(*p) ___p1 = READ_ONCE(*p);                               \
+       compiletime_assert_atomic_type(*p);                             \
+       __smp_mb();                                                     \
+       ___p1;                                                          \
+})
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  __smp_store_mb(var, value)
+#endif
+
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic()        smp_mb()
+#define smp_mb__before_atomic()        __smp_mb__before_atomic()
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() smp_mb()
+#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) __smp_store_release(p, v)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) __smp_load_acquire(p)
+#endif
+
+#else  /* !CONFIG_SMP */
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()        barrier()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() barrier()
 #endif
 
 #define smp_store_release(p, v)                                                \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       ACCESS_ONCE(*p) = (v);                                          \
+       barrier();                                                      \
+       WRITE_ONCE(*p, v);                                              \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
 ({                                                                     \
        typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
+       barrier();                                                      \
        ___p1;                                                          \
 })
 
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */