x86: define __smp_xxx
author    Michael S. Tsirkin <mst@redhat.com>
Sun, 27 Dec 2015 13:04:42 +0000 (15:04 +0200)
committer Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
Thu, 31 Aug 2017 17:26:20 +0000 (10:26 -0700)
Orabug: 26668113

This defines __smp_xxx barriers for x86,
for use by virtualization.
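
For context (not part of this patch): a guest kernel built with !CONFIG_SMP
still shares memory with the hypervisor, so the plain smp_* macros, which
collapse to compiler barriers there, are not strong enough. A companion patch
in this series adds virt_* wrappers in asm-generic/barrier.h that always
expand to the __smp_* forms. A minimal sketch, assuming those wrappers; the
demo_ring structure and publish_idx() helper are hypothetical:

    struct demo_ring {                   /* hypothetical shared ring */
            unsigned short idx;
            unsigned int   slots[16];
    };

    static void publish_idx(struct demo_ring *r, unsigned short new_idx)
    {
            /*
             * Order the slot writes before the index update the host polls
             * on.  virt_wmb() maps to __smp_wmb() even on !CONFIG_SMP,
             * whereas smp_wmb() would be only barrier() there.
             */
            virt_wmb();
            WRITE_ONCE(r->idx, new_idx);
    }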

The smp_xxx barriers are removed, as they are now
defined correctly by asm-generic/barrier.h.
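
A simplified sketch of how asm-generic/barrier.h consumes these definitions
(the generic header uses #ifndef guards so an arch can still override the
smp_* forms; the exact layout may differ slightly from this):

    #ifdef CONFIG_SMP
    #ifndef smp_mb
    #define smp_mb()        __smp_mb()
    #endif
    #ifndef smp_rmb
    #define smp_rmb()       __smp_rmb()
    #endif
    #ifndef smp_wmb
    #define smp_wmb()       __smp_wmb()
    #endif
    #else   /* !CONFIG_SMP: a compiler barrier is enough */
    #ifndef smp_mb
    #define smp_mb()        barrier()
    #endif
    #ifndef smp_rmb
    #define smp_rmb()       barrier()
    #endif
    #ifndef smp_wmb
    #define smp_wmb()       barrier()
    #endif
    #endif  /* CONFIG_SMP */

With the __smp_* forms supplied by the x86 header below, the generic header
provides smp_mb() and friends, which is why the old smp_* defines can be
deleted from arch/x86/include/asm/barrier.h.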

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
(cherry picked from commit 1638fb72070f8faf2ac0787fafbb839d0c859d5b)
Signed-off-by: Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>
Reviewed-by: Jack Vogel <jack.vogel@oracle.com>
Conflicts:
arch/x86/include/asm/barrier.h

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index d89d0c52d50414cf595c7519df0b518341182cf6..5c9b03b681a96d7f4e8f43b2011656320aa4cfd1 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
 #define dma_wmb()      barrier()
 
 #ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      dma_rmb()
-#define smp_wmb()      barrier()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif /* SMP */
+#define __smp_mb()     mb()
+#define __smp_rmb()    dma_rmb()
+#define __smp_wmb()    barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #if defined(CONFIG_X86_PPRO_FENCE)
 
  * model and we should fall back to full barriers.
  */
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                      \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       ACCESS_ONCE(*p) = (v);                                          \
+       __smp_mb();                                                     \
+       WRITE_ONCE(*p, v);                                              \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
        compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
+       __smp_mb();                                                     \
        ___p1;                                                          \
 })
 
 #else /* regular x86 TSO memory ordering */
 
-#define smp_store_release(p, v)                                                \
+#define __smp_store_release(p, v)                                      \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        ACCESS_ONCE(*p) = (v);                                          \
 } while (0)
 
-#define smp_load_acquire(p)                                            \
+#define __smp_load_acquire(p)                                          \
 ({                                                                     \
        typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
        compiletime_assert_atomic_type(*p);                             \
@@ -85,8 +83,8 @@ do {                                                                  \
 #endif
 
 /* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic()        barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
 
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC