From: Michael S. Tsirkin
Date: Sun, 27 Dec 2015 13:04:42 +0000 (+0200)
Subject: x86: define __smp_xxx
X-Git-Tag: v4.1.12-111.0.20170907_2225~8
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=7b60d5009fb288d4e5351481787b7a9bfdf70300;p=users%2Fjedix%2Flinux-maple.git

x86: define __smp_xxx

Orabug: 26668113

This defines __smp_xxx barriers for x86,
for use by virtualization.

smp_xxx barriers are removed as they are
defined correctly by asm-generic/barriers.h

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
(cherry picked from commit 1638fb72070f8faf2ac0787fafbb839d0c859d5b)
Signed-off-by: Somasundaram Krishnasamy
Reviewed-by: Jack Vogel

Conflicts:
	arch/x86/include/asm/barrier.h
---

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index d89d0c52d5041..5c9b03b681a96 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -32,16 +32,14 @@
 #define dma_wmb()	barrier()
 
 #ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	dma_rmb()
-#define smp_wmb()	barrier()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif /* SMP */
+#define __smp_mb()	mb()
+#define __smp_rmb()	dma_rmb()
+#define __smp_wmb()	barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #if defined(CONFIG_X86_PPRO_FENCE)
 
@@ -50,31 +48,31 @@
  * model and we should fall back to full barriers.
  */
 
-#define smp_store_release(p, v)					\
+#define __smp_store_release(p, v)				\
 do {								\
 	compiletime_assert_atomic_type(*p);			\
-	smp_mb();						\
-	ACCESS_ONCE(*p) = (v);					\
+	__smp_mb();						\
+	WRITE_ONCE(*p, v);					\
 } while (0)
 
-#define smp_load_acquire(p)					\
+#define __smp_load_acquire(p)					\
 ({								\
 	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
 	compiletime_assert_atomic_type(*p);			\
-	smp_mb();						\
+	__smp_mb();						\
 	___p1;							\
 })
 
 #else /* regular x86 TSO memory ordering */
 
-#define smp_store_release(p, v)					\
+#define __smp_store_release(p, v)				\
 do {								\
 	compiletime_assert_atomic_type(*p);			\
 	barrier();						\
 	ACCESS_ONCE(*p) = (v);					\
 } while (0)
 
-#define smp_load_acquire(p)					\
+#define __smp_load_acquire(p)					\
 ({								\
 	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
 	compiletime_assert_atomic_type(*p);			\
@@ -85,8 +83,8 @@ do {								\
 #endif
 
 /* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
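
Note on how the renamed primitives are consumed: with this split, the public
smp_*() macros are supplied by include/asm-generic/barrier.h, which maps them
onto the architecture's __smp_*() definitions on SMP builds and onto plain
compiler barriers on UP builds, while virtualization code gets wrappers that
always use the SMP-strength __smp_*() forms. The following is an illustrative
sketch of that layering, not a verbatim copy of the generic header, and the
macro names follow the upstream series rather than this backported tree
(which still carries set_mb(), per the conflict note above):

/*
 * Sketch of the generic consumer of the __smp_xxx primitives, modelled
 * on include/asm-generic/barrier.h (illustrative only).
 */
#ifdef CONFIG_SMP
#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif
#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif
#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif
#else	/* !CONFIG_SMP */
#ifndef smp_mb
#define smp_mb()	barrier()
#endif
#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif
#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif
#endif	/* CONFIG_SMP */

/*
 * A guest talking to an SMP host needs SMP-strength ordering even when
 * the guest kernel itself is built !CONFIG_SMP, so the virtualization
 * wrappers resolve to the __smp_*() variants unconditionally.
 */
#define virt_mb()	__smp_mb()
#define virt_rmb()	__smp_rmb()
#define virt_wmb()	__smp_wmb()

The point of the indirection is that an architecture only has to provide the
__smp_*() building blocks; the UP fallbacks and the virtualization wrappers
then come for free from the generic header.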