arm64: lse: fix LSE atomics with LLVM's integrated assembler
author		Sami Tolvanen <samitolvanen@google.com>
		Thu, 31 Oct 2019 19:57:05 +0000 (12:57 -0700)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Wed, 30 Dec 2020 10:25:43 +0000 (11:25 +0100)
commit e0d5896bd356cd577f9710a02d7a474cdf58426b upstream.

Unlike gcc, clang considers each inline assembly block to be independent
and therefore, when using the integrated assembler for inline assembly,
any preambles that enable features must be repeated in each block.

This change defines __LSE_PREAMBLE and adds it to each inline assembly
block that contains LSE instructions, which allows them to also be
compiled with clang's integrated assembler.
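
For illustration only (not part of this patch): a minimal, standalone
sketch of the pattern the change relies on. The function name and the
choice of STADD are hypothetical; the point is that the ".arch"
directive is emitted inside the asm block itself, so an assembler that
starts every block with a clean state still accepts the LSE instruction
that follows:

  /* Hypothetical example, not kernel code: repeat the feature preamble
   * in every inline asm block so the integrated assembler accepts the
   * LSE instruction even though the compiler targets plain armv8-a. */
  static inline void lse_atomic_add(int i, int *counter)
  {
          asm volatile(
          ".arch  armv8-a+lse\n"          /* same role as __LSE_PREAMBLE */
          "       stadd   %w[i], %[v]\n"  /* LSE atomic add to memory */
          : [v] "+Q" (*counter)
          : [i] "r" (i));
  }

With gcc's assembler a single ".arch_extension lse" emitted once was
enough, because assembler state persisted across blocks; with clang's
integrated assembler each block starts fresh, so the directive has to
accompany every block, which is what __LSE_PREAMBLE provides.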

Link: https://github.com/ClangBuiltLinux/linux/issues/671
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Tested-by: Andrew Murray <andrew.murray@arm.com>
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Andrew Murray <andrew.murray@arm.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
[nd: backport adjusted due to missing:
  commit addfc38672c7 ("arm64: atomics: avoid out-of-line ll/sc atomics")]
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/lse.h

diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index f9b0b09153e0eaa3b15728fd42471c77c2d1955a..eab3de4f2ad25c87046aeba7a57c4e4cba2f8758 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -32,7 +32,9 @@ static inline void atomic_##op(int i, atomic_t *v)                    \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),          \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),                       \
 "      " #asm_op "     %w[i], %[v]\n")                                 \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
@@ -52,7 +54,9 @@ static inline int atomic_fetch_##op##name(int i, atomic_t *v)         \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(fetch_##op##name),                               \
        /* LSE atomics */                                               \
@@ -84,7 +88,9 @@ static inline int atomic_add_return##name(int i, atomic_t *v)         \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(add_return##name)                                \
        __nops(1),                                                      \
@@ -110,7 +116,9 @@ static inline void atomic_and(int i, atomic_t *v)
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
+       asm volatile(
+       __LSE_PREAMBLE
+       ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC(and)
        __nops(1),
@@ -128,7 +136,9 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v)                \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(fetch_and##name)                                 \
        __nops(1),                                                      \
@@ -154,7 +164,9 @@ static inline void atomic_sub(int i, atomic_t *v)
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
+       asm volatile(
+       __LSE_PREAMBLE
+       ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC(sub)
        __nops(1),
@@ -172,7 +184,9 @@ static inline int atomic_sub_return##name(int i, atomic_t *v)               \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(sub_return##name)                                \
        __nops(2),                                                      \
@@ -200,7 +214,9 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v)                \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(fetch_sub##name)                                 \
        __nops(1),                                                      \
@@ -229,7 +245,9 @@ static inline void atomic64_##op(long i, atomic64_t *v)                     \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),        \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),                     \
 "      " #asm_op "     %[i], %[v]\n")                                  \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
@@ -249,7 +267,9 @@ static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(fetch_##op##name),                             \
        /* LSE atomics */                                               \
@@ -281,7 +301,9 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(add_return##name)                              \
        __nops(1),                                                      \
@@ -307,7 +329,9 @@ static inline void atomic64_and(long i, atomic64_t *v)
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
+       asm volatile(
+       __LSE_PREAMBLE
+       ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(and)
        __nops(1),
@@ -325,7 +349,9 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v)  \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(fetch_and##name)                               \
        __nops(1),                                                      \
@@ -351,7 +377,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
+       asm volatile(
+       __LSE_PREAMBLE
+       ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(sub)
        __nops(1),
@@ -369,7 +397,9 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(sub_return##name)                              \
        __nops(2),                                                      \
@@ -397,7 +427,9 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)  \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(fetch_sub##name)                               \
        __nops(1),                                                      \
@@ -422,7 +454,9 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
        register long x0 asm ("x0") = (long)v;
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
+       asm volatile(
+       __LSE_PREAMBLE
+       ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(dec_if_positive)
        __nops(6),
@@ -455,7 +489,9 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,       \
        register unsigned long x1 asm ("x1") = old;                     \
        register unsigned long x2 asm ("x2") = new;                     \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_CMPXCHG(name)                                           \
        __nops(2),                                                      \
@@ -507,7 +543,9 @@ static inline long __cmpxchg_double##name(unsigned long old1,               \
        register unsigned long x3 asm ("x3") = new2;                    \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+       ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_CMPXCHG_DBL(name)                                       \
        __nops(3),                                                      \
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 8262325e2fc66ec3d42f83ae6621ea4ec7e19651..960213767f1ec367e1d7f4df2304f63ae79731f5 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -4,6 +4,8 @@
 
 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
 
+#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
+
 #include <linux/compiler_types.h>
 #include <linux/export.h>
 #include <linux/stringify.h>
@@ -20,8 +22,6 @@
 
 #else  /* __ASSEMBLY__ */
 
-__asm__(".arch_extension       lse");
-
 /* Move the ll/sc atomics out-of-line */
 #define __LL_SC_INLINE         notrace
 #define __LL_SC_PREFIX(x)      __ll_sc_##x
@@ -33,7 +33,7 @@ __asm__(".arch_extension      lse");
 
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)                               \
-       ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+       ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
 
 #endif /* __ASSEMBLY__ */
 #else  /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */