x86-32: fix cmpxchg8b_emu build error with clang
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 26 Jun 2024 00:50:04 +0000 (17:50 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 30 Jun 2024 16:21:29 +0000 (09:21 -0700)
The kernel test robot reported that clang no longer compiles the 32-bit
x86 kernel in some configurations due to commit 95ece48165c1
("locking/atomic/x86: Rewrite x86_32 arch_atomic64_{,fetch}_{and,or,xor}()
functions").

The build fails with

  arch/x86/include/asm/cmpxchg_32.h:149:9: error: inline assembly requires more registers than available

and the reason seems to be that not only does the cmpxchg8b instruction
need four fixed registers (EDX:EAX and ECX:EBX), but with the emulation
fallback the inline asm also wants a fifth fixed register for the
address (it uses %esi for that, but that's just a software calling
convention for cmpxchg8b_emu).
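
For illustration, a stripped-down version of the pre-fix constraint
layout might look roughly like this (a hypothetical helper, not the
actual kernel macro - the ALTERNATIVE()/cmpxchg8b_emu machinery is
omitted, and this is 32-bit x86 only):

  typedef unsigned long long u64;

  union u64_halves { u64 full; struct { unsigned int low, high; } h; };

  static inline u64 cmpxchg64_sketch(volatile u64 *ptr, u64 old, u64 new)
  {
          union u64_halves o = { .full = old }, n = { .full = new };

          /*
           * EDX:EAX holds the old value, ECX:EBX the new one, ESI the
           * address that cmpxchg8b_emu expects - and on top of that the
           * separate "+m" operand may need yet another register to form
           * its addressing mode.
           */
          asm volatile("lock cmpxchg8b %[ptr]"
                       : [ptr] "+m" (*ptr),
                         "+a" (o.h.low), "+d" (o.h.high)
                       : "b" (n.h.low), "c" (n.h.high), "S" (ptr)
                       : "memory");
          return o.full;
  }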

Avoiding the extra pointer input to the asm (and just forcing it to
use the "0(%esi)" addressing that we end up requiring for the software
fallback anyway) seems to fix the issue.
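
With that change, the pointer goes in only once, as the "S" (ESI)
input, and the CX8 path builds its memory operand from it with the
"a" (address) operand modifier.  Reusing the types from the sketch
above, the post-fix shape is roughly (again just an illustrative
sketch, not the kernel macro):

  static inline u64 cmpxchg64_fixed_sketch(volatile u64 *ptr, u64 old, u64 new)
  {
          union u64_halves o = { .full = old }, n = { .full = new };

          /*
           * Only EDX:EAX, ECX:EBX and ESI are pinned now; "%a[ptr]"
           * prints the "S" operand as "(%esi)", so no separate memory
           * operand (and no extra addressing register) is needed.  The
           * "memory" clobber covers the store to *ptr.
           */
          asm volatile("lock cmpxchg8b %a[ptr]"
                       : "+a" (o.h.low), "+d" (o.h.high)
                       : "b" (n.h.low), "c" (n.h.high), [ptr] "S" (ptr)
                       : "memory");
          return o.full;
  }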

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202406230912.F6XFIyA6-lkp@intel.com/
Fixes: 95ece48165c1 ("locking/atomic/x86: Rewrite x86_32 arch_atomic64_{,fetch}_{and,or,xor}() functions")
Link: https://lore.kernel.org/all/202406230912.F6XFIyA6-lkp@intel.com/
Suggested-by: Uros Bizjak <ubizjak@gmail.com>
Reviewed-and-Tested-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/include/asm/cmpxchg_32.h

index ed2797f132ce09f39837588141cabde041c01f1f..62cef2113ca749ad933ee312b637a5b3f6fb4f76 100644
@@ -93,10 +93,9 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
                                                                        \
        asm volatile(ALTERNATIVE(_lock_loc                              \
                                 "call cmpxchg8b_emu",                  \
-                                _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
-                    : [ptr] "+m" (*(_ptr)),                            \
-                      "+a" (o.low), "+d" (o.high)                      \
-                    : "b" (n.low), "c" (n.high), "S" (_ptr)            \
+                                _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
+                    : "+a" (o.low), "+d" (o.high)                      \
+                    : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)      \
                     : "memory");                                       \
                                                                        \
        o.full;                                                         \
@@ -122,12 +121,11 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
                                                                        \
        asm volatile(ALTERNATIVE(_lock_loc                              \
                                 "call cmpxchg8b_emu",                  \
-                                _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+                                _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
                     CC_SET(e)                                          \
                     : CC_OUT(e) (ret),                                 \
-                      [ptr] "+m" (*(_ptr)),                            \
                       "+a" (o.low), "+d" (o.high)                      \
-                    : "b" (n.low), "c" (n.high), "S" (_ptr)            \
+                    : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)      \
                     : "memory");                                       \
                                                                        \
        if (unlikely(!ret))                                             \