From: Allen Pais
Date: Wed, 14 Jun 2017 07:37:36 +0000 (+0530)
Subject: arch/sparc: Enable queued spinlock support for SPARC
X-Git-Tag: v4.1.12-104.0.20170618_1145~26
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=2fef41cdc75563dcce211ad1bd6ec4035d3a8ad5;p=users%2Fjedix%2Flinux-maple.git

arch/sparc: Enable queued spinlock support for SPARC

This patch makes the necessary changes in the SPARC architecture to enable
queued spinlock support. Here are some of the earlier discussions about
this feature:

https://lwn.net/Articles/561775/
https://lwn.net/Articles/590243/

Cleaned up spinlock_64.h. The definitions of arch_spin_xxx are replaced
by the functions in <asm-generic/qspinlock.h>.

Signed-off-by: Babu Moger
Reviewed-by: Håkon Bugge
Reviewed-by: Jane Chu
Reviewed-by: Shannon Nelson
Reviewed-by: Vijay Kumar
Signed-off-by: David S. Miller

(cherry picked from commit 145d978585977438ebb55079487827006c604e39)

Conflicts:
	arch/sparc/include/asm/spinlock_64.h

Orabug: 26183741

Signed-off-by: Allen Pais
---

diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 5acc81b65cc8a..d7066e6141c0e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -84,6 +84,7 @@ config SPARC64
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
 
 config ARCH_DMA_ADDR_T_64BIT
 	bool
diff --git a/arch/sparc/include/asm/qspinlock.h b/arch/sparc/include/asm/qspinlock.h
new file mode 100644
index 0000000000000..5ae9a28028464
--- /dev/null
+++ b/arch/sparc/include/asm/qspinlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QSPINLOCK_H
+#define _ASM_SPARC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_SPARC_QSPINLOCK_H */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 9e5d5e10fccb5..dbc970cc12445 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -10,87 +10,7 @@
 
 #include
 
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code.  They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants.  The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp)	((lp)->lock != 0)
-
-#define arch_spin_unlock_wait(lp)	\
-	do {	rmb();			\
-	} while((lp)->lock)
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-"1:	ldstub		[%1], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	ldub		[%1], %0\n"
-"	brnz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 1b\n"
-"	.previous"
-	: "=&r" (tmp)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned long result;
-
-	__asm__ __volatile__(
-"	ldstub		[%1], %0\n"
-	: "=r" (result)
-	: "r" (lock)
-	: "memory");
-
-	return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__asm__ __volatile__(
-"	stb		%%g0, [%0]"
-	: /* No outputs */
-	: "r" (lock)
-	: "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-"1:	ldstub		[%2], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	rdpr		%%pil, %1\n"
-"	wrpr		%3, %%pil\n"
-"3:	ldub		[%2], %0\n"
-"	brnz,pt		%0, 3b\n"
-"	 nop\n"
-"	ba,pt		%%xcc, 1b\n"
-"	 wrpr		%1, %%pil\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r"(lock), "r"(flags)
-	: "memory");
-}
+#include <asm/qspinlock.h>
 
 #define arch_read_lock_flags(p, f) arch_read_lock(p)
 #define arch_write_lock_flags(p, f) arch_write_lock(p)
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 4e0dbed626135..2164729cb9077 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -1,11 +1,16 @@
 #ifndef __SPARC_SPINLOCK_TYPES_H
 #define __SPARC_SPINLOCK_TYPES_H
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
+
 typedef struct {
 	volatile unsigned char lock;
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#endif /*CONFIG_QUEUED_SPINLOCKS */
 
 #ifdef CONFIG_QUEUED_RWLOCKS
 #include <asm-generic/qrwlock_types.h>
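
For context, and not as part of the patch itself: with ARCH_USE_QUEUED_SPINLOCKS
selected, arch_spinlock_t becomes the generic struct qspinlock and the arch_spin_*
operations resolve to the queued_spin_* functions pulled in through <asm/qspinlock.h>.
The sketch below approximates the v4.12-era generic fast path from the asm-generic
headers; the identifiers (queued_spin_lock, _Q_LOCKED_VAL, queued_spin_lock_slowpath,
atomic_cmpxchg_acquire) come from the generic qspinlock implementation rather than
from this patch, and exact layouts vary by kernel version, so treat it as an
illustrative sketch, not the authoritative code.

/*
 * Illustrative sketch only -- not part of this patch.  Rough shape of the
 * generic queued-spinlock fast path that arch_spin_lock() now maps to,
 * paraphrased from the v4.12-era asm-generic/qspinlock{,_types}.h.
 */
typedef struct qspinlock {
	atomic_t	val;	/* encodes locked byte, pending bit and MCS tail */
} arch_spinlock_t;

#define _Q_LOCKED_VAL	1U

static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	/* Uncontended case: 0 -> locked with one acquire-ordered cmpxchg. */
	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;

	/* Contended case: queue on a per-CPU MCS node and spin locally. */
	queued_spin_lock_slowpath(lock, val);
}

#define arch_spin_lock(l)	queued_spin_lock(l)

The practical effect is that SPARC no longer carries any of the ldstub-based
locking removed above: the architecture only supplies the Kconfig select, the
thin <asm/qspinlock.h> wrapper, and the spinlock_types.h switch, while the
fairness and queueing logic lives entirely in the generic slow path
(kernel/locking/qspinlock.c).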