arch/sparc: Introduce xchg16 for SPARC
author Babu Moger <babu.moger@oracle.com>
Tue, 30 May 2017 20:59:02 +0000 (13:59 -0700)
committer Allen Pais <allen.pais@oracle.com>
Fri, 16 Jun 2017 08:47:15 +0000 (14:17 +0530)
SPARC currently supports 32 bit and 64 bit xchg. Add support for
16 bit (2 byte) xchg, which is required by the queued spinlock
feature. It is implemented using a 4 byte cas instruction together
with shift and mask byte manipulations.

Also rearranged the code so that __cmpxchg_u32 is defined before
xchg16, which calls it.
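
For illustration only (not part of the patch), a minimal user-space
sketch of the same technique. It assumes GCC's
__sync_val_compare_and_swap builtin in place of the SPARC cas
instruction, and demo_xchg16 is a hypothetical name:

#include <stdint.h>

/*
 * Illustrative sketch: emulate a 16 bit xchg with a 32 bit CAS.
 * The (maddr & 2) ^ 2 shift assumes a big-endian layout, as on
 * SPARC64; the CAS builtin returns the value seen at *ptr before
 * the operation, so the loop retries until no other CPU intervened.
 */
static inline uint16_t demo_xchg16(volatile uint16_t *m, uint16_t val)
{
	unsigned long maddr = (unsigned long)m;
	/* Select the halfword within the containing 4 byte word. */
	int bit_shift = ((maddr & 2) ^ 2) << 3;
	uint32_t mask = 0xffffu << bit_shift;
	volatile uint32_t *ptr = (volatile uint32_t *)(maddr & ~2UL);
	uint32_t old32, new32, load32 = *ptr;

	do {
		old32 = load32;
		/* Splice the new halfword into the unchanged bytes. */
		new32 = (load32 & ~mask) | ((uint32_t)val << bit_shift);
		load32 = __sync_val_compare_and_swap(ptr, old32, new32);
	} while (load32 != old32);

	/* Return the halfword that was previously stored. */
	return (load32 & mask) >> bit_shift;
}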

Signed-off-by: Babu Moger <babu.moger@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Steven Sistare <steven.sistare@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 79d39e2bab60d18a68a5abc00be4506864397efc)

Conflicts:

arch/sparc/include/asm/cmpxchg_64.h

Orabug: 26183741
Signed-off-by: Allen Pais <allen.pais@oracle.com>
arch/sparc/include/asm/cmpxchg_64.h

index 896a8e7f58677e172bdbf08c2d56cd3c09785882..3190407f74d3e1c1952b6f3b0747b01607258546 100644 (file)
@@ -6,6 +6,17 @@
 #ifndef __ARCH_SPARC64_CMPXCHG__
 #define __ARCH_SPARC64_CMPXCHG__
 
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+       __asm__ __volatile__("cas [%2], %3, %0"
+                            : "=&r" (new)
+                            : "0" (new), "r" (m), "r" (old)
+                            : "memory");
+
+       return new;
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
        unsigned long tmp1, tmp2;
@@ -44,10 +55,38 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 
 void __xchg_called_with_bad_pointer(void);
 
+/*
+ * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order.
+ */
+static inline unsigned long
+xchg16(__volatile__ unsigned short *m, unsigned short val)
+{
+       unsigned long maddr = (unsigned long)m;
+       int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
+       unsigned int mask = 0xffff << bit_shift;
+       unsigned int *ptr = (unsigned int  *) (maddr & ~2);
+       unsigned int old32, new32, load32;
+
+       /* Read the old value */
+       load32 = *ptr;
+
+       do {
+               old32 = load32;
+               new32 = (load32 & (~mask)) | val << bit_shift;
+               load32 = __cmpxchg_u32(ptr, old32, new32);
+       } while (load32 != old32);
+
+       return (load32 & mask) >> bit_shift;
+}
+
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                                       int size)
 {
        switch (size) {
+       case 2:
+               return xchg16(ptr, x);
        case 4:
                return xchg32(ptr, x);
        case 8:
@@ -67,17 +106,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 
 #define __HAVE_ARCH_CMPXCHG 1
 
-static inline unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
-{
-       __asm__ __volatile__("cas [%2], %3, %0"
-                            : "=&r" (new)
-                            : "0" (new), "r" (m), "r" (old)
-                            : "memory");
-
-       return new;
-}
-
 static inline unsigned long
 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
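
For reference, a worked example of the bit_shift arithmetic in the
xchg16 above; the concrete word address 0x1000 is hypothetical:

/*
 * Worked example (hypothetical word at address 0x1000, big-endian):
 *
 *   m = 0x1000:  bit_shift = (0 ^ 2) << 3 = 16, mask = 0xffff0000
 *                -> the halfword at the lower address occupies the
 *                   high-order bits of the word, as big-endian demands.
 *   m = 0x1002:  bit_shift = (2 ^ 2) << 3 = 0,  mask = 0x0000ffff
 *                -> the halfword at the higher address occupies the
 *                   low-order bits.
 *
 * The XOR with 2 flips the halfword-select bit so that the shift
 * comes out right for big-endian byte order.
 */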