From: Martin Schwidefsky <schwidefsky@de.ibm.com>
Date: Fri, 11 Nov 2016 15:22:47 +0000 (+0100)
Subject: s390/bitops: use atomic primitives for bitops
X-Git-Tag: v4.10-rc1~136^2~45
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=1993dbc7e09a7ffdd98975589aa83387928e9fe7;p=users%2Fhch%2Fblock.git

s390/bitops: use atomic primitives for bitops

Replace the bitops-specific atomic update code with the functions
from atomic_ops.h. This removes about fifty lines of non-trivial
inline assembly.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
---
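
Reviewer note (below the `---`, so ignored by `git am`): the
atomic_ops.h helpers used here take the mask first and the address
second, apply the operation atomically, and return the old 64-bit
value; the _barrier variants additionally serialize, which is what
the test_and_*_bit() primitives need. A minimal sketch of that
contract, using GCC atomic builtins instead of the actual s390
inline assembly (laog/lang/laxg on z196 and newer, a csg loop
otherwise); sketch_atomic64_or is a made-up name for illustration:

	/*
	 * Illustrative only: the real __atomic64_or() in
	 * asm/atomic_ops.h is s390 inline assembly. This models the
	 * same contract with compiler builtins: atomically
	 * *ptr |= mask and return the value seen before the OR.
	 */
	static inline unsigned long sketch_atomic64_or(unsigned long mask,
						       unsigned long *ptr)
	{
		unsigned long old = __atomic_load_n(ptr, __ATOMIC_RELAXED);

		/*
		 * Retry until the compare-and-swap wins, like the csg
		 * loop the removed __BITOPS_LOOP emitted on pre-z196
		 * machines. On failure the builtin reloads *ptr into
		 * old, so the loop body stays empty.
		 */
		while (!__atomic_compare_exchange_n(ptr, &old, old | mask,
						    false, __ATOMIC_SEQ_CST,
						    __ATOMIC_RELAXED))
			;
		return old;
	}

The _barrier variants correspond to adding a full memory barrier
after the update; in the removed code that was __BITOPS_BARRIER,
i.e. "bcr 14,0".
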

diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 8043f10da6b5..d92047da5ccb 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,57 +42,9 @@
 
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 
-#define __BITOPS_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __BITOPS_OR		"laog"
-#define __BITOPS_AND		"lang"
-#define __BITOPS_XOR		"laxg"
-#define __BITOPS_BARRIER	"bcr	14,0\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
-({								\
-	unsigned long __old;					\
-								\
-	typecheck(unsigned long *, (__addr));			\
-	asm volatile(						\
-		__op_string "	%0,%2,%1\n"			\
-		__barrier					\
-		: "=d" (__old),	"+Q" (*(__addr))		\
-		: "d" (__val)					\
-		: "cc", "memory");				\
-	__old;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __BITOPS_OR		"ogr"
-#define __BITOPS_AND		"ngr"
-#define __BITOPS_XOR		"xgr"
-#define __BITOPS_BARRIER	"\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
-({								\
-	unsigned long __old, __new;				\
-								\
-	typecheck(unsigned long *, (__addr));			\
-	asm volatile(						\
-		"	lg	%0,%2\n"			\
-		"0:	lgr	%1,%0\n"			\
-		__op_string "	%1,%3\n"			\
-		"	csg	%0,%1,%2\n"			\
-		"	jl	0b"				\
-		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
-		: "d" (__val)					\
-		: "cc", "memory");				\
-	__old;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 static inline unsigned long *
@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
+	__atomic64_or(mask, addr);
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
+	__atomic64_and(mask, addr);
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
+	__atomic64_xor(mask, addr);
 }
 
 static inline int
@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
+	old = __atomic64_or_barrier(mask, addr);
 	return (old & mask) != 0;
 }
 
@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+	old = __atomic64_and_barrier(mask, addr);
 	return (old & ~mask) != 0;
 }
 
@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
+	old = __atomic64_xor_barrier(mask, addr);
 	return (old & mask) != 0;
 }
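
Callers are unchanged by this patch. As a usage reminder, the
test_and_*_bit() return value is the old bit state, which is how a
one-bit trylock is typically built on top of these primitives. A
hypothetical example (my_flags, my_trylock, my_unlock are made-up
names, not from this patch):

	/* Hypothetical caller, not part of this patch. */
	static unsigned long my_flags;

	static inline int my_trylock(void)
	{
		/*
		 * test_and_set_bit() returns the old bit value, so 0
		 * means we made the 0 -> 1 transition and "own" bit 0.
		 */
		return test_and_set_bit(0, &my_flags) == 0;
	}

	static inline void my_unlock(void)
	{
		clear_bit(0, &my_flags);
	}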