*/
 #define atomic_inc_and_test(v)         (atomic_inc_return(v) == 0)
 
+/**
+ * atomic_xchg - atomically exchange contents of memory with a new value
+ * @v: pointer of type atomic_t
+ * @n: integer value to store in memory
+ *
+ * Atomically sets @v to @n and returns the old value of @v.
+ */
+static inline int atomic_xchg(atomic_t *v, int n)
+{
+       return xchg(&v->counter, n);
+}
+
+/**
+ * atomic_cmpxchg - atomically exchange contents of memory if it matches
+ * @v: pointer of type atomic_t
+ * @o: old value that memory should have
+ * @n: new value to write to memory if it matches
+ *
+ * Atomically checks if @v holds @o and replaces it with @n if so.
+ * Returns the old value at @v.
+ */
+static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
+{
+       return cmpxchg(&v->counter, o, n);
+}
+
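
For context (not part of the patch), the compare-and-swap retry idiom these
wrappers are meant to support looks like the sketch below; bounded_inc() and
its limit argument are made-up names used purely for illustration:

/* Hypothetical example: increment v, but never past limit. */
static int bounded_inc(atomic_t *v, int limit)
{
	int old = atomic_read(v);
	int prev;

	while (old < limit) {
		prev = atomic_cmpxchg(v, old, old + 1);
		if (prev == old)
			return 1;	/* we installed old + 1 */
		old = prev;		/* raced; retry against the fresh value */
	}
	return 0;			/* already at the limit */
}
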
 /**
  * atomic_add_negative - add and test if negative
  * @v: pointer of type atomic_t
 
 #ifndef __ASSEMBLY__
 
+/**
+ * atomic64_xchg - atomically exchange contents of memory with a new value
+ * @v: pointer of type atomic64_t
+ * @n: integer value to store in memory
+ *
+ * Atomically sets @v to @n and returns the old value of @v.
+ */
+static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+{
+       return xchg64(&v->counter, n);
+}
+
+/**
+ * atomic64_cmpxchg - atomically exchange contents of memory if it matches
+ * @v: pointer of type atomic64_t
+ * @o: old value that memory should have
+ * @n: new value to write to memory if it matches
+ *
+ * Atomically checks if @v holds @o and replaces it with @n if so.
+ * Returns the old value at @v.
+ */
+static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+{
+       return cmpxchg64(&v->counter, o, n);
+}
+
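
The 64-bit wrappers follow the same pattern; a hedged illustration only
(update_stamp() is a made-up helper, not part of this patch, and assumes the
timestamps do not wrap):

/* Hypothetical example: advance a 64-bit timestamp monotonically. */
static void update_stamp(atomic64_t *stamp, u64 now)
{
	u64 old = atomic64_read(stamp);

	while (now > old) {
		u64 prev = atomic64_cmpxchg(stamp, old, now);
		if (prev == old)
			break;		/* we published now */
		old = prev;		/* raced; re-check against the newer value */
	}
}
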
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
        long long c, old, dec;
 
 
 #ifndef __ASSEMBLY__
 
-/* Tile-specific routines to support <linux/atomic.h>. */
-int _atomic_xchg(atomic_t *v, int n);
-int _atomic_xchg_add(atomic_t *v, int i);
-int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
-int _atomic_cmpxchg(atomic_t *v, int o, int n);
-
-/**
- * atomic_xchg - atomically exchange contents of memory with a new value
- * @v: pointer of type atomic_t
- * @i: integer value to store in memory
- *
- * Atomically sets @v to @i and returns old @v
- */
-static inline int atomic_xchg(atomic_t *v, int n)
-{
-       smp_mb();  /* barrier for proper semantics */
-       return _atomic_xchg(v, n);
-}
-
-/**
- * atomic_cmpxchg - atomically exchange contents of memory if it matches
- * @v: pointer of type atomic_t
- * @o: old value that memory should have
- * @n: new value to write to memory if it matches
- *
- * Atomically checks if @v holds @o and replaces it with @n if so.
- * Returns the old value at @v.
- */
-static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
-{
-       smp_mb();  /* barrier for proper semantics */
-       return _atomic_cmpxchg(v, o, n);
-}
-
 /**
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
  */
 static inline void atomic_add(int i, atomic_t *v)
 {
-       _atomic_xchg_add(v, i);
+       _atomic_xchg_add(&v->counter, i);
 }
 
 /**
 static inline int atomic_add_return(int i, atomic_t *v)
 {
        smp_mb();  /* barrier for proper semantics */
-       return _atomic_xchg_add(v, i) + i;
+       return _atomic_xchg_add(&v->counter, i) + i;
 }
 
 /**
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        smp_mb();  /* barrier for proper semantics */
-       return _atomic_xchg_add_unless(v, a, u);
+       return _atomic_xchg_add_unless(&v->counter, a, u);
 }
 
 /**
  */
 static inline void atomic_set(atomic_t *v, int n)
 {
-       _atomic_xchg(v, n);
+       _atomic_xchg(&v->counter, n);
 }
 
 /* A 64bit atomic type */
 
 #define ATOMIC64_INIT(val) { (val) }
 
-u64 _atomic64_xchg(atomic64_t *v, u64 n);
-u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
-u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);
-
 /**
  * atomic64_read - read atomic variable
  * @v: pointer of type atomic64_t
         * Casting away const is safe since the atomic support routines
         * do not write to memory if the value has not been modified.
         */
-       return _atomic64_xchg_add((atomic64_t *)v, 0);
-}
-
-/**
- * atomic64_xchg - atomically exchange contents of memory with a new value
- * @v: pointer of type atomic64_t
- * @i: integer value to store in memory
- *
- * Atomically sets @v to @i and returns old @v
- */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
-{
-       smp_mb();  /* barrier for proper semantics */
-       return _atomic64_xchg(v, n);
-}
-
-/**
- * atomic64_cmpxchg - atomically exchange contents of memory if it matches
- * @v: pointer of type atomic64_t
- * @o: old value that memory should have
- * @n: new value to write to memory if it matches
- *
- * Atomically checks if @v holds @o and replaces it with @n if so.
- * Returns the old value at @v.
- */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
-{
-       smp_mb();  /* barrier for proper semantics */
-       return _atomic64_cmpxchg(v, o, n);
+       return _atomic64_xchg_add((u64 *)&v->counter, 0);
 }
 
 /**
  */
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
-       _atomic64_xchg_add(v, i);
+       _atomic64_xchg_add(&v->counter, i);
 }
 
 /**
 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 {
        smp_mb();  /* barrier for proper semantics */
-       return _atomic64_xchg_add(v, i) + i;
+       return _atomic64_xchg_add(&v->counter, i) + i;
 }
 
 /**
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
        smp_mb();  /* barrier for proper semantics */
-       return _atomic64_xchg_add_unless(v, a, u) != u;
+       return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
 }
 
 /**
  */
 static inline void atomic64_set(atomic64_t *v, u64 n)
 {
-       _atomic64_xchg(v, n);
+       _atomic64_xchg(&v->counter, n);
 }
 
 #define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
 
  * on any routine which updates memory and returns a value.
  */
 
-static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
-{
-       int val;
-       __insn_mtspr(SPR_CMPEXCH_VALUE, o);
-       smp_mb();  /* barrier for proper semantics */
-       val = __insn_cmpexch4((void *)&v->counter, n);
-       smp_mb();  /* barrier for proper semantics */
-       return val;
-}
-
-static inline int atomic_xchg(atomic_t *v, int n)
-{
-       int val;
-       smp_mb();  /* barrier for proper semantics */
-       val = __insn_exch4((void *)&v->counter, n);
-       smp_mb();  /* barrier for proper semantics */
-       return val;
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
        __insn_fetchadd4((void *)&v->counter, i);
                if (oldval == u)
                        break;
                guess = oldval;
-               oldval = atomic_cmpxchg(v, guess, guess + a);
+               oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval;
 }
 #define atomic64_read(v)               ((v)->counter)
 #define atomic64_set(v, i) ((v)->counter = (i))
 
-static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
-{
-       long val;
-       smp_mb();  /* barrier for proper semantics */
-       __insn_mtspr(SPR_CMPEXCH_VALUE, o);
-       val = __insn_cmpexch((void *)&v->counter, n);
-       smp_mb();  /* barrier for proper semantics */
-       return val;
-}
-
-static inline long atomic64_xchg(atomic64_t *v, long n)
-{
-       long val;
-       smp_mb();  /* barrier for proper semantics */
-       val = __insn_exch((void *)&v->counter, n);
-       smp_mb();  /* barrier for proper semantics */
-       return val;
-}
-
 static inline void atomic64_add(long i, atomic64_t *v)
 {
        __insn_fetchadd((void *)&v->counter, i);
                if (oldval == u)
                        break;
                guess = oldval;
-               oldval = atomic64_cmpxchg(v, guess, guess + a);
+               oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
 }
 
 #define _ASM_TILE_BITOPS_32_H
 
 #include <linux/compiler.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
 
 /* Tile-specific routines to support <asm/bitops.h>. */
 unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
 
 #define _ASM_TILE_BITOPS_64_H
 
 #include <linux/compiler.h>
-#include <linux/atomic.h>
+#include <asm/cmpxchg.h>
 
 /* See <asm/bitops.h> for API comments. */
 
        oldval = *addr;
        do {
                guess = oldval;
-               oldval = atomic64_cmpxchg((atomic64_t *)addr,
-                                         guess, guess ^ mask);
+               oldval = cmpxchg(addr, guess, guess ^ mask);
        } while (guess != oldval);
 }
 
        oldval = *addr;
        do {
                guess = oldval;
-               oldval = atomic64_cmpxchg((atomic64_t *)addr,
-                                         guess, guess ^ mask);
+               oldval = cmpxchg(addr, guess, guess ^ mask);
        } while (guess != oldval);
        return (oldval & mask) != 0;
 }
 
 
 #ifndef __ASSEMBLY__
 
-/* Nonexistent functions intended to cause link errors. */
-extern unsigned long __xchg_called_with_bad_pointer(void);
-extern unsigned long __cmpxchg_called_with_bad_pointer(void);
+#include <asm/barrier.h>
 
-#define xchg(ptr, x)                                                   \
+/* Nonexistent functions intended to cause compile errors. */
+extern void __xchg_called_with_bad_pointer(void)
+       __compiletime_error("Bad argument size for xchg");
+extern void __cmpxchg_called_with_bad_pointer(void)
+       __compiletime_error("Bad argument size for cmpxchg");
+
+#ifndef __tilegx__
+
+/* Note the _atomic_xxx() routines include a final mb(). */
+int _atomic_xchg(int *ptr, int n);
+int _atomic_xchg_add(int *v, int i);
+int _atomic_xchg_add_unless(int *v, int a, int u);
+int _atomic_cmpxchg(int *ptr, int o, int n);
+u64 _atomic64_xchg(u64 *v, u64 n);
+u64 _atomic64_xchg_add(u64 *v, u64 i);
+u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
+u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+
+#define xchg(ptr, n)                                                   \
+       ({                                                              \
+               if (sizeof(*(ptr)) != 4)                                \
+                       __xchg_called_with_bad_pointer();               \
+               smp_mb();                                               \
+               (typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n));   \
+       })
+
+#define cmpxchg(ptr, o, n)                                             \
+       ({                                                              \
+               if (sizeof(*(ptr)) != 4)                                \
+                       __cmpxchg_called_with_bad_pointer();            \
+               smp_mb();                                               \
+               (typeof(*(ptr)))_atomic_cmpxchg((int *)(ptr), (int)(o), (int)(n)); \
+       })
+
+#define xchg64(ptr, n)                                                 \
+       ({                                                              \
+               if (sizeof(*(ptr)) != 8)                                \
+                       __xchg_called_with_bad_pointer();               \
+               smp_mb();                                               \
+               (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+       })
+
+#define cmpxchg64(ptr, o, n)                                           \
+       ({                                                              \
+               if (sizeof(*(ptr)) != 8)                                \
+                       __cmpxchg_called_with_bad_pointer();            \
+               smp_mb();                                               \
+               (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)(ptr), (u64)(o), (u64)(n)); \
+       })
+
+#else
+
+#define xchg(ptr, n)                                                   \
        ({                                                              \
                typeof(*(ptr)) __x;                                     \
+               smp_mb();                                               \
                switch (sizeof(*(ptr))) {                               \
                case 4:                                                 \
-                       __x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
-                               (atomic_t *)(ptr),                      \
-                               (u32)(typeof((x)-(x)))(x));             \
+                       __x = (typeof(__x))(unsigned long)              \
+                               __insn_exch4((ptr), (u32)(unsigned long)(n)); \
                        break;                                          \
                case 8:                                                 \
-                       __x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
-                               (atomic64_t *)(ptr),                    \
-                               (u64)(typeof((x)-(x)))(x));             \
+                       __x = (typeof(__x))                     \
+                               __insn_exch((ptr), (unsigned long)(n)); \
                        break;                                          \
                default:                                                \
                        __xchg_called_with_bad_pointer();               \
+                       break;                                          \
                }                                                       \
+               smp_mb();                                               \
                __x;                                                    \
        })
 
 #define cmpxchg(ptr, o, n)                                             \
        ({                                                              \
                typeof(*(ptr)) __x;                                     \
+               __insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o));    \
+               smp_mb();                                               \
                switch (sizeof(*(ptr))) {                               \
                case 4:                                                 \
-                       __x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
-                               (atomic_t *)(ptr),                      \
-                               (u32)(typeof((o)-(o)))(o),              \
-                               (u32)(typeof((n)-(n)))(n));             \
+                       __x = (typeof(__x))(unsigned long)              \
+                               __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
                        break;                                          \
                case 8:                                                 \
-                       __x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
-                               (atomic64_t *)(ptr),                    \
-                               (u64)(typeof((o)-(o)))(o),              \
-                               (u64)(typeof((n)-(n)))(n));             \
+                       __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
                        break;                                          \
                default:                                                \
                        __cmpxchg_called_with_bad_pointer();            \
+                       break;                                          \
                }                                                       \
+               smp_mb();                                               \
                __x;                                                    \
        })
 
-#define tas(ptr) (xchg((ptr), 1))
+#define xchg64 xchg
+#define cmpxchg64 cmpxchg
 
-#define cmpxchg64(ptr, o, n)                                           \
-({                                                                     \
-       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg((ptr), (o), (n));                                       \
-})
+#endif
+
+#define tas(ptr) xchg((ptr), 1)
 
 #endif /* __ASSEMBLY__ */
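
As a quick, illustrative sketch (not part of the patch) of how the size
dispatch above behaves; try_lock_word() and swap_stamp() are made-up helpers:

/*
 * 4-byte object: takes the _atomic_cmpxchg() path on tilepro and
 * __insn_cmpexch4() on tilegx.
 */
static int try_lock_word(unsigned int *lockword)
{
	return cmpxchg(lockword, 0, 1) == 0;
}

/* 8-byte object: xchg64() on tilepro; on tilegx xchg64 is simply xchg. */
static u64 swap_stamp(u64 *stamp, u64 now)
{
	return xchg64(stamp, now);
}

/*
 * Any other size (e.g. a u16 *) now falls into the size check and calls
 * __cmpxchg_called_with_bad_pointer(), which __compiletime_error() turns
 * into a build-time failure rather than a link-time one.
 */
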
 
 
        return __atomic_hashed_lock(v);
 }
 
-int _atomic_xchg(atomic_t *v, int n)
+int _atomic_xchg(int *v, int n)
 {
-       return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
+       return __atomic_xchg(v, __atomic_setup(v), n).val;
 }
 EXPORT_SYMBOL(_atomic_xchg);
 
-int _atomic_xchg_add(atomic_t *v, int i)
+int _atomic_xchg_add(int *v, int i)
 {
-       return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
+       return __atomic_xchg_add(v, __atomic_setup(v), i).val;
 }
 EXPORT_SYMBOL(_atomic_xchg_add);
 
-int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
+int _atomic_xchg_add_unless(int *v, int a, int u)
 {
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
-       return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
-               .val;
+       return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
 }
 EXPORT_SYMBOL(_atomic_xchg_add_unless);
 
-int _atomic_cmpxchg(atomic_t *v, int o, int n)
+int _atomic_cmpxchg(int *v, int o, int n)
 {
-       return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
+       return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
 }
 EXPORT_SYMBOL(_atomic_cmpxchg);
 
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(atomic64_t *v, u64 n)
+u64 _atomic64_xchg(u64 *v, u64 n)
 {
-       return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
+       return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
+u64 _atomic64_xchg_add(u64 *v, u64 i)
 {
-       return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
+       return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
+u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 {
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
-       return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
-                                         u, a);
+       return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
 {
-       return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
+       return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);