 #include <asm/barrier.h>
 #include <asm/smp.h>
 
-#define atomic_read(v)  READ_ONCE((v)->counter)
+#define arch_atomic_read(v)  READ_ONCE((v)->counter)
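
/*
 * Aside, not part of the patch: the arch_ prefix frees the plain
 * atomic_*() names for the generic instrumented wrappers, which add
 * KASAN/KCSAN checks around each op. Per <linux/atomic-instrumented.h>,
 * roughly:
 */
static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* sanitizer hook */
	return arch_atomic_read(v);
}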
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
-static inline void atomic_##op(int i, atomic_t *v)                     \
+static inline void arch_atomic_##op(int i, atomic_t *v)                        \
 {                                                                      \
        unsigned int val;                                               \
                                                                        \
 }                                                                      \
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)                \
 {                                                                      \
        unsigned int val;                                               \
                                                                        \
 }
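
/*
 * Sketch of the LLSC retry loop the truncated bodies above wrap,
 * shown with "add" substituted for the asm_op parameter (assumes
 * kernel context; abridged, not the verbatim macro expansion):
 */
static inline void arch_atomic_add_sketch(int i, atomic_t *v)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock   %[val], [%[ctr]]	\n"	/* load-linked */
	"	add     %[val], %[val], %[i]	\n"
	"	scond   %[val], [%[ctr]]	\n"	/* store-conditional */
	"	bnz     1b			\n"	/* raced: retry */
	: [val] "=&r" (val)
	: [ctr] "r" (&v->counter),
	  [i]   "ir" (i)
	: "cc");
}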
 
 #define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
-static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)           \
 {                                                                      \
        unsigned int val, orig;                                         \
                                                                        \
 #ifndef CONFIG_SMP
 
  /* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
 #else
 
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
 {
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
        atomic_ops_unlock(flags);
 }
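
/*
 * Minimal sketch of the full body elided above: even a single store
 * must take atomic_ops_lock, so it cannot land in the middle of a
 * lock-protected read-modify-write running on another CPU.
 */
static inline void arch_atomic_set_sketch(atomic_t *v, int i)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}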
 
-#define atomic_set_release(v, i)       atomic_set((v), (i))
+#define arch_atomic_set_release(v, i)  arch_atomic_set((v), (i))
 
 #endif
 
  */
 
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
-static inline void atomic_##op(int i, atomic_t *v)                     \
+static inline void arch_atomic_##op(int i, atomic_t *v)                        \
 {                                                                      \
        unsigned long flags;                                            \
                                                                        \
 }
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)                \
 {                                                                      \
        unsigned long flags;                                            \
        unsigned long temp;                                             \
 }
 
 #define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
-static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)           \
 {                                                                      \
        unsigned long flags;                                            \
        unsigned long orig;                                             \
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
-#define atomic_andnot          atomic_andnot
-#define atomic_fetch_andnot    atomic_fetch_andnot
-
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op)                                   \
        ATOMIC_OP(op, c_op, asm_op)                                     \
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
 
+#define arch_atomic_andnot             arch_atomic_andnot
+#define arch_atomic_fetch_andnot       arch_atomic_fetch_andnot
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
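
/*
 * Why the define-to-itself idiom above matters: the generic fallback
 * layer tests the macro to decide whether the arch provides the op.
 * Sketch of the pattern in <linux/atomic-arch-fallback.h>:
 */
#ifndef arch_atomic_andnot
static __always_inline void arch_atomic_andnot(int i, atomic_t *v)
{
	arch_atomic_and(~i, v);	/* synthesized when the arch has no andnot */
}
#define arch_atomic_andnot arch_atomic_andnot
#endif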
 
 #define ATOMIC64_INIT(a) { (a) }
 
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
        s64 val;
 
        return val;
 }
 
-static inline void atomic64_set(atomic64_t *v, s64 a)
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
 {
        /*
         * This could have been a simple assignment in "C" but would need
 }
 
 #define ATOMIC64_OP(op, op1, op2)                                      \
-static inline void atomic64_##op(s64 a, atomic64_t *v)                 \
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v)            \
 {                                                                      \
        s64 val;                                                        \
                                                                        \
 }                                                                      \
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)                               \
-static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)         \
+static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v)    \
 {                                                                      \
        s64 val;                                                        \
                                                                        \
 }
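
/*
 * Sketch of the 64-bit LLSC pattern behind the truncated bodies
 * above, with (op1, op2) = (add.f, adc): a double-word linked
 * load/store, the low half setting carry and the high half
 * consuming it (assumes kernel context; abridged):
 */
static inline void arch_atomic64_add_sketch(s64 a, atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"	/* 64-bit load-linked */
	"	add.f   %L0, %L0, %L2	\n"	/* low word, sets carry */
	"	adc     %H0, %H0, %H2	\n"	/* high word plus carry */
	"	scondd  %0, [%1]	\n"	/* 64-bit store-conditional */
	"	bnz     1b		\n"
	: "=&r"(val)
	: "r"(&v->counter), "ir"(a)
	: "cc");
}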
 
 #define ATOMIC64_FETCH_OP(op, op1, op2)                                        \
-static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)            \
+static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v)       \
 {                                                                      \
        s64 val, orig;                                                  \
                                                                        \
        ATOMIC64_OP_RETURN(op, op1, op2)                                \
        ATOMIC64_FETCH_OP(op, op1, op2)
 
-#define atomic64_andnot                atomic64_andnot
-#define atomic64_fetch_andnot  atomic64_fetch_andnot
-
 ATOMIC64_OPS(add, add.f, adc)
 ATOMIC64_OPS(sub, sub.f, sbc)
 ATOMIC64_OPS(and, and, and)
 ATOMIC64_OPS(or, or, or)
 ATOMIC64_OPS(xor, xor, xor)
 
+#define arch_atomic64_andnot           arch_atomic64_andnot
+#define arch_atomic64_fetch_andnot     arch_atomic64_fetch_andnot
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
 static inline s64
-atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 {
        s64 prev;
 
        return prev;
 }
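
/*
 * Usage sketch (hypothetical helper, not in the patch): a classic
 * CAS loop on top of arch_atomic64_cmpxchg(), here recording a
 * running maximum.
 */
static inline void atomic64_max_sketch(atomic64_t *v, s64 new)
{
	s64 old = arch_atomic64_read(v);

	while (old < new) {
		s64 prev = arch_atomic64_cmpxchg(v, old, new);

		if (prev == old)	/* CAS won */
			break;
		old = prev;		/* lost the race: retry */
	}
}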
 
-static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
 {
        s64 prev;
 
 }
 
 /**
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic64_t
  *
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
 
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        s64 val;
 
 
        return val;
 }
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
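
/*
 * Usage sketch (hypothetical caller, kernel context assumed): since
 * the op returns old - 1 even when it does not store, a negative
 * result means "nothing left to take".
 */
static inline bool try_take_unit_sketch(atomic64_t *avail)
{
	return arch_atomic64_dec_if_positive(avail) >= 0;
}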
 
 /**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  * Atomically adds @a to @v, if it was not @u.
  * Returns the old value of @v
  */
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
        s64 old, temp;
 
 
        return old;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
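
/*
 * Usage sketch: the classic consumer of fetch_add_unless() is
 * "increment unless zero", the core of a refcount-style get:
 */
static inline bool get_ref_sketch(atomic64_t *refs)
{
	return arch_atomic64_fetch_add_unless(refs, 1, 0) != 0;
}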
 
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 
 
 
 #endif
 
-#define cmpxchg(ptr, o, n) ({                          \
+#define arch_cmpxchg(ptr, o, n) ({                     \
        (typeof(*(ptr)))__cmpxchg((ptr),                \
                                  (unsigned long)(o),   \
                                  (unsigned long)(n));  \
  *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
  *         semantics, and this lock also happens to be used by atomic_*()
  */
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
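
/*
 * Usage sketch (hypothetical): the typeof() cast in arch_cmpxchg()
 * lets callers compare-and-exchange in the pointee's natural type;
 * the old value is returned, so 0 here means the slot was claimed.
 */
static inline bool claim_slot_sketch(unsigned int *slot)
{
	return arch_cmpxchg(slot, 0U, 1U) == 0U;
}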
 
 
 /*
 
 #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
 
-#define xchg(ptr, with)                        \
+#define arch_xchg(ptr, with)           \
 ({                                     \
        unsigned long flags;            \
        typeof(*(ptr)) old_val;         \
 
 #else
 
-#define xchg(ptr, with)  _xchg(ptr, with)
+#define arch_xchg(ptr, with)  _xchg(ptr, with)
 
 #endif
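
/*
 * Sketch of the !LLSC SMP body elided in the hunk above: the same
 * atomic_ops_lock that serializes atomic_*() must also wrap the
 * load/store pair so the exchange appears as one step.
 */
#define arch_xchg_sketch(ptr, with)	\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = *(ptr);		\
	*(ptr) = (with);		\
	atomic_ops_unlock(flags);	\
	old_val;			\
})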
 
  *         can't be clobbered by others. Thus no serialization required when
  *         atomic_xchg is involved.
  */
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
 #endif