config SPARC
        bool
        default y
+       select ARCH_ATOMIC
        select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
        select ARCH_MIGHT_HAVE_PC_SERIO
        select DMA_OPS
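
Selecting ARCH_ATOMIC moves the architecture onto the generated atomic
wrappers: the un-prefixed atomic_*() API is then supplied by
include/linux/atomic-instrumented.h on top of the arch_atomic_*()
operations introduced below, which is how sparc gains KASAN/KCSAN
instrumentation of atomics. A simplified sketch of one generated wrapper
(not part of this patch):

static __always_inline void
atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}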
 
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
-int atomic_add_return(int, atomic_t *);
-int atomic_fetch_add(int, atomic_t *);
-int atomic_fetch_and(int, atomic_t *);
-int atomic_fetch_or(int, atomic_t *);
-int atomic_fetch_xor(int, atomic_t *);
-int atomic_cmpxchg(atomic_t *, int, int);
-int atomic_xchg(atomic_t *, int);
-int atomic_fetch_add_unless(atomic_t *, int, int);
-void atomic_set(atomic_t *, int);
+int arch_atomic_add_return(int, atomic_t *);
+int arch_atomic_fetch_add(int, atomic_t *);
+int arch_atomic_fetch_and(int, atomic_t *);
+int arch_atomic_fetch_or(int, atomic_t *);
+int arch_atomic_fetch_xor(int, atomic_t *);
+int arch_atomic_cmpxchg(atomic_t *, int, int);
+int arch_atomic_xchg(atomic_t *, int);
+int arch_atomic_fetch_add_unless(atomic_t *, int, int);
+void arch_atomic_set(atomic_t *, int);
 
-#define atomic_fetch_add_unless        atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
-#define atomic_set_release(v, i)       atomic_set((v), (i))
+#define arch_atomic_set_release(v, i)  arch_atomic_set((v), (i))
 
-#define atomic_read(v)          READ_ONCE((v)->counter)
+#define arch_atomic_read(v)            READ_ONCE((v)->counter)
 
-#define atomic_add(i, v)       ((void)atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v)       ((void)atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_add(i, v)  ((void)arch_atomic_add_return( (int)(i), (v)))
+#define arch_atomic_sub(i, v)  ((void)arch_atomic_add_return(-(int)(i), (v)))
 
-#define atomic_and(i, v)       ((void)atomic_fetch_and((i), (v)))
-#define atomic_or(i, v)                ((void)atomic_fetch_or((i), (v)))
-#define atomic_xor(i, v)       ((void)atomic_fetch_xor((i), (v)))
+#define arch_atomic_and(i, v)  ((void)arch_atomic_fetch_and((i), (v)))
+#define arch_atomic_or(i, v)   ((void)arch_atomic_fetch_or((i), (v)))
+#define arch_atomic_xor(i, v)  ((void)arch_atomic_fetch_xor((i), (v)))
 
-#define atomic_sub_return(i, v)        (atomic_add_return(-(int)(i), (v)))
-#define atomic_fetch_sub(i, v)  (atomic_fetch_add (-(int)(i), (v)))
+#define arch_atomic_sub_return(i, v)   (arch_atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_fetch_sub(i, v)    (arch_atomic_fetch_add (-(int)(i), (v)))
 
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
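
The define-to-itself for arch_atomic_fetch_add_unless above is how an
architecture tells the generic layer that it provides this operation
natively: include/linux/atomic-arch-fallback.h only emits its
cmpxchg-based fallback when the macro is undefined. Simplified from the
generated fallback header:

#ifndef arch_atomic_fetch_add_unless
static __always_inline int
arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = arch_atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!arch_atomic_try_cmpxchg(v, &c, c + a));

	return c;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif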
 
 
 #define ATOMIC64_INIT(i)       { (i) }
 
-#define atomic_read(v)         READ_ONCE((v)->counter)
-#define atomic64_read(v)       READ_ONCE((v)->counter)
+#define arch_atomic_read(v)    READ_ONCE((v)->counter)
+#define arch_atomic64_read(v)  READ_ONCE((v)->counter)
 
-#define atomic_set(v, i)       WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v, i)     WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i)  WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v, i)        WRITE_ONCE(((v)->counter), (i))
 
 #define ATOMIC_OP(op)                                                  \
-void atomic_##op(int, atomic_t *);                                     \
-void atomic64_##op(s64, atomic64_t *);
+void arch_atomic_##op(int, atomic_t *);                                        \
+void arch_atomic64_##op(s64, atomic64_t *);
 
 #define ATOMIC_OP_RETURN(op)                                           \
-int atomic_##op##_return(int, atomic_t *);                             \
-s64 atomic64_##op##_return(s64, atomic64_t *);
+int arch_atomic_##op##_return(int, atomic_t *);                                \
+s64 arch_atomic64_##op##_return(s64, atomic64_t *);
 
 #define ATOMIC_FETCH_OP(op)                                            \
-int atomic_fetch_##op(int, atomic_t *);                                        \
-s64 atomic64_fetch_##op(s64, atomic64_t *);
+int arch_atomic_fetch_##op(int, atomic_t *);                           \
+s64 arch_atomic64_fetch_##op(s64, atomic64_t *);
 
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
 
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
-       return xchg(&v->counter, new);
+       return arch_xchg(&v->counter, new);
 }
 
-#define atomic64_cmpxchg(v, o, n) \
-       ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+       ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+s64 arch_atomic64_dec_if_positive(atomic64_t *v);
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
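
arch_atomic64_dec_if_positive (implemented in assembly further down) has
the same semantics as the generic fallback it replaces: decrement only if
the result stays non-negative, and return the decremented value either
way, so a negative return means no store happened. As a C sketch:

static __always_inline s64
arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 dec, c = arch_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!arch_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
}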
 
        return x;
 }
 
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
 
 /* Emulate cmpxchg() the same way we emulate atomics,
  * by hashing the object address and indexing into an array
  * of spinlocks to provide rough atomicity.
  */
        return old;
 }
 
-#define cmpxchg(ptr, o, n)                                             \
+#define arch_cmpxchg(ptr, o, n)                                                \
 ({                                                                     \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
 })
 
 u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
-#define cmpxchg64(ptr, old, new)       __cmpxchg_u64(ptr, old, new)
+#define arch_cmpxchg64(ptr, old, new)  __cmpxchg_u64(ptr, old, new)
 
 #include <asm-generic/cmpxchg-local.h>
 
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
  */
-#define cmpxchg_local(ptr, o, n)                                              \
+#define arch_cmpxchg_local(ptr, o, n)                                         \
        ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
                        (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
 
 #endif /* __ARCH_SPARC_CMPXCHG__ */
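
sparc32 has no cmpxchg instruction, so, as the comment above says, the
emulation in arch/sparc/lib/atomic32.c hashes the object address into a
small array of spinlocks. Abridged from that file, for orientation:

#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	\
	(&__atomic_hash[(((unsigned long)a) >> 8) & (ATOMIC_HASH_SIZE - 1)])

unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	/* Every access to *ptr goes through the same hashed lock,
	 * giving rough atomicity across CPUs. */
	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}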
 
        return val;
 }
 
-#define xchg(ptr,x)                                                    \
+#define arch_xchg(ptr,x)                                                       \
 ({     __typeof__(*(ptr)) __ret;                                       \
        __ret = (__typeof__(*(ptr)))                                    \
                __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));      \
        return old;
 }
 
-#define cmpxchg(ptr,o,n)                                                \
+#define arch_cmpxchg(ptr,o,n)                                           \
   ({                                                                    \
      __typeof__(*(ptr)) _o_ = (o);                                      \
      __typeof__(*(ptr)) _n_ = (n);                                      \
        return old;
 }
 
-#define cmpxchg_local(ptr, o, n)                                       \
+#define arch_cmpxchg_local(ptr, o, n)                                  \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                        (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n)                                     \
+#define arch_cmpxchg64_local(ptr, o, n)                                        \
   ({                                                                   \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg_local((ptr), (o), (n));                                 \
+       arch_cmpxchg_local((ptr), (o), (n));                            \
   })
-#define cmpxchg64(ptr, o, n)   cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n)      arch_cmpxchg64_local((ptr), (o), (n))
 
 #endif /* __ARCH_SPARC64_CMPXCHG__ */
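
A hypothetical caller of the 64-bit local variant; the names here are
illustrative, not part of the patch. The BUILD_BUG_ON above rejects, at
compile time, any pointer whose pointee is not exactly 8 bytes wide:

static u64 seq;	/* only ever touched by the local CPU */

static inline bool bump_seq(void)
{
	u64 old = READ_ONCE(seq);

	/* cmpxchg returns the old value; equality means our store won. */
	return arch_cmpxchg64_local(&seq, old, old + 1) == old;
}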
 
 #endif /* SMP */
 
 #define ATOMIC_FETCH_OP(op, c_op)                                      \
-int atomic_fetch_##op(int i, atomic_t *v)                              \
+int arch_atomic_fetch_##op(int i, atomic_t *v)                         \
 {                                                                      \
        int ret;                                                        \
        unsigned long flags;                                            \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);                  \
        return ret;                                                     \
 }                                                                      \
-EXPORT_SYMBOL(atomic_fetch_##op);
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
 
 #define ATOMIC_OP_RETURN(op, c_op)                                     \
-int atomic_##op##_return(int i, atomic_t *v)                           \
+int arch_atomic_##op##_return(int i, atomic_t *v)                      \
 {                                                                      \
        int ret;                                                        \
        unsigned long flags;                                            \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);                  \
        return ret;                                                     \
 }                                                                      \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(arch_atomic_##op##_return);
 
 ATOMIC_OP_RETURN(add, +=)
 
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 
-int atomic_xchg(atomic_t *v, int new)
+int arch_atomic_xchg(atomic_t *v, int new)
 {
        int ret;
        unsigned long flags;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
 }
-EXPORT_SYMBOL(atomic_xchg);
+EXPORT_SYMBOL(arch_atomic_xchg);
 
-int atomic_cmpxchg(atomic_t *v, int old, int new)
+int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
        unsigned long flags;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
 }
-EXPORT_SYMBOL(atomic_cmpxchg);
+EXPORT_SYMBOL(arch_atomic_cmpxchg);
 
-int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int ret;
        unsigned long flags;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
 }
-EXPORT_SYMBOL(atomic_fetch_add_unless);
+EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
 
 /* Atomic operations are already serializing */
-void atomic_set(atomic_t *v, int i)
+void arch_atomic_set(atomic_t *v, int i)
 {
        unsigned long flags;
 
        v->counter = i;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
-EXPORT_SYMBOL(atomic_set);
+EXPORT_SYMBOL(arch_atomic_set);
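
The elided bodies above all follow one pattern: take the hashed lock,
perform the plain C operation, drop the lock. Reconstructed for
reference, ATOMIC_FETCH_OP(add, +=) expands to roughly:

int arch_atomic_fetch_add(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(ATOMIC_HASH(v), flags);

	ret = v->counter;	/* return the old value... */
	v->counter += i;	/* ...after applying the op */

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(arch_atomic_fetch_add);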
 
 unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
 {
 
         */
 
 #define ATOMIC_OP(op)                                                  \
-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */             \
+ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */                \
        BACKOFF_SETUP(%o2);                                             \
 1:     lduw    [%o1], %g1;                                             \
        op      %g1, %o0, %g7;                                          \
        retl;                                                           \
         nop;                                                           \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
-ENDPROC(atomic_##op);                                                  \
-EXPORT_SYMBOL(atomic_##op);
+ENDPROC(arch_atomic_##op);                                             \
+EXPORT_SYMBOL(arch_atomic_##op);
 
 #define ATOMIC_OP_RETURN(op)                                           \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */    \
+ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\
        BACKOFF_SETUP(%o2);                                             \
 1:     lduw    [%o1], %g1;                                             \
        op      %g1, %o0, %g7;                                          \
        retl;                                                           \
         sra    %o0, 0, %o0;                                            \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
-ENDPROC(atomic_##op##_return);                                         \
-EXPORT_SYMBOL(atomic_##op##_return);
+ENDPROC(arch_atomic_##op##_return);                                    \
+EXPORT_SYMBOL(arch_atomic_##op##_return);
 
 #define ATOMIC_FETCH_OP(op)                                            \
-ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */       \
+ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */  \
        BACKOFF_SETUP(%o2);                                             \
 1:     lduw    [%o1], %g1;                                             \
        op      %g1, %o0, %g7;                                          \
        retl;                                                           \
         sra    %g1, 0, %o0;                                            \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
-ENDPROC(atomic_fetch_##op);                                            \
-EXPORT_SYMBOL(atomic_fetch_##op);
+ENDPROC(arch_atomic_fetch_##op);                                       \
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
 
 ATOMIC_OP(add)
 ATOMIC_OP_RETURN(add)
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op)                                                        \
-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */           \
+ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */      \
        BACKOFF_SETUP(%o2);                                             \
 1:     ldx     [%o1], %g1;                                             \
        op      %g1, %o0, %g7;                                          \
        retl;                                                           \
         nop;                                                           \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
-ENDPROC(atomic64_##op);                                                        \
-EXPORT_SYMBOL(atomic64_##op);
+ENDPROC(arch_atomic64_##op);                                           \
+EXPORT_SYMBOL(arch_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op)                                         \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */  \
+ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */     \
        BACKOFF_SETUP(%o2);                                             \
 1:     ldx     [%o1], %g1;                                             \
        op      %g1, %o0, %g7;                                          \
        retl;                                                           \
         op     %g1, %o0, %o0;                                          \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
-ENDPROC(atomic64_##op##_return);                                       \
-EXPORT_SYMBOL(atomic64_##op##_return);
+ENDPROC(arch_atomic64_##op##_return);                                  \
+EXPORT_SYMBOL(arch_atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op)                                          \
-ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */     \
+ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */        \
        BACKOFF_SETUP(%o2);                                             \
 1:     ldx     [%o1], %g1;                                             \
        op      %g1, %o0, %g7;                                          \
        retl;                                                           \
         mov    %g1, %o0;                                               \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
-ENDPROC(atomic64_fetch_##op);                                          \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+ENDPROC(arch_atomic64_fetch_##op);                                     \
+EXPORT_SYMBOL(arch_atomic64_fetch_##op);
 
 ATOMIC64_OP(add)
 ATOMIC64_OP_RETURN(add)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */
        BACKOFF_SETUP(%o2)
 1:     ldx     [%o0], %g1
        brlez,pn %g1, 3f
 3:     retl
         sub    %g1, 1, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_dec_if_positive)
-EXPORT_SYMBOL(atomic64_dec_if_positive)
+ENDPROC(arch_atomic64_dec_if_positive)
+EXPORT_SYMBOL(arch_atomic64_dec_if_positive)
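
For orientation, a C rendering of the loop these macros implement; the
cas/cmp/bne retry sequence is elided in the hunks above, and the _sketch
name is illustrative only:

static inline int arch_atomic_add_return_sketch(int i, atomic_t *v)
{
	int old, new;

	do {
		old = READ_ONCE(v->counter);	/* lduw  [%o1], %g1    */
		new = old + i;			/* add   %g1, %o0, %g7 */
	} while (arch_cmpxchg(&v->counter, old, new) != old);
						/* cas + cmp + bne, with
						 * BACKOFF_SPIN on failure */
	return new;				/* sra   %o0, 0, %o0   */
}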