return xchg(&v->counter, new);
 }
 
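+/*
+ * Generate atomic_and(), atomic_or() and atomic_xor() as one lock-prefixed
+ * RMW instruction each ("lock andl" etc.); LOCK_PREFIX is patched out on
+ * !SMP kernels.  "+m" makes v->counter a read/write memory operand, "ir"
+ * allows an immediate or register source, and the "memory" clobber is a
+ * compiler barrier against reordering surrounding accesses.
+ */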
+#define ATOMIC_OP(op)                                                  \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       asm volatile(LOCK_PREFIX #op"l %1,%0"                           \
+                       : "+m" (v->counter)                             \
+                       : "ir" (i)                                      \
+                       : "memory");                                    \
+}
+
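+/*
+ * Advertise that this architecture supplies atomic_or() itself;
+ * <linux/atomic.h> uses this to suppress its generic cmpxchg()-based
+ * fallback definition.
+ */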
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
        return *v;
 }
 
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)                          \
-       asm volatile(LOCK_PREFIX "andl %0,%1"                   \
-                    : : "r" (~(mask)), "m" (*(addr)) : "memory")
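+/*
+ * Transitional wrappers for the old x86-only mask API; __deprecated makes
+ * any remaining caller trip a build-time warning.  Note the inverted
+ * polarity: clearing the bits in mask means AND-ing with ~mask.  New code
+ * should call atomic_and()/atomic_or() directly.
+ */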
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+       atomic_and(~mask, v);
+}
 
-#define atomic_set_mask(mask, addr)                            \
-       asm volatile(LOCK_PREFIX "orl %0,%1"                    \
-                    : : "r" ((unsigned)(mask)), "m" (*(addr))  \
-                    : "memory")
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+       atomic_or(mask, v);
+}
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
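+/*
+ * 32-bit x86 has no 64-bit logical instructions, so build the ops on the
+ * cmpxchg8b-backed atomic64_cmpxchg(): start from a guess of 0, attempt to
+ * replace the guess with (guess c_op i), and on failure retry with the
+ * value the cmpxchg actually found.
+ */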
+#define ATOMIC64_OP(op, c_op)                                          \
+static inline void atomic64_##op(long long i, atomic64_t *v)           \
+{                                                                      \
+       long long old, c = 0;                                           \
+       while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)           \
+               c = old;                                                \
+}
+
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_32_H */
 
        return dec;
 }
 
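+/*
+ * 64-bit counterpart of the ATOMIC_OP() template above: one lock-prefixed
+ * quadword instruction per op.  "er" permits a register or a 32-bit
+ * sign-extended immediate, the only immediate form the q-suffixed
+ * instructions accept.
+ */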
+#define ATOMIC64_OP(op)                                                \
+static inline void atomic64_##op(long i, atomic64_t *v)                \
+{                                                                      \
+       asm volatile(LOCK_PREFIX #op"q %1,%0"                           \
+                       : "+m" (v->counter)                             \
+                       : "er" (i)                                      \
+                       : "memory");                                    \
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_64_H */