}
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
-       int prev, cmp;
+       int ret = 0, prev, cmp;
 
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
        __asm__ __volatile__ (
                __ASM_SMP_MB
-       "1:     ldl_l   %0,0(%2)\n"
-       "       cmpeq   %0,%3,%1\n"
-       "       beq     %1,3f\n"
-       "       mov     %4,%1\n"
-       "2:     stl_c   %1,0(%2)\n"
-       "       beq     %1,4f\n"
+       "1:     ldl_l   %1,0(%3)\n"
+       "       cmpeq   %1,%4,%2\n"
+       "       beq     %2,3f\n"
+       "       mov     %5,%2\n"
+       "2:     stl_c   %2,0(%3)\n"
+       "       beq     %2,4f\n"
        "3:     .subsection 2\n"
        "4:     br      1b\n"
        "       .previous\n"
        "       .long   2b-.\n"
        "       lda     $31,3b-2b(%0)\n"
        "       .previous\n"
-       :       "=&r"(prev), "=&r"(cmp)
+       :       "+r"(ret), "=&r"(prev), "=&r"(cmp)
        :       "r"(uaddr), "r"((long)oldval), "r"(newval)
        :       "memory");
 
-       return prev;
+       *uval = prev;
+       return ret;
 }
 
 #endif /* __KERNEL__ */
 
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
-       int val;
+       int ret = 0, val;
 
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
         * call sites. */
 
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
-       "1:     " T(ldr) "      %0, [%3]\n"
-       "       teq     %0, %1\n"
+       "1:     " T(ldr) "      %1, [%4]\n"
+       "       teq     %1, %2\n"
        "       it      eq      @ explicit IT needed for the 2b label\n"
-       "2:     " T(streq) "    %2, [%3]\n"
+       "2:     " T(streq) "    %3, [%4]\n"
        "3:\n"
        "       .pushsection __ex_table,\"a\"\n"
        "       .align  3\n"
        "       .long   1b, 4f, 2b, 4f\n"
        "       .popsection\n"
        "       .pushsection .fixup,\"ax\"\n"
-       "4:     mov     %0, %4\n"
+       "4:     mov     %0, %5\n"
        "       b       3b\n"
        "       .popsection"
-       : "=&r" (val)
+       : "+r" (ret), "=&r" (val)
        : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
        : "cc", "memory");
 
-       return val;
+       *uval = val;
+       return ret;
 }
 
 #endif /* !SMP */
 
 extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
        return -ENOSYS;
 }
 
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
        {
-               register unsigned long r8 __asm ("r8");
+               register unsigned long r8 __asm ("r8") = 0;
+               unsigned long prev;
                __asm__ __volatile__(
                        "       mf;;                                    \n"
                        "       mov ar.ccv=%3;;                         \n"
                        "[1:]   cmpxchg4.acq %0=[%1],%2,ar.ccv          \n"
                        "       .xdata4 \"__ex_table\", 1b-., 2f-.      \n"
                        "[2:]"
-                       : "=r" (r8)
+                       : "=r" (prev)
                        : "r" (uaddr), "r" (newval),
                          "rO" ((long) (unsigned) oldval)
                        : "memory");
+               *uval = prev;
                return r8;
        }
 }
 
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
-       int prev, cmp;
+       int ret = 0, prev, cmp;
 
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       __asm__ __volatile__ ("1:       lwx     %0, %2, r0;             \
-                                       cmp     %1, %0, %3;             \
-                                       beqi    %1, 3f;                 \
-                               2:      swx     %4, %2, r0;             \
-                                       addic   %1, r0, 0;              \
-                                       bnei    %1, 1b;                 \
+       __asm__ __volatile__ ("1:       lwx     %1, %3, r0;             \
+                                       cmp     %2, %1, %4;             \
+                                       beqi    %2, 3f;                 \
+                               2:      swx     %5, %3, r0;             \
+                                       addic   %2, r0, 0;              \
+                                       bnei    %2, 1b;                 \
                                3:                                      \
                                .section .fixup,\"ax\";                 \
                                4:      brid    3b;                     \
-                                       addik   %0, r0, %5;             \
+                                       addik   %0, r0, %6;             \
                                .previous;                              \
                                .section __ex_table,\"a\";              \
                                .word   1b,4b,2b,4b;                    \
                                .previous;"                             \
-               : "=&r" (prev), "=&r"(cmp)                              \
+               : "+r" (ret), "=&r" (prev), "=&r"(cmp)  \
                : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT));
 
-       return prev;
+       *uval = prev;
+       return ret;
 }
 
 #endif /* __KERNEL__ */
 
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
-       int retval;
+       int ret = 0, val;
 
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
-               "1:     ll      %0, %2                                  \n"
-               "       bne     %0, %z3, 3f                             \n"
+               "1:     ll      %1, %3                                  \n"
+               "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
-               "       move    $1, %z4                                 \n"
+               "       move    $1, %z5                                 \n"
                "       .set    mips3                                   \n"
-               "2:     sc      $1, %1                                  \n"
+               "2:     sc      $1, %2                                  \n"
                "       beqzl   $1, 1b                                  \n"
                __WEAK_LLSC_MB
                "3:                                                     \n"
                "       .set    pop                                     \n"
                "       .section .fixup,\"ax\"                          \n"
-               "4:     li      %0, %5                                  \n"
+               "4:     li      %0, %6                                  \n"
                "       j       3b                                      \n"
                "       .previous                                       \n"
                "       .section __ex_table,\"a\"                       \n"
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "=&r" (retval), "=R" (*uaddr)
+               : "+r" (ret), "=&r" (val), "=R" (*uaddr)
                : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
                : "memory");
        } else if (cpu_has_llsc) {
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
-               "1:     ll      %0, %2                                  \n"
-               "       bne     %0, %z3, 3f                             \n"
+               "1:     ll      %1, %3                                  \n"
+               "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
-               "       move    $1, %z4                                 \n"
+               "       move    $1, %z5                                 \n"
                "       .set    mips3                                   \n"
-               "2:     sc      $1, %1                                  \n"
+               "2:     sc      $1, %2                                  \n"
                "       beqz    $1, 1b                                  \n"
                __WEAK_LLSC_MB
                "3:                                                     \n"
                "       .set    pop                                     \n"
                "       .section .fixup,\"ax\"                          \n"
-               "4:     li      %0, %5                                  \n"
+               "4:     li      %0, %6                                  \n"
                "       j       3b                                      \n"
                "       .previous                                       \n"
                "       .section __ex_table,\"a\"                       \n"
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "=&r" (retval), "=R" (*uaddr)
+               : "+r" (ret), "=&r" (val), "=R" (*uaddr)
                : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
                : "memory");
        } else
                return -ENOSYS;
 
-       return retval;
+       *uval = val;
+       return ret;
 }
 
 #endif
 
 
 /* Non-atomic version */
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
-       int err = 0;
-       int uval;
+       int val;
 
        /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
         * our gateway page, and causes no end of trouble...
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       err = get_user(uval, uaddr);
-       if (err) return -EFAULT;
-       if (uval == oldval)
-               err = put_user(newval, uaddr);
-       if (err) return -EFAULT;
-       return uval;
+       if (get_user(val, uaddr))
+               return -EFAULT;
+       if (val == oldval && put_user(newval, uaddr))
+               return -EFAULT;
+       *uval = val;
+       return 0;
 }
 
 #endif /*__KERNEL__*/
 
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
-       int prev;
+       int ret = 0, prev;
 
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
         __asm__ __volatile__ (
         PPC_RELEASE_BARRIER
-"1:     lwarx   %0,0,%2         # futex_atomic_cmpxchg_inatomic\n\
-        cmpw    0,%0,%3\n\
+"1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
+        cmpw    0,%1,%4\n\
         bne-    3f\n"
-        PPC405_ERR77(0,%2)
-"2:     stwcx.  %4,0,%2\n\
+        PPC405_ERR77(0,%3)
+"2:     stwcx.  %5,0,%3\n\
         bne-    1b\n"
         PPC_ACQUIRE_BARRIER
 "3:    .section .fixup,\"ax\"\n\
-4:     li      %0,%5\n\
+4:     li      %0,%6\n\
        b       3b\n\
        .previous\n\
        .section __ex_table,\"a\"\n\
        .align 3\n\
        " PPC_LONG "1b,4b,2b,4b\n\
        .previous" \
-        : "=&r" (prev), "+m" (*uaddr)
+        : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
         : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
         : "cc", "memory");
 
-        return prev;
+       *uval = prev;
+        return ret;
 }
 
 #endif /* __KERNEL__ */
 
        return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
+static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
                                                int oldval, int newval)
 {
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
+       return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
 
        size_t (*strnlen_user)(size_t, const char __user *);
        size_t (*strncpy_from_user)(size_t, const char __user *, char *);
        int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
-       int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+       int (*futex_atomic_cmpxchg)(int *, int __user *, int old, int new);
 };
 
 extern struct uaccess_ops uaccess;
 
 extern size_t copy_to_user_std(size_t, void __user *, const void *);
 extern size_t strnlen_user_std(size_t, const char __user *);
 extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_cmpxchg_std(int __user *, int, int);
+extern int futex_atomic_cmpxchg_std(int *, int __user *, int, int);
 extern int futex_atomic_op_std(int, int __user *, int, int *);
 
 extern size_t copy_from_user_pt(size_t, const void __user *, void *);
 extern size_t copy_to_user_pt(size_t, void __user *, const void *);
 extern int futex_atomic_op_pt(int, int __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
+extern int futex_atomic_cmpxchg_pt(int *, int __user *, int, int);
 
 #endif /* __ARCH_S390_LIB_UACCESS_H */
 
        return ret;
 }
 
-static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+static int __futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr,
+                                    int oldval, int newval)
 {
        int ret;
 
        asm volatile("0: cs   %1,%4,0(%5)\n"
-                    "1: lr   %0,%1\n"
+                    "1: la   %0,0\n"
                     "2:\n"
                     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
+       *uval = oldval;
        return ret;
 }
 
-int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr,
+                           int oldval, int newval)
 {
        int ret;
 
        if (segment_eq(get_fs(), KERNEL_DS))
-               return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+               return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
-       ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+       ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        put_page(virt_to_page(uaddr));
        return ret;
 }
 
        return ret;
 }
 
-int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_std(int *uval, int __user *uaddr,
+                            int oldval, int newval)
 {
        int ret;
 
        asm volatile(
                "   sacf 256\n"
                "0: cs   %1,%4,0(%5)\n"
-               "1: lr   %0,%1\n"
+               "1: la   %0,0\n"
                "2: sacf 0\n"
                EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                : "cc", "memory" );
+       *uval = oldval;
        return ret;
 }
 
 
        return ret;
 }
 
-static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr,
+static inline int atomic_futex_op_cmpxchg_inatomic(int *uval,
+                                                  int __user *uaddr,
                                                   int oldval, int newval)
 {
        unsigned long flags;
 
        local_irq_restore(flags);
 
-       if (ret)
-               return ret;
-
-       return prev;
+       *uval = prev;
+       return ret;
 }
 
 #endif /* __ASM_SH_FUTEX_IRQ_H */
 
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval);
+       return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
 
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
+       int ret = 0;
+
        __asm__ __volatile__(
-       "\n1:   casa    [%3] %%asi, %2, %0\n"
+       "\n1:   casa    [%4] %%asi, %3, %1\n"
        "2:\n"
        "       .section .fixup,#alloc,#execinstr\n"
        "       .align  4\n"
        "3:     sethi   %%hi(2b), %0\n"
        "       jmpl    %0 + %%lo(2b), %%g0\n"
-       "        mov    %4, %0\n"
+       "       mov     %5, %0\n"
        "       .previous\n"
        "       .section __ex_table,\"a\"\n"
        "       .align  4\n"
        "       .word   1b, 3b\n"
        "       .previous\n"
-       : "=r" (newval)
-       : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
+       : "+r" (ret), "=r" (newval)
+       : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
        : "memory");
 
-       return newval;
+       *uval = newval;
+       return ret;
 }
 
 #endif /* !(_SPARC64_FUTEX_H) */
 
        return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-                                               int newval)
+static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                                               int oldval, int newval)
 {
        struct __get_user asm_ret;
 
                return -EFAULT;
 
        asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-       return asm_ret.err ? asm_ret.err : asm_ret.val;
+       *uval = asm_ret.val;
+       return asm_ret.err;
 }
 
 #ifndef __tilegx__
 
        return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-                                               int newval)
+static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                                               int oldval, int newval)
 {
+       int ret = 0;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
        /* Real i386 machines have no cmpxchg instruction */
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
+       asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
                     "2:\t.section .fixup, \"ax\"\n"
-                    "3:\tmov     %2, %0\n"
+                    "3:\tmov     %3, %0\n"
                     "\tjmp     2b\n"
                     "\t.previous\n"
                     _ASM_EXTABLE(1b, 3b)
-                    : "=a" (oldval), "+m" (*uaddr)
-                    : "i" (-EFAULT), "r" (newval), "0" (oldval)
+                    : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+                    : "i" (-EFAULT), "r" (newval), "1" (oldval)
                     : "memory"
        );
 
-       return oldval;
+       *uval = oldval;
+       return ret;
 }
 
 #endif
 
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
+                             int oldval, int newval)
 {
        return -ENOSYS;
 }
 
        return NULL;
 }
 
-static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
+static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
+                                     u32 uval, u32 newval)
 {
-       u32 curval;
+       int ret;
 
        pagefault_disable();
-       curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+       ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
        pagefault_enable();
 
-       return curval;
+       return ret;
 }
 
 static int get_futex_value_locked(u32 *dest, u32 __user *from)
        if (set_waiters)
                newval |= FUTEX_WAITERS;
 
-       curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
-
-       if (unlikely(curval == -EFAULT))
+       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
                return -EFAULT;
 
        /*
                lock_taken = 1;
        }
 
-       curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-       if (unlikely(curval == -EFAULT))
+       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
                return -EFAULT;
        if (unlikely(curval != uval))
                goto retry;
 
                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
-               curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-               if (curval == -EFAULT)
+               if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
         * There is no waiter, so we unlock the futex. The owner died
         * bit has not to be preserved here. We are the owner:
         */
-       oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
-
-       if (oldval == -EFAULT)
-               return oldval;
+       if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
+               return -EFAULT;
        if (oldval != uval)
                return -EAGAIN;
 
        while (1) {
                newval = (uval & FUTEX_OWNER_DIED) | newtid;
 
-               curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-               if (curval == -EFAULT)
+               if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
                        goto handle_fault;
                if (curval == uval)
                        break;
         * again. If it succeeds then we can return without waking
         * anyone else up:
         */
-       if (!(uval & FUTEX_OWNER_DIED))
-               uval = cmpxchg_futex_value_locked(uaddr, vpid, 0);
-
-
-       if (unlikely(uval == -EFAULT))
+       if (!(uval & FUTEX_OWNER_DIED) &&
+           cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
                goto pi_faulted;
        /*
         * Rare case: we managed to release the lock atomically,
                 * userspace.
                 */
                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
-               nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
-
-               if (nval == -EFAULT)
+               if (futex_atomic_cmpxchg_inatomic(&nval, uaddr, uval, mval))
                        return -1;
 
                if (nval != uval)
         * implementation, the non-functional ones will return
         * -ENOSYS.
         */
-       curval = cmpxchg_futex_value_locked(NULL, 0, 0);
-       if (curval == -EFAULT)
+       if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
                futex_cmpxchg_enabled = 1;
 
        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {