#ifndef __ASM_SH_BITOPS_LLSC_H
 #define __ASM_SH_BITOPS_LLSC_H
 
-static inline void set_bit(int nr, volatile void * addr)
+/*
+ * set_bit - atomically set a bit: OR `mask` into the word at `a` inside a
+ * movli.l/movco.l (LL/SC) loop that retries (bf 1b) until the conditional
+ * store succeeds.  (mask/tmp setup lines are elided from this hunk.)
+ */
+static inline void set_bit(int nr, volatile void *addr)
 {
        int     mask;
        volatile unsigned int *a = addr;
        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! set_bit               \n\t"
-               "or             %3, %0                          \n\t"
+               "or             %2, %0                          \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
-               : "=&z" (tmp), "=r" (a)
-               : "1" (a), "r" (mask)
+               : "=&z" (tmp)
+               : "r" (a), "r" (mask)
                : "t", "memory"
        );
 }
 
-static inline void clear_bit(int nr, volatile void * addr)
+/*
+ * clear_bit - atomically clear a bit: AND `~mask` into the word at `a`
+ * under the same movli.l/movco.l (LL/SC) retry loop as set_bit.
+ */
+static inline void clear_bit(int nr, volatile void *addr)
 {
        int     mask;
        volatile unsigned int *a = addr;
        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! clear_bit             \n\t"
-               "and            %3, %0                          \n\t"
+               "and            %2, %0                          \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
-               : "=&z" (tmp), "=r" (a)
-               : "1" (a), "r" (~mask)
+               : "=&z" (tmp)
+               : "r" (a), "r" (~mask)
                : "t", "memory"
        );
 }
 
-static inline void change_bit(int nr, volatile void * addr)
+/*
+ * change_bit - atomically toggle a bit: XOR `mask` into the word at `a`
+ * under a movli.l/movco.l (LL/SC) retry loop.
+ */
+static inline void change_bit(int nr, volatile void *addr)
 {
        int     mask;
        volatile unsigned int *a = addr;
        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! change_bit            \n\t"
-               "xor            %3, %0                          \n\t"
+               "xor            %2, %0                          \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
-               : "=&z" (tmp), "=r" (a)
-               : "1" (a), "r" (mask)
+               : "=&z" (tmp)
+               : "r" (a), "r" (mask)
                : "t", "memory"
        );
 }
 
-static inline int test_and_set_bit(int nr, volatile void * addr)
+/*
+ * test_and_set_bit - atomically set a bit and return its previous state.
+ * The LL/SC loop snapshots the old word into retval (mov %0, %1) before
+ * ORing in the mask; after the loop, retval is masked down to the bit of
+ * interest, so the function returns non-zero iff the bit was already set.
+ */
+static inline int test_and_set_bit(int nr, volatile void *addr)
 {
        int     mask, retval;
        volatile unsigned int *a = addr;
 
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%1, %0 ! test_and_set_bit      \n\t"
-               "mov            %0, %2                          \n\t"
-               "or             %4, %0                          \n\t"
-               "movco.l        %0, @%1                         \n\t"
+               "movli.l        @%2, %0 ! test_and_set_bit      \n\t"
+               "mov            %0, %1                          \n\t"
+               "or             %3, %0                          \n\t"
+               "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
-               "and            %4, %2                          \n\t"
-               : "=&z" (tmp), "=r" (a), "=&r" (retval)
-               : "1" (a), "r" (mask)
+               "and            %3, %1                          \n\t"
+               : "=&z" (tmp), "=&r" (retval)
+               : "r" (a), "r" (mask)
                : "t", "memory"
        );
 
        return retval != 0;
 }
 
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+/*
+ * test_and_clear_bit - atomically clear a bit and return its previous state.
+ * Operands after renumbering: %0=tmp, %1=retval, %2=a, %3=mask, %4=~mask.
+ * The old word is snapshotted into retval, the word is ANDed with ~mask and
+ * stored; retval is then masked so the return is non-zero iff the bit was set.
+ */
+static inline int test_and_clear_bit(int nr, volatile void *addr)
 {
        int     mask, retval;
        volatile unsigned int *a = addr;
 
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%1, %0 ! test_and_clear_bit    \n\t"
-               "mov            %0, %2                          \n\t"
-               "and            %5, %0                          \n\t"
-               "movco.l        %0, @%1                         \n\t"
+               "movli.l        @%2, %0 ! test_and_clear_bit    \n\t"
+               "mov            %0, %1                          \n\t"
+               "and            %4, %0                          \n\t"
+               "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
-               "and            %4, %2                          \n\t"
+               "and            %3, %1                          \n\t"
                "synco                                          \n\t"
-               : "=&z" (tmp), "=r" (a), "=&r" (retval)
-               : "1" (a), "r" (mask), "r" (~mask)
+               : "=&z" (tmp), "=&r" (retval)
+               : "r" (a), "r" (mask), "r" (~mask)
                : "t", "memory"
        );
 
        return retval != 0;
 }
 
-static inline int test_and_change_bit(int nr, volatile void * addr)
+/*
+ * test_and_change_bit - atomically toggle a bit and return its previous
+ * state.  Same LL/SC shape as test_and_set_bit but with XOR; retval is
+ * masked after the loop.  (The trailing return is elided from this hunk.)
+ */
+static inline int test_and_change_bit(int nr, volatile void *addr)
 {
        int     mask, retval;
        volatile unsigned int *a = addr;
 
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%1, %0 ! test_and_change_bit   \n\t"
-               "mov            %0, %2                          \n\t"
-               "xor            %4, %0                          \n\t"
-               "movco.l        %0, @%1                         \n\t"
+               "movli.l        @%2, %0 ! test_and_change_bit   \n\t"
+               "mov            %0, %1                          \n\t"
+               "xor            %3, %0                          \n\t"
+               "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
-               "and            %4, %2                          \n\t"
+               "and            %3, %1                          \n\t"
                "synco                                          \n\t"
-               : "=&z" (tmp), "=r" (a), "=&r" (retval)
-               : "1" (a), "r" (mask)
+               : "=&z" (tmp), "=&r" (retval)
+               : "r" (a), "r" (mask)
                : "t", "memory"
        );
 
 
 
+       /* LL/SC exchange: retval = *m; *m = val; retried until movco.l succeeds. */
        __asm__ __volatile__ (
                "1:                                     \n\t"
-               "movli.l        @%1, %0 ! xchg_u32      \n\t"
-               "mov            %0, %2                  \n\t"
-               "mov            %4, %0                  \n\t"
-               "movco.l        %0, @%1                 \n\t"
+               "movli.l        @%2, %0 ! xchg_u32      \n\t"
+               "mov            %0, %1                  \n\t"
+               "mov            %3, %0                  \n\t"
+               "movco.l        %0, @%2                 \n\t"
                "bf             1b                      \n\t"
                "synco                                  \n\t"
-               : "=&z"(tmp), "=r" (m), "=&r" (retval)
-               : "1" (m), "r" (val)
+               : "=&z"(tmp), "=&r" (retval)
+               : "r" (m), "r" (val)
                : "t", "memory"
        );
 
 
+       /* LL/SC byte exchange: retval = *m; *m = val & 0xff (whole-word store). */
        __asm__ __volatile__ (
                "1:                                     \n\t"
-               "movli.l        @%1, %0 ! xchg_u8       \n\t"
-               "mov            %0, %2                  \n\t"
-               "mov            %4, %0                  \n\t"
-               "movco.l        %0, @%1                 \n\t"
+               "movli.l        @%2, %0 ! xchg_u8       \n\t"
+               "mov            %0, %1                  \n\t"
+               "mov            %3, %0                  \n\t"
+               "movco.l        %0, @%2                 \n\t"
                "bf             1b                      \n\t"
                "synco                                  \n\t"
-               : "=&z"(tmp), "=r" (m), "=&r" (retval)
-               : "1" (m), "r" (val & 0xff)
+               : "=&z"(tmp), "=&r" (retval)
+               : "r" (m), "r" (val & 0xff)
                : "t", "memory"
        );
 
 
+       /*
+        * LL/SC compare-and-exchange: retval = *m; if (retval == old) *m = new.
+        * Operand map after dropping the fake "=r"(a) output:
+        *   %0 = tmp, %1 = retval, %2 = m, %3 = old, %4 = new.
+        * On a successful compare the REPLACEMENT value (%4, "new") must be
+        * stored — not %3 ("old"), which would make cmpxchg a no-op on success.
+        */
        __asm__ __volatile__ (
                "1:                                             \n\t"
-               "movli.l        @%1, %0 ! __cmpxchg_u32         \n\t"
-               "mov            %0, %2                          \n\t"
-               "cmp/eq         %2, %4                          \n\t"
+               "movli.l        @%2, %0 ! __cmpxchg_u32         \n\t"
+               "mov            %0, %1                          \n\t"
+               "cmp/eq         %1, %3                          \n\t"
                "bf             2f                              \n\t"
-               "mov            %5, %0                          \n\t"
+               "mov            %4, %0                          \n\t"
                "2:                                             \n\t"
-               "movco.l        %0, @%1                         \n\t"
+               "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "synco                                          \n\t"
-               : "=&z" (tmp), "=r" (m), "=&r" (retval)
-               : "1" (m), "r" (old), "r" (new)
+               : "=&z" (tmp), "=&r" (retval)
+               : "r" (m), "r" (old), "r" (new)
                : "t", "memory"
        );