Some of the assembly code in the LoongArch port likely originated
from a time when the assembler did not support pseudo-instructions
like "move" or "jr", so the desugared form was used instead, at a
minor cost to readability.

As the upstream toolchain has supported these pseudo-instructions
from the beginning, migrate the few existing usages to them for
better readability.
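
For reference, both pseudo-instructions are plain aliases that the
assembler expands to a single canonical instruction:

        move  rd, rj            # expands to:  or    rd, rj, $zero
        jr    rj                # expands to:  jirl  $zero, rj, 0
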
Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
                __asm__ __volatile__(
                "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
                "       addi.w  %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
+               "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.w    %1, %2                                  \n"
                "       beq     $zero, %1, 1b                           \n"
                __asm__ __volatile__(
                "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
                "       sub.w   %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
+               "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.w    %1, %2                                  \n"
                "       beq     $zero, %1, 1b                           \n"
                __asm__ __volatile__(
                "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
                "       addi.d  %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
+               "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.d    %1, %2                                  \n"
                "       beq     %1, $zero, 1b                           \n"
                __asm__ __volatile__(
                "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
                "       sub.d   %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
+               "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.d    %1, %2                                  \n"
                "       beq     %1, $zero, 1b                           \n"
 
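The store-conditional in these LL/SC loops (sc.w/sc.d) overwrites its
source register with a success flag, so the freshly computed value in
%0 must first be copied into the scratch operand %1; that copy is
exactly where the desugared "or %1, %0, $zero" sat and is now spelled
"move %1, %0". A minimal sketch of how such a loop is wrapped in C
(the function name and constraints are illustrative, not the exact
kernel code):

        static inline int sub_if_positive_sketch(int i, int *v)
        {
                int result, temp;

                __asm__ __volatile__(
                "1:     ll.w    %1, %2          \n" /* load-linked old value     */
                "       sub.w   %0, %1, %3      \n" /* compute old - i           */
                "       move    %1, %0          \n" /* seed %1: sc.w clobbers it */
                "       blt     %0, $zero, 2f   \n" /* would go negative: bail   */
                "       sc.w    %1, %2          \n" /* store-conditional         */
                "       beq     $zero, %1, 1b   \n" /* lost the race: retry      */
                "2:                             \n"
                : "=&r" (result), "=&r" (temp), "+ZB" (*v)
                : "r" (i)
                : "memory");

                return result;
        }
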
        __asm__ __volatile__(                                           \
        "1:     " ld "  %0, %2          # __cmpxchg_asm \n"             \
        "       bne     %0, %z3, 2f                     \n"             \
-       "       or      $t0, %z4, $zero                 \n"             \
+       "       move    $t0, %z4                        \n"             \
        "       " st "  $t0, %1                         \n"             \
        "       beq     $zero, $t0, 1b                  \n"             \
        "2:                                             \n"             \
 
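__cmpxchg_asm is parameterized over the load/store mnemonics so one
body serves both widths; "move $t0, %z4" copies the new value into
the scratch register $t0 because the store-conditional destroys its
source operand, and the %z operand modifier substitutes $zero when
the value is the constant 0. Schematically, the macro is instantiated
like this (illustrative, not the exact call sites):

        old32 = __cmpxchg_asm("ll.w", "sc.w", ptr32, old, new);
        old64 = __cmpxchg_asm("ll.d", "sc.d", ptr64, old, new);
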
        "# futex_atomic_cmpxchg_inatomic                        \n"
        "1:     ll.w    %1, %3                                  \n"
        "       bne     %1, %z4, 3f                             \n"
-       "       or      $t0, %z5, $zero                         \n"
+       "       move    $t0, %z5                                \n"
        "2:     sc.w    $t0, %2                                 \n"
        "       beq     $zero, $t0, 1b                          \n"
        "3:                                                     \n"
 
        "2:                                                     \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li.w    %0, %3                                  \n"     \
-       "       or      %1, $zero, $zero                        \n"     \
+       "       move    %1, $zero                               \n"     \
        "       b       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
 
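The .fixup snippet above is the out-of-line recovery path for a
faulting user access: an __ex_table entry pairs the access at label 1
with label 3, so on a fault the exception handler resumes there, the
fixup loads the error code %3 (typically -EFAULT) into the return
value, zeroes the data register with "move %1, $zero", and branches
back to label 2. The pairing looks roughly like this (the kernel
wraps it in macros; the directive here is illustrative):

        .section __ex_table, "a"
        .dword  1b, 3b          # fault at 1b -> resume at fixup 3b
        .previous
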
        /* KSave3 used for percpu base, initialized as 0 */
        csrwr           zero, PERCPU_BASE_KS
        /* GPR21 used for percpu base (runtime), initialized as 0 */
-       or              u0, zero, zero
+       move            u0, zero
 
        la              tp, init_thread_union
        /* Set the SP after an empty pt_regs.  */
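
GPR21 ($r21, called u0 in kernel code) is reserved by the ABI and
used as the per-CPU base register; starting it at zero makes early
per-CPU accesses resolve to the original percpu section until the
real per-CPU areas are set up. A per-CPU load through it looks
roughly like this (the variable name is hypothetical):

        la.pcrel        t0, some_percpu_var     # address in the percpu image
        ldx.w           t0, t0, u0              # plus this CPU's base offset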