smp_mb();
 }
 
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking: writers can be starved indefinitely by reader(s).
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * A counter of zero means a writer holds the lock exclusively:
+        * deny the reader. Otherwise grant the lock to the first/subsequent reader.
+        *
+        *      if (rw->counter > 0) {
+        *              rw->counter--;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
+       "       sub     %[val], %[val], 1       \n"     /* reader lock */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
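
The loop above is the classic load-locked/store-conditional retry pattern: LLOCK marks the line, SCOND succeeds only if nobody wrote to it in between, and BNZ loops back on failure. Below is a minimal portable sketch of the same read-lock semantics using C11 atomics instead of ARC assembly; struct demo_rwlock, DEMO_RW_UNLOCKED and the demo_* names are illustrative stand-ins (not part of this patch), and the default seq_cst ordering plays the role of the explicit smp_mb() pairs:

#include <stdatomic.h>

/* counter starts at a large positive value (standing in for
 * __ARCH_RW_LOCK_UNLOCKED__); each reader decrements it, 0 == write locked */
#define DEMO_RW_UNLOCKED 0x01000000
struct demo_rwlock { atomic_uint counter; };

static void demo_read_lock(struct demo_rwlock *rw)
{
	unsigned int val = atomic_load(&rw->counter);

	for (;;) {
		while (val == 0)			/* write locked: spin */
			val = atomic_load(&rw->counter);

		/* rw->counter--; a failed CAS reloads val, like a failed scond */
		if (atomic_compare_exchange_weak(&rw->counter, &val, val - 1))
			break;
	}
}
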
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
+       "       sub     %[val], %[val], 1       \n"     /* counter-- */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"     /* retry if collided with someone */
+       "       mov     %[got_it], 1            \n"
+       "                                       \n"
+       "4: ; --- done ---                      \n"
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
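
The trylock flavour is the same sequence with the spin turned into a forward branch: a write-locked value bails straight to label 4 with got_it still 0, and only an scond collision loops back. In the same illustrative C11 terms as the sketch above:

static int demo_read_trylock(struct demo_rwlock *rw)
{
	unsigned int val = atomic_load(&rw->counter);

	while (val > 0) {			/* no writer: try to grab a slot */
		if (atomic_compare_exchange_weak(&rw->counter, &val, val - 1))
			return 1;		/* got it */
		/* CAS failure reloaded val; loop re-checks for a writer */
	}
	return 0;				/* write locked: bail */
}
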
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+        * deny the writer; otherwise, if unlocked, grant it to the writer.
+        * Hence the claim that Linux rwlocks are unfair to writers
+        * (they can be starved for an indefinite time by readers).
+        *
+        *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+        *              rw->counter = 0;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
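
Continuing the illustrative sketch: the writer spins until the counter reads exactly the fully-unlocked value (no readers, no writer) and then swaps in 0:

static void demo_write_lock(struct demo_rwlock *rw)
{
	unsigned int val;

	do {
		val = DEMO_RW_UNLOCKED;		/* expected: fully unlocked */
		/* install 0 (write locked); retry while readers/writer hold it
		 * or the CAS collides, mirroring brne/bnz back to 1b */
	} while (!atomic_compare_exchange_weak(&rw->counter, &val, 0));
}
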
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"     /* retry if collided with someone */
+       "       mov     %[got_it], 1            \n"
+       "                                       \n"
+       "4: ; --- done ---                      \n"
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
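
And the corresponding try variant gives up as soon as it observes anything other than the fully-unlocked value, retrying only on a spurious CAS failure (the scond-collision case):

static int demo_write_trylock(struct demo_rwlock *rw)
{
	unsigned int val = DEMO_RW_UNLOCKED;

	while (!atomic_compare_exchange_weak(&rw->counter, &val, 0)) {
		if (val != DEMO_RW_UNLOCKED)
			return 0;		/* readers/writer present: bail */
		/* val still UNLOCKED: spurious weak-CAS failure, retry */
	}
	return 1;
}
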
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * rw->counter++;
+        */
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       add     %[val], %[val], 1       \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter))
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+       smp_mb();
+
+       rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+       smp_mb();
+}
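
Note that neither unlock needs a compare: read_unlock is an unconditional increment (hence the plain LLOCK/add/SCOND loop above), and write_unlock can be a simple store because the writer holds the lock exclusively, so no concurrent update of counter is possible. Rounding out the sketch:

static void demo_read_unlock(struct demo_rwlock *rw)
{
	atomic_fetch_add(&rw->counter, 1);	/* rw->counter++ */
}

static void demo_write_unlock(struct demo_rwlock *rw)
{
	atomic_store(&rw->counter, DEMO_RW_UNLOCKED);
}
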
+
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
        smp_mb();
 }
 
-#endif
-
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking: writers can be starved indefinitely by reader(s).
  *
  * The spinlock itself is contained in @counter and access to it is
  * serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
  */
 
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x)  ((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
 /* 1 - lock taken successfully */
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        arch_spin_unlock(&(rw->lock_mutex));
 }
 
+#endif
+
+#define arch_read_can_lock(x)  ((x)->counter > 0)
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
 #define arch_read_lock_flags(lock, flags)      arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)     arch_write_lock(lock)