 #include <linux/spinlock.h>
 #include <asm/qrwlock.h>
 
-/**
- * rspin_until_writer_unlock - inc reader count & spin until writer is gone
- * @lock  : Pointer to queue rwlock structure
- * @writer: Current queue rwlock writer status byte
- *
- * In interrupt context or at the head of the queue, the reader will just
- * increment the reader count & wait until the writer releases the lock.
- */
-static __always_inline void
-rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
-{
-       while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-               cpu_relax();
-               cnts = atomic_read_acquire(&lock->cnts);
-       }
-}
-
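
The open-coded wait loop removed above is subsumed by atomic_cond_read_acquire(),
introduced alongside this change as

    #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))

The VAL identifier used in the new wait conditions below is the variable that
macro exposes to its condition expression. As a rough sketch, assuming the
generic fallback from include/asm-generic/barrier.h (architectures may
override it; arm64, for example, can wait on an event instead of spinning):

    #define smp_cond_load_acquire(ptr, cond_expr) ({         \
            typeof(ptr) __PTR = (ptr);                        \
            typeof(*ptr) VAL;                                 \
            for (;;) {                                        \
                    VAL = READ_ONCE(*__PTR);                  \
                    if (cond_expr)                            \
                            break;                            \
                    cpu_relax();                              \
            }                                                 \
            /* ACQUIRE ordering once the condition holds */   \
            smp_acquire__after_ctrl_dep();                    \
            VAL;                                              \
    })

Unlike the removed helper, which performed atomic_read_acquire() on every
iteration, this spins with plain relaxed loads and issues the acquire
barrier once, after the final read.
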
 /**
  * queued_read_lock_slowpath - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
- * @cnts: Current qrwlock lock value
  */
-void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
+void queued_read_lock_slowpath(struct qrwlock *lock)
 {
        /*
         * Readers come here when they cannot get the lock without waiting
         */
        if (unlikely(in_interrupt())) {
                /*
                 * Readers in interrupt context will get the lock immediately
-                * if the writer is just waiting (not holding the lock yet).
-                * The rspin_until_writer_unlock() function returns immediately
-                * in this case. Otherwise, they will spin (with ACQUIRE
-                * semantics) until the lock is available without waiting in
-                * the queue.
+                * if the writer is just waiting (not holding the lock yet),
+                * so spin with ACQUIRE semantics until the lock is available
+                * without waiting in the queue.
                 */
-               rspin_until_writer_unlock(lock, cnts);
+               atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK) != _QW_LOCKED);
                return;
        }
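
The in_interrupt() escape hatch preserves the rwlock guarantee that read
locks are recursive from interrupt context: an IRQ handler must be able to
take the read lock even while a writer is queued, because the interrupted
task may itself hold that read lock. A hypothetical sketch (demo_lock,
demo_task_work() and demo_irq_handler() are invented for illustration,
assuming <linux/spinlock.h>):

    static DEFINE_RWLOCK(demo_lock);

    void demo_task_work(void)           /* process context */
    {
            read_lock(&demo_lock);      /* reader count now non-zero */
            /* ... an IRQ arrives here while a writer is queued ... */
            read_unlock(&demo_lock);
    }

    void demo_irq_handler(void)         /* interrupt context */
    {
            /*
             * Queuing behind the waiting writer would deadlock: the
             * writer waits for demo_task_work()'s reader to drain, and
             * that reader cannot proceed until this handler returns.
             */
            read_lock(&demo_lock);
            read_unlock(&demo_lock);
    }
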
        atomic_sub(_QR_BIAS, &lock->cnts);
 
        /*
         * Put the reader into the wait queue
         */
        arch_spin_lock(&lock->wait_lock);
+       atomic_add(_QR_BIAS, &lock->cnts);
 
        /*
         * The ACQUIRE semantics of the following spinning code ensure
         * that accesses can't leak upwards out of our subsequent critical
         * section in the case that the lock is currently held for write.
         */
-       cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
-       rspin_until_writer_unlock(lock, cnts);
+       atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK) != _QW_LOCKED);
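
Both wait conditions test only the low writer byte, so the reader's own
_QR_BIAS contribution in the upper bits never disturbs them. For reference,
the constants from include/asm-generic/qrwlock.h as of this change:

    /*
     * Layout of lock->cnts:
     *
     *  31                    8 7        0
     * +-----------------------+----------+
     * |     reader count      | wr. mode |
     * +-----------------------+----------+
     */
    #define _QW_WAITING 1               /* A writer is waiting      */
    #define _QW_LOCKED  0xff            /* A writer holds the lock  */
    #define _QW_WMASK   0xff            /* Writer mask              */
    #define _QR_SHIFT   8               /* Reader count shift       */
    #define _QR_BIAS    (1U << _QR_SHIFT)

(VAL & _QW_WMASK) != _QW_LOCKED is therefore true both when the lock is
free and when a writer is merely waiting (_QW_WAITING), which is exactly
what the interrupt-context fast path above relies on.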
 
        /*
         * Signal the next one in queue to become queue head
         */
        arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_read_lock_slowpath);
 
 /**
  * queued_write_lock_slowpath - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
-       u32 cnts;
-
        /* Put the writer into the wait queue */
        arch_spin_lock(&lock->wait_lock);
 
        /* Try to acquire the lock directly if no reader is present */
        if (!atomic_read(&lock->cnts) &&
            (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
                goto unlock;
 
        /*
         * Set the waiting flag to notify readers that a writer is pending,
         * or wait for a previous writer to go away.
         */
        for (;;) {
                struct __qrwlock *l = (struct __qrwlock *)lock;
 
                if (!READ_ONCE(l->wmode) &&
                    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
                        break;
 
                cpu_relax();
        }
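
The cast in the waiting-flag loop above relies on an internal overlay
defined earlier in this file, reproduced here for context (a later patch in
this series drops it in favour of a plain atomic_add() on the waiting bit):

    struct __qrwlock {
            union {
                    atomic_t cnts;
                    struct {
    #ifdef __LITTLE_ENDIAN
                            u8 wmode;       /* Writer mode   */
                            u8 rcnts[3];    /* Reader counts */
    #else
                            u8 rcnts[3];    /* Reader counts */
                            u8 wmode;       /* Writer mode   */
    #endif
                    };
            };
            arch_spinlock_t lock;
    };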
 
        /* When no more readers, set the locked flag */
-       for (;;) {
-               cnts = atomic_read(&lock->cnts);
-               if ((cnts == _QW_WAITING) &&
-                   (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
-                                           _QW_LOCKED) == _QW_WAITING))
-                       break;
-
-               cpu_relax();
-       }
+       do {
+               atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+       } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+                                       _QW_LOCKED) != _QW_WAITING);
 unlock:
        arch_spin_unlock(&lock->wait_lock);
 }
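
With these hunks applied, the writer's drain loop is expressed entirely via
atomic_cond_read_acquire(): the ACQUIRE is supplied by the cond-read that
observes _QW_WAITING, which is why the cmpxchg installing _QW_LOCKED can be
relaxed. A sketch of the resulting function, reconstructed from the diff
(the unchanged waiting-flag section is elided):

    void queued_write_lock_slowpath(struct qrwlock *lock)
    {
            /* Put the writer into the wait queue */
            arch_spin_lock(&lock->wait_lock);

            /* Try to acquire the lock directly if no reader is present */
            if (!atomic_read(&lock->cnts) &&
                (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
                    goto unlock;

            /* ... set the waiting flag, as in the unchanged code above ... */

            /* When no more readers, set the locked flag */
            do {
                    atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
            } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
                                            _QW_LOCKED) != _QW_WAITING);
    unlock:
            arch_spin_unlock(&lock->wait_lock);
    }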