LOCK_EVENT(rwsem_opt_norspin)  /* # of disabled reader-only optspins   */
 LOCK_EVENT(rwsem_opt_rlock2)   /* # of opt-acquired 2ndary read locks  */
 LOCK_EVENT(rwsem_rlock)                /* # of read locks acquired             */
+LOCK_EVENT(rwsem_rlock_steal)  /* # of read locks by lock stealing     */
 LOCK_EVENT(rwsem_rlock_fast)   /* # of fast read locks acquired        */
 LOCK_EVENT(rwsem_rlock_fail)   /* # of failed read lock acquisitions   */
 LOCK_EVENT(rwsem_rlock_handoff)        /* # of read lock handoffs              */
 
        }
        return false;
 }
+
+/*
+ * Return true if no task is currently optimistic-spinning on this rwsem,
+ * i.e. the optimistic spin queue (OSQ) is unlocked/empty.
+ */
+static inline bool rwsem_no_spinners(struct rw_semaphore *sem)
+{
+	return !osq_is_locked(&sem->osq);
+}
+
 #else
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
                                           unsigned long nonspinnable)
        return false;
 }
 
+/*
+ * Optimistic spinning is compiled out: there can never be spinners.
+ * Parameter must carry its type — an untyped K&R-style parameter is an
+ * implicit-int declaration that modern C (and kernel builds with
+ * -Werror=strict-prototypes) rejects, and it would diverge from the
+ * typed signature of the CONFIG-enabled variant.
+ */
+static inline bool rwsem_no_spinners(struct rw_semaphore *sem)
+{
+	return false;
+}
+
 static inline int
 rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
 {
           !(count & RWSEM_WRITER_LOCKED))
                goto queue;
 
+       /*
+        * Reader optimistic lock stealing
+        *
+        * We can take the read lock directly without doing
+        * rwsem_optimistic_spin() if the conditions are right.
+        * Also wake up other readers if it is the first reader.
+        */
+       if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF)) &&
+           rwsem_no_spinners(sem)) {
+               rwsem_set_reader_owned(sem);
+               lockevent_inc(rwsem_rlock_steal);
+               if (rcnt == 1)
+                       goto wake_readers;
+               return sem;
+       }
+
        /*
         * Save the current read-owner of rwsem, if available, and the
         * reader nonspinnable bit.
                 * Wake up other readers in the wait list if the front
                 * waiter is a reader.
                 */
+wake_readers:
                if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
                        raw_spin_lock_irq(&sem->wait_lock);
                        if (!list_empty(&sem->wait_list))