        osq_lock_init(&sem->osq);
 #endif
 }
-
 EXPORT_SYMBOL(__init_rwsem);
 
 enum rwsem_waiter_type {
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only marked woken if downgrading is false
  */
-static void __rwsem_mark_wake(struct rw_semaphore *sem,
-                             enum rwsem_wake_type wake_type,
-                             struct wake_q_head *wake_q)
+static void rwsem_mark_wake(struct rw_semaphore *sem,
+                           enum rwsem_wake_type wake_type,
+                           struct wake_q_head *wake_q)
 {
        struct rwsem_waiter *waiter, *tmp;
        long oldcount, woken = 0, adjustment = 0;
 
                /*
                 * Ensure calling get_task_struct() before setting the reader
-                * waiter to nil such that rwsem_down_read_failed() cannot
+                * waiter to nil such that rwsem_down_read_slowpath() cannot
                 * race with do_exit() by always holding a reference count
                 * to the task to wakeup.
                 */
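
(For context, the surrounding code in rwsem_mark_wake() follows exactly the
order this comment describes; a condensed sketch, assuming wake_q_add_safe()
as in mainline, which consumes the reference taken by get_task_struct():

	tsk = waiter->task;
	get_task_struct(tsk);
	list_del(&waiter->list);
	/* publish the wakeup; the reader's wait loop watches waiter.task */
	smp_store_release(&waiter->task, NULL);
	/* wake_q_add_safe() takes over the reference taken above */
	wake_q_add_safe(wake_q, tsk);

Once waiter->task is NULL the woken reader may return from the slowpath and
exit, so the task struct is only kept alive by the reference taken first.)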
 /*
  * Wait for the read lock to be granted
  */
-static inline struct rw_semaphore __sched *
-__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+static struct rw_semaphore __sched *
+rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 {
        long count, adjustment = -RWSEM_READER_BIAS;
        struct rwsem_waiter waiter;
         */
        if (!(count & RWSEM_LOCK_MASK) ||
           (!(count & RWSEM_WRITER_MASK) && (adjustment & RWSEM_FLAG_WAITERS)))
-               __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+               rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);
        return ERR_PTR(-EINTR);
 }
 
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-       return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed);
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed_killable(struct rw_semaphore *sem)
-{
-       return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed_killable);
-
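
Both wrapper pairs existed only to bake a TASK_* state into the common
slowpath; with the state passed as a parameter (here and for the write path
below), the wait loop uses it directly. A condensed sketch of how the read
slowpath consumes it, with the error path elided:

	while (true) {
		set_current_state(state);
		if (!waiter.task)	/* zeroed by rwsem_mark_wake() */
			break;
		/*
		 * signal_pending_state() returns 0 for
		 * TASK_UNINTERRUPTIBLE, so only killable/interruptible
		 * callers can ever bail out here.
		 */
		if (signal_pending_state(state, current))
			goto out_nolock;
		schedule();
	}
	__set_current_state(TASK_RUNNING);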
 /*
  * Wait until we successfully acquire the write lock
  */
-static inline struct rw_semaphore *
-__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
+static struct rw_semaphore *
+rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 {
        long count;
        bool waiting = true; /* any queued threads before us */
                 */
                if (!(count & RWSEM_WRITER_MASK) &&
                     (count & RWSEM_READER_MASK)) {
-                       __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
+                       rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
                        /*
                         * The wakeup is normally called _after_ the wait_lock
                         * is released, but given that we are proactively waking
        if (list_empty(&sem->wait_list))
                atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
        else
-               __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+               rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);
        lockevent_inc(rwsem_wlock_fail);
        return ERR_PTR(-EINTR);
 }
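
Because rwsem_mark_wake() runs there while wait_lock is still held, the
queued wakeups are flushed immediately and the on-stack queue is
reinitialized for reuse later in the function. Roughly:

	rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
	wake_up_q(&wake_q);	/* flush now, still under wait_lock */
	wake_q_init(&wake_q);	/* reinitialize; used again on exit paths */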
 
-__visible struct rw_semaphore * __sched
-rwsem_down_write_failed(struct rw_semaphore *sem)
-{
-       return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(rwsem_down_write_failed);
-
-__visible struct rw_semaphore * __sched
-rwsem_down_write_failed_killable(struct rw_semaphore *sem)
-{
-       return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(rwsem_down_write_failed_killable);
-
 /*
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
  */
-__visible
-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (!list_empty(&sem->wait_list))
-               __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+               rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);
 
        return sem;
 }
-EXPORT_SYMBOL(rwsem_wake);
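
rwsem_wake() can become static because all of its callers now live in this
file. Its shape is the canonical wake_q pattern: tasks are only *marked* for
wakeup under wait_lock, and the expensive wakeups run after the unlock so
woken tasks do not immediately contend on wait_lock again. In isolation,
with a hypothetical raw_spinlock_t lock and task pointer tsk:

	DEFINE_WAKE_Q(wake_q);			/* on-stack wakeup list */

	raw_spin_lock_irqsave(&lock, flags);
	wake_q_add(&wake_q, tsk);		/* cheap: queue only */
	raw_spin_unlock_irqrestore(&lock, flags);

	wake_up_q(&wake_q);			/* real wakeups, lock dropped */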
 
 /*
  * downgrade a write lock into a read lock
  * - caller incremented waiting part of count and discovered it still negative
  * - just wake up any readers at the front of the queue
  */
-__visible
-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (!list_empty(&sem->wait_list))
-               __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
+               rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);
 
        return sem;
 }
-EXPORT_SYMBOL(rwsem_downgrade_wake);
 
 /*
  * lock for reading
 {
        if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
                        &sem->count) & RWSEM_READ_FAILED_MASK)) {
-               rwsem_down_read_failed(sem);
+               rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
                DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
                                        RWSEM_READER_OWNED), sem);
        } else {
 {
        if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
                        &sem->count) & RWSEM_READ_FAILED_MASK)) {
-               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+               if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
                        return -EINTR;
                DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
                                        RWSEM_READER_OWNED), sem);
         */
        long tmp = RWSEM_UNLOCKED_VALUE;
 
-       lockevent_inc(rwsem_rtrylock);
        do {
                if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
                                        tmp + RWSEM_READER_BIAS)) {
  */
 static inline void __down_write(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_long_cmpxchg_acquire(&sem->count, 0,
-                                                RWSEM_WRITER_LOCKED)))
-               rwsem_down_write_failed(sem);
+       long tmp = RWSEM_UNLOCKED_VALUE;
+
+       if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+                                                     RWSEM_WRITER_LOCKED)))
+               rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
        rwsem_set_owner(sem);
 }
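
The cmpxchg() to try_cmpxchg() conversion is behaviour-preserving:
atomic_long_try_cmpxchg_acquire() returns a bool and, on failure, writes the
value it actually found back through its second argument, so the caller no
longer re-compares the old value (and on x86 the generated code can test the
flags from the cmpxchg instruction directly). Side by side:

	/* old: compare the returned value ourselves */
	if (atomic_long_cmpxchg_acquire(&sem->count, 0, RWSEM_WRITER_LOCKED) != 0)
		/* contended */;

	/* new: bool result; tmp holds the observed count on failure */
	long tmp = RWSEM_UNLOCKED_VALUE;
	if (!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED))
		/* contended */;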
 
 static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_long_cmpxchg_acquire(&sem->count, 0,
-                                                RWSEM_WRITER_LOCKED)))
-               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+       long tmp = RWSEM_UNLOCKED_VALUE;
+
+       if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+                                                     RWSEM_WRITER_LOCKED))) {
+               if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
                        return -EINTR;
+       }
        rwsem_set_owner(sem);
        return 0;
 }
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       long tmp;
+       long tmp = RWSEM_UNLOCKED_VALUE;
 
-       lockevent_inc(rwsem_wtrylock);
-       tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
-                                         RWSEM_WRITER_LOCKED);
-       if (tmp == RWSEM_UNLOCKED_VALUE) {
+       if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+                                           RWSEM_WRITER_LOCKED)) {
                rwsem_set_owner(sem);
                return true;
        }
 {
        long tmp;
 
-       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
-                               sem);
+       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED), sem);
        rwsem_clear_reader_owned(sem);
        tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
-       if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS))
-                       == RWSEM_FLAG_WAITERS))
+       if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
+                     RWSEM_FLAG_WAITERS))
                rwsem_wake(sem);
 }
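
The rewrapped condition is worth unpacking: tmp is the post-decrement count,
and the wakeup fires only when the lock just became completely free *and*
waiters are queued. Assuming the count layout used elsewhere in this rewrite
(writer-locked in bit 0, waiters flag in bit 1, reader count from bit 8):

	/*
	 * count = 0x102: one reader remains, waiters queued
	 *	(0x102 & (LOCK_MASK|WAITERS)) == 0x102 != WAITERS -> no wakeup
	 * count = 0x002: lock free, waiters queued
	 *	(0x002 & (LOCK_MASK|WAITERS)) == 0x002 == WAITERS -> rwsem_wake()
	 */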
 
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
+       long tmp;
+
        DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
        rwsem_clear_owner(sem);
-       if (unlikely(atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED,
-                       &sem->count) & RWSEM_FLAG_WAITERS))
+       tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
+       if (unlikely(tmp & RWSEM_FLAG_WAITERS))
                rwsem_wake(sem);
 }
 
 
        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 }
-
 EXPORT_SYMBOL(down_read);
 
 int __sched down_read_killable(struct rw_semaphore *sem)
 
        return 0;
 }
-
 EXPORT_SYMBOL(down_read_killable);
 
 /*
                rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
        return ret;
 }
-
 EXPORT_SYMBOL(down_read_trylock);
 
 /*
 {
        might_sleep();
        rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
-
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
-
 EXPORT_SYMBOL(down_write);
 
 /*
        might_sleep();
        rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 
-       if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+       if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
+                                 __down_write_killable)) {
                rwsem_release(&sem->dep_map, 1, _RET_IP_);
                return -EINTR;
        }
 
        return 0;
 }
-
 EXPORT_SYMBOL(down_write_killable);
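
LOCK_CONTENDED_RETURN() is the lockdep/lockstat shim: with CONFIG_LOCK_STAT
disabled it is roughly #define LOCK_CONTENDED_RETURN(_lock, try, lock)
lock(_lock), so the call above collapses to a plain
__down_write_killable(sem); with lockstat enabled it first attempts the
trylock and records a contention event before taking the slow path. A sketch
of the lockstat variant (from the usual lockdep.h definition, not this patch):

	ret = 0;
	if (!__down_write_trylock(sem)) {
		lock_contended(&sem->dep_map, _RET_IP_);
		ret = __down_write_killable(sem);
	}
	if (!ret)
		lock_acquired(&sem->dep_map, _RET_IP_);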
 
 /*
 
        return ret;
 }
-
 EXPORT_SYMBOL(down_write_trylock);
 
 /*
 void up_read(struct rw_semaphore *sem)
 {
        rwsem_release(&sem->dep_map, 1, _RET_IP_);
-
        __up_read(sem);
 }
-
 EXPORT_SYMBOL(up_read);
 
 /*
 void up_write(struct rw_semaphore *sem)
 {
        rwsem_release(&sem->dep_map, 1, _RET_IP_);
-
        __up_write(sem);
 }
-
 EXPORT_SYMBOL(up_write);
 
 /*
 void downgrade_write(struct rw_semaphore *sem)
 {
        lock_downgrade(&sem->dep_map, _RET_IP_);
-
        __downgrade_write(sem);
 }
-
 EXPORT_SYMBOL(downgrade_write);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 {
        might_sleep();
        rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
-
        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 }
-
 EXPORT_SYMBOL(down_read_nested);
 
 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
 {
        might_sleep();
        rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
-
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
-
 EXPORT_SYMBOL(_down_write_nest_lock);
 
 void down_read_non_owner(struct rw_semaphore *sem)
 {
        might_sleep();
-
        __down_read(sem);
        __rwsem_set_reader_owned(sem, NULL);
 }
-
 EXPORT_SYMBOL(down_read_non_owner);
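
The non-owner variants cover the rare case where the task that takes the
lock is not the one that releases it; recording the reader owner as NULL
keeps the owner-field debug checks quiet. A hypothetical handoff sketch:

	static struct rw_semaphore io_sem;	/* hypothetical lock */

	static void submit_side(void)		/* task A */
	{
		down_read_non_owner(&io_sem);	/* owner recorded as NULL */
		/* hand the locked region off, e.g. to a workqueue */
	}

	static void complete_side(void)		/* runs in task B */
	{
		up_read_non_owner(&io_sem);	/* release by a non-owner */
	}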
 
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
        might_sleep();
        rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
-
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
-
 EXPORT_SYMBOL(down_write_nested);
 
 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
        might_sleep();
        rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
 
-       if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+       if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
+                                 __down_write_killable)) {
                rwsem_release(&sem->dep_map, 1, _RET_IP_);
                return -EINTR;
        }
 
        return 0;
 }
-
 EXPORT_SYMBOL(down_write_killable_nested);
 
 void up_read_non_owner(struct rw_semaphore *sem)
                                sem);
        __up_read(sem);
 }
-
 EXPORT_SYMBOL(up_read_non_owner);
 
 #endif