        struct task_struct *task;
 };
 
-static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
-{
-       return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
-/*
- * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
- * Returns 1 if count was successfully changed; @*old will have @new value.
- * Returns 0 if count was not changed; @*old will have most recent sem->count
- */
-static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
-{
-       long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
-       if (tmp == *old) {
-               *old = new;
-               return 1;
-       } else {
-               *old = tmp;
-               return 0;
-       }
-}
-
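
Reviewer note: atomic_long_try_cmpxchg() nearly matches the contract of the removed helper. It returns true when the swap happens and, on failure, writes the value it actually observed back through its old-value pointer. The one difference is the success path, where *old keeps the expected value instead of being advanced to @new. A sketch of the removed helper rebuilt on the new primitive, illustrative only and not part of the patch:

        static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
        {
                if (atomic_long_try_cmpxchg(&sem->count, old, new)) {
                        *old = new;     /* try_cmpxchg() leaves *old alone on success */
                        return 1;
                }
                /* failure: *old already holds the count actually observed */
                return 0;
        }

This is why most converted call sites need no further change, while the one success path that keeps using its local count (in down_read_failed(), below) gains an explicit fix-up.
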
 /*
  * Initialize an ldsem:
  */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
-       sem->count = LDSEM_UNLOCKED;
+       atomic_long_set(&sem->count, LDSEM_UNLOCKED);
        sem->wait_readers = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->read_wait);
        struct task_struct *tsk;
        long adjust, count;
 
-       /* Try to grant read locks to all readers on the read wait list.
+       /*
+        * Try to grant read locks to all readers on the read wait list.
         * Note the 'active part' of the count is incremented by
         * the number of readers before waking any processes up.
         */
        adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
-       count = ldsem_atomic_update(adjust, sem);
+       count = atomic_long_add_return(adjust, &sem->count);
        do {
                if (count > 0)
                        break;
-               if (ldsem_cmpxchg(&count, count - adjust, sem))
+               if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
                        return;
        } while (1);
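
As a sanity check on the adjust arithmetic above, assuming the usual ldsem bias constants from the top of the file (LDSEM_ACTIVE_BIAS = 1L, LDSEM_WAIT_BIAS = -LDSEM_ACTIVE_MASK - 1, 64-bit long): each blocked reader parked one WAIT_BIAS in the count, and adjust trades that for one ACTIVE_BIAS per reader before any of them is woken. A compilable userspace sketch of the two-reader case:

        #include <assert.h>

        #define LDSEM_ACTIVE_MASK       0xffffffffL
        #define LDSEM_ACTIVE_BIAS       1L
        #define LDSEM_WAIT_BIAS         (-LDSEM_ACTIVE_MASK - 1)

        int main(void)
        {
                long wait_readers = 2;
                long count = wait_readers * LDSEM_WAIT_BIAS;    /* 2 blocked readers */
                long adjust = wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);

                count += adjust;        /* what atomic_long_add_return() would yield */
                assert(count == 2 * LDSEM_ACTIVE_BIAS);         /* both now active */
                return 0;
        }

A positive result breaks out of the loop and the readers are woken; if a writer had taken the active part in the meantime, the result would still be <= 0 and the cmpxchg would try to back the adjustment out.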
 
 
 static inline int writer_trylock(struct ld_semaphore *sem)
 {
-       /* only wake this writer if the active part of the count can be
+       /*
+        * Only wake this writer if the active part of the count can be
         * transitioned from 0 -> 1
         */
-       long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
+       long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
        do {
                if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
                        return 1;
-               if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
+               if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
                        return 0;
        } while (1);
 }
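
The 0 -> 1 transition test is easiest to see with concrete values. Under the same assumed constants, a userspace sketch of the count right after LDSEM_ACTIVE_BIAS has been added:

        #include <assert.h>

        #define LDSEM_ACTIVE_MASK       0xffffffffL
        #define LDSEM_ACTIVE_BIAS       1L
        #define LDSEM_WAIT_BIAS         (-LDSEM_ACTIVE_MASK - 1)

        int main(void)
        {
                /* One waiter, no active owners: the transition succeeds. */
                long count = LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS;
                assert((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS);

                /* One waiter plus a still-active reader: must back out. */
                count = LDSEM_WAIT_BIAS + 2 * LDSEM_ACTIVE_BIAS;
                assert((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS);
                return 0;
        }
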
        /* set up my own style of waitqueue */
        raw_spin_lock_irq(&sem->wait_lock);
 
-       /* Try to reverse the lock attempt but if the count has changed
-        * so that reversing fails, check if there are are no waiters,
-        * and early-out if not */
+       /*
+        * Try to reverse the lock attempt but if the count has changed
+        * so that reversing fails, check if there are no waiters,
+        * and early-out if not.
+        */
        do {
-               if (ldsem_cmpxchg(&count, count + adjust, sem))
+               if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
+                       count += adjust;
                        break;
+               }
                if (count > 0) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        return sem;
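
The added count += adjust is the one behavioural fix-up this conversion needs: the old helper advanced the local count on success, the new primitive does not. In isolation (illustrative, with sem and adjust as in the surrounding function):

        long expected = atomic_long_read(&sem->count);

        if (atomic_long_try_cmpxchg(&sem->count, &expected, expected + adjust)) {
                /* swap done: 'expected' still holds the pre-swap value */
                expected += adjust;
        } else {
                /* swap failed: 'expected' was rewritten with the observed count */
        }

Without the fix-up, the active-part check later in down_read_failed() (outside this hunk) would test a stale count.
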
        __set_current_state(TASK_RUNNING);
 
        if (!timeout) {
-               /* lock timed out but check if this task was just
-                * granted lock ownership - if so, pretend there
-                * was no timeout; otherwise, cleanup lock wait */
+               /*
+                * Lock timed out but check if this task was just
+                * granted lock ownership - if so, pretend there
+                * was no timeout; otherwise, clean up the lock wait.
+                */
                raw_spin_lock_irq(&sem->wait_lock);
                if (waiter.task) {
-                       ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+                       atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
                        list_del(&waiter.list);
                        raw_spin_unlock_irq(&sem->wait_lock);
                        put_task_struct(waiter.task);
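
The waiter.task test works because ->task doubles as the grant flag: the wake side clears it, with sem->wait_lock held, before issuing the wake-up, so a timed-out sleeper that still sees its own task pointer knows the lock was never handed over and backs its WAIT_BIAS out. A sketch of the waker's half of that convention (illustrative; the real code lives in the wake helpers, which this hunk does not show):

        static void ldsem_grant_sketch(struct ldsem_waiter *waiter)
        {
                struct task_struct *tsk = waiter->task;

                waiter->task = NULL;    /* mark "granted" before the wake-up */
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
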
        /* set up my own style of waitqueue */
        raw_spin_lock_irq(&sem->wait_lock);
 
-       /* Try to reverse the lock attempt but if the count has changed
+       /*
+        * Try to reverse the lock attempt but if the count has changed
         * so that reversing fails, check if the lock is now owned,
-        * and early-out if so */
+        * and early-out if so.
+        */
        do {
-               if (ldsem_cmpxchg(&count, count + adjust, sem))
+               if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
                        break;
                if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
                        raw_spin_unlock_irq(&sem->wait_lock);
        }
 
        if (!locked)
-               ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+               atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
 
 
        lockdep_acquire_read(sem, subclass, 0, _RET_IP_);
 
-       count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
+       count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
        if (count <= 0) {
                lock_stat(sem, contended);
                if (!down_read_failed(sem, count, timeout)) {
 
        lockdep_acquire(sem, subclass, 0, _RET_IP_);
 
-       count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
+       count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
        if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
                lock_stat(sem, contended);
                if (!down_write_failed(sem, count, timeout)) {
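
Both fast-path tests fall out of the bias layout. Assuming the rwsem-style constants at the top of the file (LDSEM_READ_BIAS = LDSEM_ACTIVE_BIAS, LDSEM_WRITE_BIAS = LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS, 64-bit long), a compilable sketch:

        #include <assert.h>

        #define LDSEM_ACTIVE_MASK       0xffffffffL
        #define LDSEM_ACTIVE_BIAS       1L
        #define LDSEM_WAIT_BIAS         (-LDSEM_ACTIVE_MASK - 1)
        #define LDSEM_READ_BIAS         LDSEM_ACTIVE_BIAS
        #define LDSEM_WRITE_BIAS        (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)

        int main(void)
        {
                /* Uncontended read: count goes 0 -> 1 and stays positive. */
                assert(0L + LDSEM_READ_BIAS > 0);

                /* Read while a writer holds the lock: negative, slow path. */
                assert(LDSEM_WRITE_BIAS + LDSEM_READ_BIAS <= 0);

                /* Uncontended write: active part is exactly ACTIVE_BIAS. */
                assert(((0L + LDSEM_WRITE_BIAS) & LDSEM_ACTIVE_MASK) ==
                                                        LDSEM_ACTIVE_BIAS);

                /* Write while a reader is active: active part is 2, slow path. */
                assert(((LDSEM_READ_BIAS + LDSEM_WRITE_BIAS) & LDSEM_ACTIVE_MASK) !=
                                                        LDSEM_ACTIVE_BIAS);
                return 0;
        }
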
  */
 int ldsem_down_read_trylock(struct ld_semaphore *sem)
 {
-       long count = sem->count;
+       long count = atomic_long_read(&sem->count);
 
        while (count >= 0) {
-               if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
+               if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
                        lockdep_acquire_read(sem, 0, 1, _RET_IP_);
                        lock_stat(sem, acquired);
                        return 1;
  */
 int ldsem_down_write_trylock(struct ld_semaphore *sem)
 {
-       long count = sem->count;
+       long count = atomic_long_read(&sem->count);
 
        while ((count & LDSEM_ACTIVE_MASK) == 0) {
-               if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
+               if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
                        lockdep_acquire(sem, 0, 1, _RET_IP_);
                        lock_stat(sem, acquired);
                        return 1;
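
Caller-side, the trylock pair keeps its non-blocking semantics, which is what lets tty ldisc references be taken from paths that must not sleep. A sketch modelled on tty_ldisc_ref() (simplified and renamed; shape assumed from drivers/tty/tty_ldisc.c, not part of this patch):

        static struct tty_ldisc *ldisc_ref_sketch(struct tty_struct *tty)
        {
                struct tty_ldisc *ld = NULL;

                /* Take a read lock immediately or give up without blocking. */
                if (ldsem_down_read_trylock(&tty->ldisc_sem)) {
                        ld = tty->ldisc;
                        if (!ld)
                                ldsem_up_read(&tty->ldisc_sem); /* nothing installed */
                }
                return ld;
        }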
 
        lockdep_release(sem, 1, _RET_IP_);
 
-       count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
+       count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
        if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
                ldsem_wake(sem);
 }
 
        lockdep_release(sem, 1, _RET_IP_);
 
-       count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
+       count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
        if (count < 0)
                ldsem_wake(sem);
 }
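
Finally, the two unlock-side wake conditions, worked through under the same assumed constants:

        #include <assert.h>

        #define LDSEM_ACTIVE_MASK       0xffffffffL
        #define LDSEM_ACTIVE_BIAS       1L
        #define LDSEM_WAIT_BIAS         (-LDSEM_ACTIVE_MASK - 1)
        #define LDSEM_READ_BIAS         LDSEM_ACTIVE_BIAS
        #define LDSEM_WRITE_BIAS        (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)

        int main(void)
        {
                /* Last reader leaves while a writer waits: wake it. */
                long count = (LDSEM_READ_BIAS + LDSEM_WAIT_BIAS) - LDSEM_READ_BIAS;
                assert(count < 0 && (count & LDSEM_ACTIVE_MASK) == 0);

                /* One of two readers leaves: active part nonzero, no wake. */
                count = (2 * LDSEM_READ_BIAS + LDSEM_WAIT_BIAS) - LDSEM_READ_BIAS;
                assert((count & LDSEM_ACTIVE_MASK) != 0);

                /* Writer unlocks with a waiter queued: count stays negative. */
                count = (LDSEM_WRITE_BIAS + LDSEM_WAIT_BIAS) - LDSEM_WRITE_BIAS;
                assert(count < 0);
                return 0;
        }

up_write wakes on a plain count < 0 because a writer never shares the active part, so any remaining negative weight can only be queued waiters.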