*/
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
-       }
+       /*
+        * No lock is required.  The lock owner may change if
+        * we have a read lock, but it won't change to or away
+        * from us.  If we have the write lock, we are the owner
+        * and it'll never change.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner)
+               return;
+
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
-       }
+       /*
+        * No lock is required.  The lock owner may change if
+        * we have a read lock, but it won't change to or away
+        * from us.  If we have the write lock, we are the owner
+        * and it'll never change.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner)
+               return;
+
        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
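+       /*
+        * A nested read lock is only legal once the write lock we hold
+        * has been set blocking; if we still held eb->lock spinning as
+        * the writer, the read_lock() below would deadlock against
+        * ourselves.
+        */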
+       BUG_ON(!atomic_read(&eb->blocking_writers) &&
+              current->pid == eb->lock_owner);
+
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
        if (atomic_read(&eb->blocking_writers))
                return 0;
 
-       read_lock(&eb->lock);
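+       /*
+        * This is the trylock path: if a writer currently holds
+        * eb->lock, report failure instead of spinning on it.
+        */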
+       if (!read_trylock(&eb->lock))
+               return 0;
+
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;
-       write_lock(&eb->lock);
+
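+       /*
+        * As on the read side, a held rwlock means trylock failure,
+        * not something to wait for.
+        */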
+       if (!write_trylock(&eb->lock))
+               return 0;
+
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       eb->lock_nested = 0;
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
+       /*
+        * If we're nested, we have the write lock.  No new locking
+        * is needed as long as we are the lock owner.
+        * The write unlock will do a barrier for us, and the lock_nested
+        * field only matters to the lock owner.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner) {
+               eb->lock_nested = 0;
+               return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       eb->lock_nested = 0;
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
+       /*
+        * If we're nested, we have the write lock.  No new locking
+        * is needed as long as we are the lock owner.
+        * The write unlock will do a barrier for us, and the lock_nested
+        * field only matters to the lock owner.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner) {
+               eb->lock_nested = 0;
+               return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        BUG_ON(blockers > 1);
 
        btrfs_assert_tree_locked(eb);
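+       /*
+        * lock_owner is now read without taking eb->lock, so clear it
+        * before the lock is released to avoid stale matches.
+        */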
+       eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);
 
        if (blockers) {