{
        int i;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       /* lockdep really cares that we take all of these spinlocks
-        * in the right order.  If any of the locks in the path are not
-        * currently blocking, it is going to complain.  So, make really
-        * really sure by forcing the path to blocking before we clear
-        * the path blocking.
-        */
        if (held) {
                btrfs_set_lock_blocking_rw(held, held_rw);
                if (held_rw == BTRFS_WRITE_LOCK)
                        held_rw = BTRFS_WRITE_LOCK_BLOCKING;
                else if (held_rw == BTRFS_READ_LOCK)
                        held_rw = BTRFS_READ_LOCK_BLOCKING;
        }
        btrfs_set_path_blocking(p);
-#endif
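+       /*
+        * The whole path (and the caller's held lock, if any) is blocking
+        * now, so the spinlocks below can be retaken in top-down order
+        * without inverting against a writer spinning in write_lock().
+        */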
 
        for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
                if (p->nodes[i] && p->locks[i]) {
                        btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
                        if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
                                p->locks[i] = BTRFS_WRITE_LOCK;
                        else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
                                p->locks[i] = BTRFS_READ_LOCK;
                }
        }
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (held)
                btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }
 
 /* this also releases the path */
                                        }
                                        p->locks[level] = BTRFS_WRITE_LOCK;
                                } else {
-                                       err = btrfs_try_tree_read_lock(b);
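+                                       /* spin for the read lock; fall back to blocking below */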
+                                       err = btrfs_tree_read_lock_atomic(b);
                                        if (!err) {
                                                btrfs_set_path_blocking(p);
                                                btrfs_tree_read_lock(b);
                        }
 
                        level = btrfs_header_level(b);
-                       err = btrfs_try_tree_read_lock(b);
+                       err = btrfs_tree_read_lock_atomic(b);
                        if (!err) {
                                btrfs_set_path_blocking(p);
                                btrfs_tree_read_lock(b);
 
        atomic_inc(&eb->spinning_readers);
 }
 
+/*
+ * take a spinning read lock.
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+       if (atomic_read(&eb->blocking_writers))
+               return 0;
+
+       read_lock(&eb->lock);
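+       /*
+        * Recheck under the lock: a writer may have become blocking after
+        * the unlocked test above, and blocking writers do not hold the
+        * rwlock, so read_lock() did not wait for them.
+        */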
+       if (atomic_read(&eb->blocking_writers)) {
+               read_unlock(&eb->lock);
+               return 0;
+       }
+       atomic_inc(&eb->read_locks);
+       atomic_inc(&eb->spinning_readers);
+       return 1;
+}
+
 /*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
            atomic_read(&eb->blocking_readers))
                return 0;
 
-       if (!write_trylock(&eb->lock))
-               return 0;
-
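+       /*
+        * Nobody holds the rwlock for long, so spin for it instead of
+        * trylocking, then recheck the blocking counters once it's held.
+        */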
+       write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
 
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {