return NULL;
 }
 
-bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
-                           struct btrfs_delayed_ref_head *head)
+static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
+                                  struct btrfs_delayed_ref_head *head)
 {
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return true;
@@ … @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs)
 {
        struct btrfs_delayed_ref_head *head;
+       bool locked;
 
-       lockdep_assert_held(&delayed_refs->lock);
+       spin_lock(&delayed_refs->lock);
 again:
        head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
                             true);
        if (!head && delayed_refs->run_delayed_start != 0) {
                delayed_refs->run_delayed_start = 0;
                head = find_first_ref_head(delayed_refs);
        }
-       if (!head)
+       if (!head) {
+               spin_unlock(&delayed_refs->lock);
                return NULL;
+       }
 
        while (head->processing) {
                struct rb_node *node;
 
                node = rb_next(&head->href_node);
                if (!node) {
-                       if (delayed_refs->run_delayed_start == 0)
+                       if (delayed_refs->run_delayed_start == 0) {
+                               spin_unlock(&delayed_refs->lock);
                                return NULL;
+                       }
                        delayed_refs->run_delayed_start = 0;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }
 
        head->processing = true;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr +
                head->num_bytes;
+
+       locked = btrfs_delayed_ref_lock(delayed_refs, head);
+       spin_unlock(&delayed_refs->lock);
+
+       /*
+        * We may have dropped the spin lock to get the head mutex lock, and
+        * that might have given someone else time to free the head.  If that's
+        * true, it has been removed from our list and we can move on.
+        */
+       if (!locked)
+               return ERR_PTR(-EAGAIN);
+
        return head;
 }
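
For context on the comment above: btrfs_select_ref_head() can now return ERR_PTR(-EAGAIN) because btrfs_delayed_ref_lock() may have to drop delayed_refs->lock in order to block on head->mutex, and in that window another task can run the head and free it. The sketch below is illustrative only and is not part of this patch; the helper name lock_head_sketch is hypothetical, and the body is a simplified rendering of the trylock-fallback pattern the real helper follows.

/*
 * Illustrative sketch, not part of the patch: the trylock-or-drop-the-
 * spinlock dance that makes the ERR_PTR(-EAGAIN) return necessary.
 */
static bool lock_head_sketch(struct btrfs_delayed_ref_root *delayed_refs,
                             struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);

        /* Fast path: the mutex is free, the spinlock is never dropped. */
        if (mutex_trylock(&head->mutex))
                return true;

        /* Pin the head so it cannot be freed while we sleep on the mutex. */
        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);

        /* While unlocked, another task may have run and removed the head. */
        if (RB_EMPTY_NODE(&head->href_node)) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return false;   /* the caller turns this into -EAGAIN */
        }
        btrfs_put_delayed_ref_head(head);
        return true;
}

Because the spinlock is dropped and retaken inside that helper, only the function that took the spinlock can know whether the head survived, which is why this patch folds the locking into btrfs_select_ref_head() and removes btrfs_obtain_ref_head().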
 
 
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ … @@
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                            u64 bytenr);
-bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
-                           struct btrfs_delayed_ref_head *head);
 static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
 {
        mutex_unlock(&head->mutex);
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ … @@
        return ret;
 }
 
-static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
-                                       struct btrfs_trans_handle *trans)
-{
-       struct btrfs_delayed_ref_root *delayed_refs =
-               &trans->transaction->delayed_refs;
-       struct btrfs_delayed_ref_head *head = NULL;
-       bool locked;
-
-       spin_lock(&delayed_refs->lock);
-       head = btrfs_select_ref_head(delayed_refs);
-       if (!head) {
-               spin_unlock(&delayed_refs->lock);
-               return head;
-       }
-
-       /*
-        * Grab the lock that says we are going to process all the refs for
-        * this head
-        */
-       locked = btrfs_delayed_ref_lock(delayed_refs, head);
-       spin_unlock(&delayed_refs->lock);
-
-       /*
-        * We may have dropped the spin lock to get the head mutex lock, and
-        * that might have given someone else time to free the head.  If that's
-        * true, it has been removed from our list and we can move on.
-        */
-       if (!locked)
-               head = ERR_PTR(-EAGAIN);
-
-       return head;
-}
-
 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
                                           struct btrfs_delayed_ref_head *locked_ref,
                                           u64 *bytes_released)
 
@@ … @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        do {
                if (!locked_ref) {
-                       locked_ref = btrfs_obtain_ref_head(trans);
+                       locked_ref = btrfs_select_ref_head(delayed_refs);
                        if (IS_ERR_OR_NULL(locked_ref)) {
                                if (PTR_ERR(locked_ref) == -EAGAIN) {
                                        continue;