return head;
 }
 
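+/*
+ * Remove a delayed ref head from the href rbtree and update the counters
+ * that track it (num_entries, num_heads, num_heads_ready).
+ *
+ * Callers must hold both delayed_refs->lock and head->lock.
+ */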
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+                          struct btrfs_delayed_ref_head *head)
+{
+       lockdep_assert_held(&delayed_refs->lock);
+       lockdep_assert_held(&head->lock);
+
+       rb_erase_cached(&head->href_node, &delayed_refs->href_root);
+       RB_CLEAR_NODE(&head->href_node);
+       atomic_dec(&delayed_refs->num_entries);
+       delayed_refs->num_heads--;
+       if (head->processing == 0)
+               delayed_refs->num_heads_ready--;
+}
+
 /*
  * Helper to insert the ref_node to the tail or merge with tail.
  *
 
 {
        mutex_unlock(&head->mutex);
 }
-
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+                          struct btrfs_delayed_ref_head *head);
 
 struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs);
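
The two lockdep assertions in the new helper spell out its locking contract: both delayed_refs->lock and head->lock must be held when a head is deleted. As a rough sketch only (not part of the patch; btrfs_select_ref_head() is just one way a caller may have located the head), the expected calling pattern is:

        spin_lock(&delayed_refs->lock);
        head = btrfs_select_ref_head(delayed_refs);
        if (head) {
                spin_lock(&head->lock);
                /* unlink from href_root and fix up the counters */
                btrfs_delete_ref_head(delayed_refs, head);
                spin_unlock(&head->lock);
        }
        spin_unlock(&delayed_refs->lock);

The actual call sites are updated in the two hunks that follow.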
 
                spin_unlock(&delayed_refs->lock);
                return 1;
        }
-       delayed_refs->num_heads--;
-       rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-       RB_CLEAR_NODE(&head->href_node);
+       btrfs_delete_ref_head(delayed_refs, head);
        spin_unlock(&head->lock);
        spin_unlock(&delayed_refs->lock);
-       atomic_dec(&delayed_refs->num_entries);
 
        trace_run_delayed_ref_head(fs_info, head, 0);
 
        if (!mutex_trylock(&head->mutex))
                goto out;
 
-       /*
-        * at this point we have a head with no other entries.  Go
-        * ahead and process it.
-        */
-       rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-       RB_CLEAR_NODE(&head->href_node);
-       atomic_dec(&delayed_refs->num_entries);
-
-       /*
-        * we don't take a ref on the node because we're removing it from the
-        * tree, so we just steal the ref the tree was holding.
-        */
-       delayed_refs->num_heads--;
-       if (head->processing == 0)
-               delayed_refs->num_heads_ready--;
+       btrfs_delete_ref_head(delayed_refs, head);
        head->processing = 0;
+
        spin_unlock(&head->lock);
        spin_unlock(&delayed_refs->lock);
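
One detail the helper preserves: num_heads_ready is only decremented when head->processing is still zero. The hunk just above open-coded exactly that check, so its accounting is unchanged, while in the earlier hunk the head has already been claimed for processing (presumably when it was selected to run), so the counter is left untouched there, just as it was before.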