btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
 int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head);
+static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
+{
+       mutex_unlock(&head->mutex);
+}
+
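The unlock side now has a named counterpart to btrfs_delayed_ref_lock(). A minimal usage sketch, assuming the lock helper returns 0 once head->mutex is held and -EAGAIN if the head was already processed while waiting (the return-value semantics are not shown in this hunk):

	if (!btrfs_delayed_ref_lock(trans, head)) {
		/* run the refs queued on this head ... */
		btrfs_delayed_ref_unlock(head);
	}
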
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 search_start);
+void btrfs_release_ref_cluster(struct list_head *cluster);
 
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
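The new btrfs_release_ref_cluster() declaration pairs with the error handling added to the caller further down. A hedged sketch of what the helper is expected to do (the real definition lives elsewhere in the patch and may differ), assuming its only job is to empty the cluster list so no stale heads stay queued after a failure:

void btrfs_release_ref_cluster(struct list_head *cluster)
{
	struct list_head *pos, *q;

	/* drop every head still queued on this cluster */
	list_for_each_safe(pos, q, cluster)
		list_del_init(pos);
}
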
 
                                                      node->num_bytes);
                        }
                }
-               mutex_unlock(&head->mutex);
                return ret;
        }
 
                         * process of being added. Don't run this ref yet.
                         */
                        list_del_init(&locked_ref->cluster);
-                       mutex_unlock(&locked_ref->mutex);
+                       btrfs_delayed_ref_unlock(locked_ref);
                        locked_ref = NULL;
                        delayed_refs->num_heads_ready++;
                        spin_unlock(&delayed_refs->lock);
                                btrfs_free_delayed_extent_op(extent_op);
 
                                if (ret) {
-                                       list_del_init(&locked_ref->cluster);
-                                       mutex_unlock(&locked_ref->mutex);
-
-                                       printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+                                       printk(KERN_DEBUG
+                                              "btrfs: run_delayed_extent_op "
+                                              "returned %d\n", ret);
                                        spin_lock(&delayed_refs->lock);
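+                                       /*
+                                        * Leave the head on the cluster; on
+                                        * error the caller drops it via
+                                        * btrfs_release_ref_cluster().
+                                        */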
+                                       btrfs_delayed_ref_unlock(locked_ref);
                                        return ret;
                                }
 
                                goto next;
                        }
-
-                       list_del_init(&locked_ref->cluster);
-                       locked_ref = NULL;
                }
 
                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
-               if (locked_ref) {
+               if (!btrfs_delayed_ref_is_head(ref)) {
                        /*
                         * when we play the delayed ref, also correct the
                         * ref_mod on head
                ret = run_one_delayed_ref(trans, root, ref, extent_op,
                                          must_insert_reserved);
 
-               btrfs_put_delayed_ref(ref);
                btrfs_free_delayed_extent_op(extent_op);
-               count++;
-
                if (ret) {
-                       if (locked_ref) {
-                               list_del_init(&locked_ref->cluster);
-                               mutex_unlock(&locked_ref->mutex);
-                       }
-                       printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+                       btrfs_delayed_ref_unlock(locked_ref);
+                       btrfs_put_delayed_ref(ref);
+                       printk(KERN_DEBUG
+                              "btrfs: run_one_delayed_ref returned %d\n", ret);
                        spin_lock(&delayed_refs->lock);
                        return ret;
                }
 
+               /*
+                * If this node is a head, all the refs queued on it have been
+                * dealt with and we are about to pick the next head to process,
+                * so unlock the head and drop it from the cluster list before
+                * releasing it.
+                */
+               if (btrfs_delayed_ref_is_head(ref)) {
+                       list_del_init(&locked_ref->cluster);
+                       btrfs_delayed_ref_unlock(locked_ref);
+                       locked_ref = NULL;
+               }
+               btrfs_put_delayed_ref(ref);
+               count++;
 next:
                cond_resched();
                spin_lock(&delayed_refs->lock);
 
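For context, btrfs_delayed_ref_is_head(), used by the hunk above, is a pre-existing predicate in delayed-ref.h; in kernels of this vintage it is roughly:

static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}
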
                ret = run_clustered_refs(trans, root, &cluster);
                if (ret < 0) {
+                       btrfs_release_ref_cluster(&cluster);
                        spin_unlock(&delayed_refs->lock);
                        btrfs_abort_transaction(trans, root, ret);
                        return ret;