        return NULL;
 }
 
+/*
+ * insert a new ref into the head ref rbtree; returns the existing entry
+ * if a head is already present at that bytenr, or NULL on success
+ */
+static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+                                                  struct rb_node *node)
+{
+       struct rb_node **p = &root->rb_node;
+       struct rb_node *parent_node = NULL;
+       struct btrfs_delayed_ref_head *entry;
+       struct btrfs_delayed_ref_head *ins;
+       u64 bytenr;
+
+       ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
+       bytenr = ins->node.bytenr;
+       while (*p) {
+               parent_node = *p;
+               entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
+                                href_node);
+
+               if (bytenr < entry->node.bytenr)
+                       p = &(*p)->rb_left;
+               else if (bytenr > entry->node.bytenr)
+                       p = &(*p)->rb_right;
+               else
+                       return entry;
+       }
+
+       rb_link_node(node, parent_node, p);
+       rb_insert_color(node, root);
+       return NULL;
+}
+
 /*
  * find a head entry based on bytenr. This returns the delayed ref
  * head if it was able to find one, or NULL if nothing was in that spot.
  * If return_bigger is given, the next bigger entry is returned if no exact
  * match is found, wrapping around to the smallest entry in the tree when
  * the search runs past the last one.
  */
-static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
-                                 u64 bytenr,
-                                 struct btrfs_delayed_ref_node **last,
-                                 int return_bigger)
+static struct btrfs_delayed_ref_head *
+find_ref_head(struct rb_root *root, u64 bytenr,
+             struct btrfs_delayed_ref_head **last, int return_bigger)
 {
        struct rb_node *n;
-       struct btrfs_delayed_ref_node *entry;
+       struct btrfs_delayed_ref_head *entry;
        int cmp = 0;
 
 again:
        n = root->rb_node;
        entry = NULL;
        while (n) {
-               entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
-               WARN_ON(!entry->in_tree);
+               entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
                if (last)
                        *last = entry;
 
-               if (bytenr < entry->bytenr)
+               if (bytenr < entry->node.bytenr)
                        cmp = -1;
-               else if (bytenr > entry->bytenr)
-                       cmp = 1;
-               else if (!btrfs_delayed_ref_is_head(entry))
+               else if (bytenr > entry->node.bytenr)
                        cmp = 1;
                else
                        cmp = 0;
 
                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (cmp > 0) {
-                       n = rb_next(&entry->rb_node);
+                       n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
-                       entry = rb_entry(n, struct btrfs_delayed_ref_node,
-                                        rb_node);
-                       bytenr = entry->bytenr;
+                       entry = rb_entry(n, struct btrfs_delayed_ref_head,
+                                        href_node);
+                       bytenr = entry->node.bytenr;
                        return_bigger = 0;
                        goto again;
                }
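
The delayed-ref.h side of the patch (where the new fields live) is not part of
this excerpt. As a minimal sketch, reconstructed from the rb_entry() and
rb_erase() usage above and below rather than copied from the header, the
layout these calls assume is roughly:

    /* abbreviated sketch; only the fields this excerpt touches are shown */
    struct btrfs_delayed_ref_head {
            struct btrfs_delayed_ref_node node;  /* node.bytenr keys both trees */
            struct rb_node href_node;            /* new: links into href_root */
            struct list_head cluster;            /* ref-cluster membership */
            /* ... */
    };

    struct btrfs_delayed_ref_root {
            struct rb_root root;         /* every delayed ref, heads included */
            struct rb_root href_root;    /* new: heads only, keyed by bytenr */
            /* ... */
    };
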
 static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_node *ref)
 {
        rb_erase(&ref->rb_node, &delayed_refs->root);
+       if (btrfs_delayed_ref_is_head(ref)) {
+               struct btrfs_delayed_ref_head *head;
+
+               head = btrfs_delayed_node_to_head(ref);
+               rb_erase(&head->href_node, &delayed_refs->href_root);
+       }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        delayed_refs->num_entries--;
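
Note the invariant the two rb_erase() calls in drop_delayed_ref() establish: a
head now lives in two trees at once, the original delayed_refs->root (linked
through node.rb_node and shared with the ordinary refs) and the new href_root
(linked through href_node), so every path that removes a head has to erase it
from both. The same pairing reappears in the later hunks below.
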
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                            struct list_head *cluster, u64 start)
 {
        int count = 0;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
-       struct btrfs_delayed_ref_node *ref;
-       struct btrfs_delayed_ref_head *head;
+       struct btrfs_delayed_ref_head *head = NULL;
 
        delayed_refs = &trans->transaction->delayed_refs;
-       if (start == 0) {
-               node = rb_first(&delayed_refs->root);
-       } else {
-               ref = NULL;
-               find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
-               if (ref) {
-                       node = &ref->rb_node;
-               } else
-                       node = rb_first(&delayed_refs->root);
+       node = rb_first(&delayed_refs->href_root);
+
+       if (start) {
+               find_ref_head(&delayed_refs->href_root, start + 1, &head, 1);
+               if (head)
+                       node = &head->href_node;
        }
 again:
        while (node && count < 32) {
-               ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-               if (btrfs_delayed_ref_is_head(ref)) {
-                       head = btrfs_delayed_node_to_head(ref);
-                       if (list_empty(&head->cluster)) {
-                               list_add_tail(&head->cluster, cluster);
-                               delayed_refs->run_delayed_start =
-                                       head->node.bytenr;
-                               count++;
-
-                               WARN_ON(delayed_refs->num_heads_ready == 0);
-                               delayed_refs->num_heads_ready--;
-                       } else if (count) {
-                               /* the goal of the clustering is to find extents
-                                * that are likely to end up in the same extent
-                                * leaf on disk.  So, we don't want them spread
-                                * all over the tree.  Stop now if we've hit
-                                * a head that was already in use
-                                */
-                               break;
-                       }
+               head = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
+               if (list_empty(&head->cluster)) {
+                       list_add_tail(&head->cluster, cluster);
+                       delayed_refs->run_delayed_start =
+                               head->node.bytenr;
+                       count++;
+
+                       WARN_ON(delayed_refs->num_heads_ready == 0);
+                       delayed_refs->num_heads_ready--;
+               } else if (count) {
+                       /* the goal of the clustering is to find extents
+                        * that are likely to end up in the same extent
+                        * leaf on disk.  So, we don't want them spread
+                        * all over the tree.  Stop now if we've hit
+                        * a head that was already in use
+                        */
+                       break;
                }
                node = rb_next(node);
        }
                 * clusters.  start from the beginning and try again
                 */
                start = 0;
-               node = rb_first(&delayed_refs->root);
+               node = rb_first(&delayed_refs->href_root);
                goto again;
        }
        return 1;
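
For context, here is a hedged sketch of how a caller might drive
btrfs_find_ref_cluster(); the loop shape and the run_clustered_refs() consumer
are assumptions modeled on this era's btrfs_run_delayed_refs(), not part of
the patch:

    /* hypothetical driver: build clusters of up to 32 unused heads and run
     * them, resuming each pass from run_delayed_start (updated above) */
    static int run_all_delayed_refs(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
    {
            struct btrfs_delayed_ref_root *delayed_refs;
            struct list_head cluster;
            int ret;

            delayed_refs = &trans->transaction->delayed_refs;
            INIT_LIST_HEAD(&cluster);
            while (1) {
                    ret = btrfs_find_ref_cluster(trans, &cluster,
                                                 delayed_refs->run_delayed_start);
                    if (ret)
                            return 0;    /* no unused heads left anywhere */

                    ret = run_clustered_refs(trans, root, &cluster);
                    if (ret < 0)
                            return ret;  /* real code also releases the cluster */
            }
    }
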
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
        } else {
+               htree_insert(&delayed_refs->href_root, &head_ref->href_node);
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                delayed_refs->num_entries++;
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
 {
-       struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_root *delayed_refs;
 
        delayed_refs = &trans->transaction->delayed_refs;
-       ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
-       if (ref)
-               return btrfs_delayed_node_to_head(ref);
-       return NULL;
+       return find_ref_head(&delayed_refs->href_root, bytenr, NULL, 0);
 }
 
 void btrfs_delayed_ref_exit(void)
 
 
                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
+               if (btrfs_delayed_ref_is_head(ref)) {
+                       rb_erase(&locked_ref->href_node,
+                                &delayed_refs->href_root);
+               }
                delayed_refs->num_entries--;
                if (!btrfs_delayed_ref_is_head(ref)) {
                        /*
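
One subtlety in the hunk above: when btrfs_delayed_ref_is_head(ref) holds, ref
is presumably the node embedded in the head that the surrounding function has
locked (ref == &locked_ref->node), which is why the href_node erase can safely
go through locked_ref. That reading of the unshown context is an inference,
not quoted code.
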
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, unsigned long count)
 {
        struct rb_node *node;
        struct btrfs_delayed_ref_root *delayed_refs;
-       struct btrfs_delayed_ref_node *ref;
+       struct btrfs_delayed_ref_head *head;
        struct list_head cluster;
        int ret;
        u64 delayed_start;
                        spin_lock(&delayed_refs->lock);
                }
 
-               node = rb_first(&delayed_refs->root);
+               node = rb_first(&delayed_refs->href_root);
                if (!node)
                        goto out;
                count = (unsigned long)-1;
 
                while (node) {
-                       ref = rb_entry(node, struct btrfs_delayed_ref_node,
-                                      rb_node);
-                       if (btrfs_delayed_ref_is_head(ref)) {
-                               struct btrfs_delayed_ref_head *head;
+                       head = rb_entry(node, struct btrfs_delayed_ref_head,
+                                       href_node);
+                       if (btrfs_delayed_ref_is_head(&head->node)) {
+                               struct btrfs_delayed_ref_node *ref;
 
-                               head = btrfs_delayed_node_to_head(ref);
+                               ref = &head->node;
                                atomic_inc(&ref->refs);
 
                                spin_unlock(&delayed_refs->lock);
                                btrfs_put_delayed_ref(ref);
                                cond_resched();
                                goto again;
+                       } else {
+                               WARN_ON(1);
                        }
                        node = rb_next(node);
                }
         */
        head->node.in_tree = 0;
        rb_erase(&head->node.rb_node, &delayed_refs->root);
+       rb_erase(&head->href_node, &delayed_refs->href_root);
 
        delayed_refs->num_entries--;