 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
-       int ret = 0;
+       int ret;
+       bool re_search = false;
 
        spin_lock(&ctl->tree_lock);
 
 again:
+       ret = 0;
        if (!bytes)
                goto out_lock;
 
[...]
                info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                          1, 0);
                if (!info) {
-                       /* the tree logging code might be calling us before we
-                        * have fully loaded the free space rbtree for this
-                        * block group.  So it is possible the entry won't
-                        * be in the rbtree yet at all.  The caching code
-                        * will make sure not to put it in the rbtree if
-                        * the logging code has pinned it.
+                       /*
+                        * If we found a partial bit of our free space in a
+                        * bitmap but then couldn't find the other part this may
+                        * be a problem, so WARN about it.
                         */
+                       WARN_ON(re_search);
                        goto out_lock;
                }
        }
 
+       re_search = false;
        if (!info->bitmap) {
                unlink_free_space(ctl, info);
                if (offset == info->offset) {
[...]
        }
 
        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
-       if (ret == -EAGAIN)
+       if (ret == -EAGAIN) {
+               re_search = true;
                goto again;
+       }
        BUG_ON(ret); /* logic error */
 out_lock:
        spin_unlock(&ctl->tree_lock);
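
For readers following the control flow rather than the btrfs internals, below is a minimal user-space sketch of the pattern these hunks introduce. Everything in it (struct span, find_span(), trim_span(), remove_span(), the fprintf()-based WARN_ON()) is a hypothetical stand-in rather than a kernel or btrfs API; only the shape of the loop matches the patch: ret is reset at the top of every pass, re_search is set only when -EAGAIN forces a retry, and a failed lookup is treated as a warning only on that second pass, because losing the remainder of a partially removed range means the free-space records are inconsistent.

/*
 * Hypothetical, simplified user-space sketch of the retry pattern used by
 * the hunks above.  struct span, find_span(), trim_span() and remove_span()
 * are made-up stand-ins, not btrfs or kernel APIs, and WARN_ON() is reduced
 * to an fprintf(); only the control flow mirrors the patch.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define WARN_ON(cond) \
        do { if (cond) fprintf(stderr, "warning: %s\n", #cond); } while (0)

struct span {
        unsigned long offset;
        unsigned long bytes;
};

/* Pretend store: two adjacent free ranges, as if split across two entries. */
static struct span spans[2] = { { 0, 64 }, { 64, 64 } };

static struct span *find_span(unsigned long offset)
{
        for (int i = 0; i < 2; i++)
                if (spans[i].bytes && offset >= spans[i].offset &&
                    offset < spans[i].offset + spans[i].bytes)
                        return &spans[i];
        return NULL;
}

/*
 * Remove as much of the request as this span covers.  Returns -EAGAIN when
 * only part of the request fit, so the caller has to search again for the
 * remainder, which is the role remove_from_bitmap() plays in the patch.
 */
static int trim_span(struct span *s, unsigned long *offset, unsigned long *bytes)
{
        unsigned long avail = s->offset + s->bytes - *offset;
        unsigned long take = *bytes < avail ? *bytes : avail;

        /* Simplified: truncate the span at *offset; real code splits entries. */
        s->bytes = *offset - s->offset;
        *offset += take;
        *bytes -= take;
        return *bytes ? -EAGAIN : 0;
}

static int remove_span(unsigned long offset, unsigned long bytes)
{
        bool re_search = false;
        struct span *s;
        int ret;

again:
        ret = 0;
        if (!bytes)
                goto out;

        s = find_span(offset);
        if (!s) {
                /*
                 * Not finding anything is normal on the first pass, but
                 * losing the second half of a partially removed range on a
                 * re-search points at inconsistent state, so warn.
                 */
                WARN_ON(re_search);
                goto out;
        }

        re_search = false;
        ret = trim_span(s, &offset, &bytes);
        if (ret == -EAGAIN) {
                re_search = true;
                goto again;
        }
out:
        return ret;
}

int main(void)
{
        /* Remove 96 bytes at offset 32: spans both entries, so one retry. */
        return remove_span(32, 96);
}

Built as an ordinary C program, remove_span(32, 96) takes the -EAGAIN path once: the first span satisfies only part of the request, re_search is set, and the retry finds the rest in the second span, so the warning never fires.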