return NULL;
 }
 
-/* must be called with qgroup_lock held */
+/*
+ * Add qgroup to the filesystem's qgroup tree.
+ *
+ * Must be called with qgroup_lock held and @prealloc preallocated.
+ *
+ * Ownership of @prealloc is transferred to this function, so the caller
+ * must no longer touch @prealloc after the call.
+ */
 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+                                         struct btrfs_qgroup *prealloc,
                                          u64 qgroupid)
 {
        struct rb_node **p = &fs_info->qgroup_tree.rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup *qgroup;
 
+       /* Caller must have pre-allocated @prealloc. */
+       ASSERT(prealloc);
+
        while (*p) {
                parent = *p;
                qgroup = rb_entry(parent, struct btrfs_qgroup, node);
 
-               if (qgroup->qgroupid < qgroupid)
+               if (qgroup->qgroupid < qgroupid) {
                        p = &(*p)->rb_left;
-               else if (qgroup->qgroupid > qgroupid)
+               } else if (qgroup->qgroupid > qgroupid) {
                        p = &(*p)->rb_right;
-               else
+               } else {
+                       kfree(prealloc);
                        return qgroup;
+               }
        }
 
-       qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
-       if (!qgroup)
-               return ERR_PTR(-ENOMEM);
-
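+       /* Use the caller's preallocated structure as the new qgroup. */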
+       qgroup = prealloc;
        qgroup->qgroupid = qgroupid;
        INIT_LIST_HEAD(&qgroup->groups);
        INIT_LIST_HEAD(&qgroup->members);
                        qgroup_mark_inconsistent(fs_info);
                }
                if (!qgroup) {
-                       qgroup = add_qgroup_rb(fs_info, found_key.offset);
-                       if (IS_ERR(qgroup)) {
-                               ret = PTR_ERR(qgroup);
+                       struct btrfs_qgroup *prealloc;
+
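+                       /* No qgroup_lock is held here, so GFP_KERNEL is safe. */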
+                       prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+                       if (!prealloc) {
+                               ret = -ENOMEM;
                                goto out;
                        }
+                       qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
                }
                ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
                if (ret < 0)
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_qgroup *qgroup = NULL;
+       struct btrfs_qgroup *prealloc = NULL;
        struct btrfs_trans_handle *trans = NULL;
        struct ulist *ulist = NULL;
        int ret = 0;
                        /* Release locks on tree_root before we access quota_root */
                        btrfs_release_path(path);
 
+                       /* We should not have a stray @prealloc pointer. */
+                       ASSERT(prealloc == NULL);
+                       prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+                       if (!prealloc) {
+                               ret = -ENOMEM;
+                               btrfs_abort_transaction(trans, ret);
+                               goto out_free_path;
+                       }
+
                        ret = add_qgroup_item(trans, quota_root,
                                              found_key.offset);
                        if (ret) {
                                goto out_free_path;
                        }
 
-                       qgroup = add_qgroup_rb(fs_info, found_key.offset);
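+                       /* Ownership of @prealloc passes to add_qgroup_rb(). */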
+                       qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
+                       prealloc = NULL;
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }
 
-       qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
-       if (IS_ERR(qgroup)) {
-               ret = PTR_ERR(qgroup);
-               btrfs_abort_transaction(trans, ret);
+       ASSERT(prealloc == NULL);
+       prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+       if (!prealloc) {
+               ret = -ENOMEM;
+               btrfs_abort_transaction(trans, ret);
                goto out_free_path;
        }
+       qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
+       prealloc = NULL;
        ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
        if (ret < 0) {
                btrfs_abort_transaction(trans, ret);
        else if (trans)
                ret = btrfs_end_transaction(trans);
        ulist_free(ulist);
+       kfree(prealloc);
        return ret;
 }
 
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
+       struct btrfs_qgroup *prealloc = NULL;
        int ret = 0;
 
        mutex_lock(&fs_info->qgroup_ioctl_lock);
                goto out;
        }
 
+       prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+       if (!prealloc) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
        ret = add_qgroup_item(trans, quota_root, qgroupid);
        if (ret)
                goto out;
 
        spin_lock(&fs_info->qgroup_lock);
-       qgroup = add_qgroup_rb(fs_info, qgroupid);
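+       /* @prealloc is consumed here; nothing is allocated under the lock. */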
+       qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
+       prealloc = NULL;
 
-       if (IS_ERR(qgroup)) {
-               ret = PTR_ERR(qgroup);
-               goto out;
-       }
        ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
 out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
+       kfree(prealloc);
        return ret;
 }
 
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *srcgroup;
        struct btrfs_qgroup *dstgroup;
+       struct btrfs_qgroup *prealloc;
        bool need_rescan = false;
        u32 level_size = 0;
        u64 nums;
 
+       prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+       if (!prealloc)
+               return -ENOMEM;
+
        /*
         * There are only two callers of this function.
         *
 
        spin_lock(&fs_info->qgroup_lock);
 
-       dstgroup = add_qgroup_rb(fs_info, objectid);
-       if (IS_ERR(dstgroup)) {
-               ret = PTR_ERR(dstgroup);
-               goto unlock;
-       }
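+       /* With @prealloc provided, add_qgroup_rb() can no longer fail. */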
+       dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
+       prealloc = NULL;
 
        if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
                dstgroup->lim_flags = inherit->lim.flags;
                mutex_unlock(&fs_info->qgroup_ioctl_lock);
        if (need_rescan)
                qgroup_mark_inconsistent(fs_info);
+       kfree(prealloc);
        return ret;
 }