node->num_bytes);
                        }
                }
+
+               /* Also free its reserved qgroup space */
+               btrfs_qgroup_free_delayed_ref(root->fs_info,
+                                             head->qgroup_ref_root,
+                                             head->qgroup_reserved);
                return ret;
        }
 
 
        ret = btrfs_alloc_reserved_file_extent(trans, root,
                                        root->root_key.objectid,
                                        btrfs_ino(inode), file_pos, &ins);
+       if (ret < 0)
+               goto out;
+       /*
+        * Release the reserved range from the inode's dirty range map and
+        * move it to the delayed ref code, as accounting now only happens
+        * at commit_transaction() time.
+        */
+       btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
+       ret = btrfs_add_delayed_qgroup_reserve(root->fs_info, trans,
+                       root->objectid, disk_bytenr, ram_bytes);
 out:
        btrfs_free_path(path);
 
 
        return ret;
 }
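
The hunk above splits what used to be a single "reserve then account" step
into a hand-off: the per-inode reservation is released once the file extent
is inserted, and the same byte count is re-recorded on the delayed ref so
qgroup accounting happens exactly once, at commit_transaction() time. Below
is a minimal sketch of that hand-off, using only helpers visible in this
patch (kernel-internal context assumed; the wrapper name
record_qgroup_handoff() is made up purely for illustration):

	/*
	 * Hypothetical helper showing the new ordering: drop the inode's
	 * dirty-range reservation first, then attach the same bytes to the
	 * delayed ref for accounting at commit time.
	 */
	static int record_qgroup_handoff(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct inode *inode, u64 file_pos,
					 u64 disk_bytenr, u64 ram_bytes)
	{
		/* Release the range from the inode's dirty range map. */
		btrfs_qgroup_release_data(inode, file_pos, ram_bytes);

		/* Re-record it against the delayed ref instead. */
		return btrfs_add_delayed_qgroup_reserve(root->fs_info, trans,
							root->objectid,
							disk_bytenr, ram_bytes);
	}

If the delayed ref head is later discarded without being run (the first
hunk), the bytes recorded this way are returned via
btrfs_qgroup_free_delayed_ref() instead of being leaked.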
 
-void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
+void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
+                              u64 ref_root, u64 num_bytes)
 {
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
-       struct btrfs_fs_info *fs_info = root->fs_info;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
-       u64 ref_root = root->root_key.objectid;
        int ret = 0;
 
        if (!is_fstree(ref_root))
 
                         struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
                         struct btrfs_qgroup_inherit *inherit);
 int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes);
-void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes);
+void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
+                              u64 ref_root, u64 num_bytes);
+static inline void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
+{
+       btrfs_qgroup_free_refroot(root->fs_info, root->objectid, num_bytes);
+}
+
+/*
+ * TODO: Add a proper trace point for this; btrfs_qgroup_free() is called
+ * from many places, so it can't provide a good trace for the delayed ref
+ * case.
+ */
+static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
+                                                u64 ref_root, u64 num_bytes)
+{
+       btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes);
+}
 
 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans);
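
The header change keeps btrfs_qgroup_free() as a thin wrapper so existing
callers that hold a struct btrfs_root are untouched, while contexts that
only carry a root objectid (such as the delayed ref cleanup in the first
hunk) free through the refroot-based entry point. A hedged illustration of
the two call styles follows; the function names are invented for the
example, and the head fields are taken from the first hunk:

	/* Old-style caller: still has the root, nothing to change. */
	static void free_with_root(struct btrfs_root *root, u64 num_bytes)
	{
		btrfs_qgroup_free(root, num_bytes);
	}

	/*
	 * Delayed ref cleanup: by this point only the originating root's
	 * objectid and the reserved byte count survive on the head, so the
	 * free goes through the refroot variant.
	 */
	static void cleanup_head_sketch(struct btrfs_fs_info *fs_info,
					struct btrfs_delayed_ref_head *head)
	{
		btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
					      head->qgroup_reserved);
	}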