struct completion qgroup_rescan_completion;
        struct btrfs_work qgroup_rescan_work;
        bool qgroup_rescan_running;     /* protected by qgroup_rescan_lock */
+       u8 qgroup_drop_subtree_thres;
 
        /* filesystem state */
        unsigned long fs_state;
 
        fs_info->qgroup_seq = 1;
        fs_info->qgroup_ulist = NULL;
        fs_info->qgroup_rescan_running = false;
+       fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
        mutex_init(&fs_info->qgroup_rescan_lock);
 }
 
 
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
+       fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
        spin_unlock(&fs_info->qgroup_lock);
 
        btrfs_free_qgroup_config(fs_info);
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int ret = 0;
        int level;
+       u8 drop_subptree_thres;
        struct extent_buffer *eb = root_eb;
        struct btrfs_path *path = NULL;
 
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;
 
+       spin_lock(&fs_info->qgroup_lock);
+       drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
+       spin_unlock(&fs_info->qgroup_lock);
+
+       /*
+        * This function only gets called for snapshot drop; if we hit a high
+        * node here, it means we are going to change ownership for quite a lot
+        * of extents, which will greatly slow down btrfs_commit_transaction().
+        *
+        * So if we find a tree at or above the threshold, just skip the
+        * accounting and mark the qgroup inconsistent.
+        */
+       if (root_level >= drop_subptree_thres) {
+               qgroup_mark_inconsistent(fs_info);
+               return 0;
+       }
+
        if (!extent_buffer_uptodate(root_eb)) {
                ret = btrfs_read_extent_buffer(root_eb, root_gen, root_level, NULL);
                if (ret)
 
 }
 BTRFS_ATTR(qgroups, inconsistent, qgroup_inconsistent_show);
 
+/*
+ * sysfs read handler for qgroups/drop_subtree_threshold.
+ *
+ * Emits the current subtree-drop accounting threshold (a tree level,
+ * 0..BTRFS_MAX_LEVEL) as a decimal line.
+ */
+static ssize_t qgroup_drop_subtree_thres_show(struct kobject *qgroups_kobj,
+                                             struct kobj_attribute *a,
+                                             char *buf)
+{
+       struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
+       u8 thres;
+
+       /* Snapshot the value under qgroup_lock, matching the store side. */
+       spin_lock(&fs_info->qgroup_lock);
+       thres = fs_info->qgroup_drop_subtree_thres;
+       spin_unlock(&fs_info->qgroup_lock);
+
+       return sysfs_emit(buf, "%d\n", thres);
+}
+
+/*
+ * sysfs write handler for qgroups/drop_subtree_threshold.
+ *
+ * Accepts a decimal tree level in [0, BTRFS_MAX_LEVEL]; anything else
+ * (non-numeric input or an out-of-range level) yields -EINVAL.
+ * Returns @len on success, per sysfs convention.
+ */
+static ssize_t qgroup_drop_subtree_thres_store(struct kobject *qgroups_kobj,
+                                              struct kobj_attribute *a,
+                                              const char *buf, size_t len)
+{
+       struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
+       u8 thres;
+
+       /* Both parse failure and an over-limit level are -EINVAL. */
+       if (kstrtou8(buf, 10, &thres) || thres > BTRFS_MAX_LEVEL)
+               return -EINVAL;
+
+       /* Publish under qgroup_lock so readers see a consistent value. */
+       spin_lock(&fs_info->qgroup_lock);
+       fs_info->qgroup_drop_subtree_thres = thres;
+       spin_unlock(&fs_info->qgroup_lock);
+
+       return len;
+}
+BTRFS_ATTR_RW(qgroups, drop_subtree_threshold, qgroup_drop_subtree_thres_show,
+             qgroup_drop_subtree_thres_store);
+
 /*
  * Qgroups global info
  *
static struct attribute *qgroups_attrs[] = {
        BTRFS_ATTR_PTR(qgroups, enabled),
        BTRFS_ATTR_PTR(qgroups, inconsistent),
+       BTRFS_ATTR_PTR(qgroups, drop_subtree_threshold),
        NULL    /* sentinel: attribute lists must be NULL-terminated */
};
ATTRIBUTE_GROUPS(qgroups);