return (block_group->start + block_group->length);
 }
 
+static inline bool btrfs_is_block_group_data_only(
+                                       struct btrfs_block_group *block_group)
+{
+       /*
+        * True only for pure data block groups: the DATA flag is set and the
+        * METADATA flag is clear.  In mixed mode (DATA|METADATA in one block
+        * group) the fragmentation is expected to be high, lowering the
+        * efficiency of discard, so only proper data block groups are
+        * considered.
+        */
+       return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
+              !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
+}
+
 #ifdef CONFIG_BTRFS_DEBUG
 static inline int btrfs_should_fragment_free_space(
                struct btrfs_block_group *block_group)
 
 static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
                                struct btrfs_block_group *block_group)
 {
+       if (!btrfs_is_block_group_data_only(block_group))
+               return;
+
        spin_lock(&discard_ctl->lock);
        __add_to_discard_list(discard_ctl, block_group);
        spin_unlock(&discard_ctl->lock);
        if (block_group && now > block_group->discard_eligible_time) {
                if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
                    block_group->used != 0) {
-                       __add_to_discard_list(discard_ctl, block_group);
+                       if (btrfs_is_block_group_data_only(block_group))
+                               __add_to_discard_list(discard_ctl, block_group);
+                       else
+                               list_del_init(&block_group->discard_list);
                        goto again;
                }
                if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
        s32 extents_delta;
        s64 bytes_delta;
 
-       if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
+       if (!block_group ||
+           !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) ||
+           !btrfs_is_block_group_data_only(block_group))
                return;
 
        discard_ctl = &block_group->fs_info->discard_ctl;