btrfs: destroy qgroup extent records on transaction abort
author     Jeff Mahoney <jeffm@suse.com>
           Tue, 11 Feb 2020 07:25:37 +0000 (15:25 +0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 28 Feb 2020 16:23:40 +0000 (17:23 +0100)
commit 81f7eb00ff5bb8326e82503a32809421d14abb8a upstream.

We clean up the delayed references when we abort a transaction but we
leave the pending qgroup extent records behind, leaking memory.

This patch destroys the extent records when we destroy the delayed refs
and makes sure they're gone before releasing the transaction.

Fixes: 3368d001ba5d ("btrfs: qgroup: Record possible quota-related extent for qgroup.")
CC: stable@vger.kernel.org # 4.4+
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
[ Rebased to latest upstream, remove to_qgroup() helper, use
  rbtree_postorder_for_each_entry_safe() wrapper ]
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
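
For context, a minimal, self-contained sketch of the pattern the new helper relies on, assuming kernel context: rbtree_postorder_for_each_entry_safe() walks an rbtree in post-order, so each entry can be freed during the walk without being revisited and without rebalancing. The struct demo_record, its owned field, and demo_destroy_records() below are hypothetical names for illustration only; the real code in the diff walks trans->delayed_refs.dirty_extent_root and frees each btrfs_qgroup_extent_record together with its old_roots ulist.

#include <linux/rbtree.h>
#include <linux/slab.h>

/* Hypothetical record type; stands in for btrfs_qgroup_extent_record. */
struct demo_record {
	struct rb_node node;	/* linkage into the rb_root */
	void *owned;		/* stands in for the old_roots ulist */
};

static void demo_destroy_records(struct rb_root *root)
{
	struct demo_record *entry;
	struct demo_record *next;

	/*
	 * Post-order: children are visited before their parent and no node
	 * is revisited, so both the auxiliary allocation and the entry
	 * itself can be freed as the walk proceeds.
	 */
	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
		kfree(entry->owned);	/* the real code uses ulist_free() */
		kfree(entry);
	}
	/* The rb_root itself is not reset here, matching the patch. */
}

As the diff shows, the helper is called from btrfs_destroy_delayed_refs() just before delayed_refs->lock is dropped, and btrfs_put_transaction() gains a WARN_ON(!RB_EMPTY_ROOT(...)) check on dirty_extent_root so leftover records are reported before the transaction is freed.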
fs/btrfs/disk-io.c
fs/btrfs/qgroup.c
fs/btrfs/qgroup.h
fs/btrfs/transaction.c

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c1e47db439e286b4ffc5f4759b2e8a575568ca07..b6ef9c2e0f347be63610e993b98476c933fd9106 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4272,6 +4272,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                cond_resched();
                spin_lock(&delayed_refs->lock);
        }
+       btrfs_qgroup_destroy_extent_records(trans);
 
        spin_unlock(&delayed_refs->lock);
 
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 39fc8c3d3a75df2387e43c7dec3aae9d90e59350..410b791f28a56542912f14dd90062b537f2db83d 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -4016,3 +4016,16 @@ out:
        }
        return ret;
 }
+
+void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
+{
+       struct btrfs_qgroup_extent_record *entry;
+       struct btrfs_qgroup_extent_record *next;
+       struct rb_root *root;
+
+       root = &trans->delayed_refs.dirty_extent_root;
+       rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
+               ulist_free(entry->old_roots);
+               kfree(entry);
+       }
+}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 236f12224d5205a591251e317cf996dd23fc1e8c..1bc65445946907c171eccc0fe1363b8ce2f0b754 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -414,5 +414,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
                u64 last_snapshot);
 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct extent_buffer *eb);
+void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
 
 #endif
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 33dcc88b428ad402ed68c1dd6bbfb9dc9ee69d23..beb6c69cd1e55b7d0fb3089b4822269fb6bce38c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -121,6 +121,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(
                                &transaction->delayed_refs.href_root.rb_root));
+               WARN_ON(!RB_EMPTY_ROOT(
+                               &transaction->delayed_refs.dirty_extent_root));
                if (transaction->delayed_refs.pending_csums)
                        btrfs_err(transaction->fs_info,
                                  "pending csums is %llu",