bcachefs: Fix btree_trans list ordering
author	Kent Overstreet <kent.overstreet@linux.dev>
	Sun, 23 Jun 2024 02:11:01 +0000 (22:11 -0400)
committer	Kent Overstreet <kent.overstreet@linux.dev>
	Sun, 23 Jun 2024 04:57:21 +0000 (00:57 -0400)
The debug code relies on btree_trans_list being ordered so that it can
resume on subsequent calls or lock restarts.

However, it was using trans->locking_wait.task.pid, which is incorrect
since btree_trans objects are cached and reused - typically by different
tasks.

Fix this by switching to pointer order, and sort the list lazily only
when required - speeding up the btree_trans_get() fastpath.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_iter.c
fs/bcachefs/debug.c
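
The core of the fix is visible in the debug.c hunk below: the reader sorts
the list by address on demand, then uses the transaction pointer itself as
a resume cursor across debugfs read() calls. A minimal annotated sketch of
that pattern (this mirrors the hunk below with added commentary; it is not
additional code from the commit):

	/* Sort lazily, then scan; i->iter persists across read() calls,
	 * so after dropping and retaking btree_trans_lock we can skip
	 * everything already emitted on a previous pass. */
	list_sort(&c->btree_trans_list, list_ptr_order_cmp);

	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if ((ulong) trans < i->iter)	/* already emitted */
			continue;
		i->iter = (ulong) trans;	/* remember our position */
		/* ... print this transaction; may restart from here ... */
	}

Unlike a PID, the pointer identifies the btree_trans object itself, so a
cached transaction being reused by a new task cannot perturb the iteration
order mid-scan.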

index 15c1c7cfefe604b83a7510ba8ae836bfdc96702f..0ed9e6574fcd0db7ca70deb197a19faaa57498aa 100644 (file)
@@ -3149,15 +3149,10 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
                        BUG_ON(pos_task &&
                               pid == pos_task->pid &&
                               pos->locked);
-
-                       if (pos_task && pid < pos_task->pid) {
-                               list_add_tail(&trans->list, &pos->list);
-                               goto list_add_done;
-                       }
                }
        }
-       list_add_tail(&trans->list, &c->btree_trans_list);
-list_add_done:
+
+       list_add(&trans->list, &c->btree_trans_list);
        seqmutex_unlock(&c->btree_trans_lock);
 got_trans:
        trans->c                = c;
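
In the debug.c hunk that follows, list_ptr_order_cmp() leans on bcachefs's
cmp_int() helper. For context, that macro (defined in fs/bcachefs/util.h,
reproduced here from the tree, not part of this commit) is the usual
overflow-safe three-way comparison:

	/* Three-way compare: returns -1, 0 or 1 without risking overflow. */
	#define cmp_int(l, r)		((l > r) - (l < r))
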
index 61c50522abb955c337b732ff233ffec45b54553d..f0d4727c4dc290970aef02b9dcb921641a018153 100644 (file)
@@ -568,6 +568,32 @@ static const struct file_operations cached_btree_nodes_ops = {
        .read           = bch2_cached_btree_nodes_read,
 };
 
+typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r);
+
+static void list_sort(struct list_head *head, list_cmp_fn cmp)
+{
+       struct list_head *pos;
+
+       list_for_each(pos, head)
+               while (!list_is_last(pos, head) &&
+                      cmp(pos, pos->next) > 0) {
+                       struct list_head *pos2, *next = pos->next;
+
+                       list_del(next);
+                       list_for_each(pos2, head)
+                               if (cmp(next, pos2) < 0)
+                                       goto pos_found;
+                       BUG();
+pos_found:
+                       list_add_tail(next, pos2);
+               }
+}
+
+static int list_ptr_order_cmp(const struct list_head *l, const struct list_head *r)
+{
+       return cmp_int(l, r);
+}
+
 static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
                                            size_t size, loff_t *ppos)
 {
@@ -581,12 +607,14 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
        i->ret  = 0;
 restart:
        seqmutex_lock(&c->btree_trans_lock);
-       list_for_each_entry(trans, &c->btree_trans_list, list) {
-               struct task_struct *task = READ_ONCE(trans->locking_wait.task);
+       list_sort(&c->btree_trans_list, list_ptr_order_cmp);
 
-               if (!task || task->pid <= i->iter)
+       list_for_each_entry(trans, &c->btree_trans_list, list) {
+               if ((ulong) trans < i->iter)
                        continue;
 
+               i->iter = (ulong) trans;
+
                if (!closure_get_not_zero(&trans->ref))
                        continue;
 
@@ -596,7 +624,7 @@ restart:
 
                prt_printf(&i->buf, "backtrace:\n");
                printbuf_indent_add(&i->buf, 2);
-               bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
+               bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task, 0, GFP_KERNEL);
                printbuf_indent_sub(&i->buf, 2);
                prt_newline(&i->buf);
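
The list_sort() added above is a simple O(n^2) insertion sort over
list_head, which is fine for a debug path that only runs when the debugfs
file is read. A self-contained userspace translation of it (the list
primitives below are minimal stand-ins for <linux/list.h>, written here
purely for illustration) exercises the same logic:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Minimal stand-ins for the kernel's <linux/list.h> primitives,
	 * just enough to run the insertion-sort logic in userspace. */
	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	#define list_is_last(pos, head)	((pos)->next == (head))
	#define list_for_each(pos, head) \
		for (pos = (head)->next; pos != (head); pos = pos->next)
	#define container_of(ptr, type, member) \
		((type *) ((char *) (uintptr_t) (ptr) - offsetof(type, member)))

	typedef int (*list_cmp_fn)(const struct list_head *, const struct list_head *);

	/* Same algorithm as the list_sort() added in debug.c: whenever an
	 * adjacent pair is out of order, unlink the second element and
	 * re-insert it at its proper place, scanning from the front. */
	static void list_sort(struct list_head *head, list_cmp_fn cmp)
	{
		struct list_head *pos;

		list_for_each(pos, head)
			while (!list_is_last(pos, head) &&
			       cmp(pos, pos->next) > 0) {
				struct list_head *pos2, *next = pos->next;

				list_del(next);
				list_for_each(pos2, head)
					if (cmp(next, pos2) < 0)
						goto pos_found;
				assert(0);	/* unreachable; stands in for BUG() */
	pos_found:
				list_add_tail(next, pos2);	/* insert before pos2 */
			}
	}

	struct item { int v; struct list_head list; };

	static int item_cmp(const struct list_head *l, const struct list_head *r)
	{
		int a = container_of(l, struct item, list)->v;
		int b = container_of(r, struct item, list)->v;

		return (a > b) - (a < b);	/* same shape as cmp_int() */
	}

	int main(void)
	{
		struct list_head head = LIST_HEAD_INIT(head);
		struct item items[] = { {3}, {1}, {4}, {1}, {5}, {9}, {2}, {6} };
		struct list_head *pos;

		for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++)
			list_add_tail(&items[i].list, &head);

		list_sort(&head, item_cmp);

		list_for_each(pos, &head)
			printf("%d ", container_of(pos, struct item, list)->v);
		printf("\n");	/* prints: 1 1 2 3 4 5 6 9 */
		return 0;
	}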