We now include backtraces for every thread involved in the cycle. To
support this, bch2_save_backtrace() and bch2_prt_task_backtrace() gain
a skipnr parameter so callers can trim their own frames from the saved
stack, and the would_deadlock tracepoint moves to a new
trans_str_nocaller event class: now that the cycle description carries
full backtraces, the separate caller_ip argument is redundant.
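
The skipnr argument counts entries to drop from the top of the task's
saved stack; each wrapper adds one internally so its own frame is
hidden (hence the skipnr + 1 in the calls below). print_cycle() skips
more frames for the first entry because that is the thread running
cycle detection, and its top frames are the detection machinery itself.

A minimal sketch of the updated helper from a caller's point of view
(the task pointer here is illustrative, not part of this patch):

	struct printbuf buf = PRINTBUF;

	/* skipnr == 0: keep every frame below the helper itself */
	if (!bch2_prt_task_backtrace(&buf, task, 0))
		bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);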
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
        prt_printf(out, "Found lock cycle (%u entries):", g->nr);
        prt_newline(out);
 
-       for (i = g->g; i < g->g + g->nr; i++)
+       for (i = g->g; i < g->g + g->nr; i++) {
+               struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
+               if (!task)
+                       continue;
+
                bch2_btree_trans_to_text(out, i->trans);
+               bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1);
+       }
 }
 
 static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
        return false;
 }
 
-static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans,
-                                unsigned long ip)
+static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
 {
        struct bch_fs *c = trans->c;
 
                buf.atomic++;
                print_cycle(&buf, g);
 
-               trace_trans_restart_would_deadlock(trans, ip, buf.buf);
+               trace_trans_restart_would_deadlock(trans, buf.buf);
                printbuf_exit(&buf);
        }
 }
 static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
 {
        if (i == g->g) {
-               trace_would_deadlock(g, i->trans, _RET_IP_);
+               trace_would_deadlock(g, i->trans);
                return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
        } else {
                i->trans->lock_must_abort = true;
                        prt_printf(&buf, "backtrace:");
                        prt_newline(&buf);
                        printbuf_indent_add(&buf, 2);
-                       bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
+                       bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2);
                        printbuf_indent_sub(&buf, 2);
                        prt_newline(&buf);
                }
                if (cycle)
                        return -1;
 
-               trace_would_deadlock(&g, trans, _RET_IP_);
+               trace_would_deadlock(&g, trans);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
        }
 
 
                prt_printf(&i->buf, "backtrace:");
                prt_newline(&i->buf);
                printbuf_indent_add(&i->buf, 2);
-               bch2_prt_task_backtrace(&i->buf, task);
+               bch2_prt_task_backtrace(&i->buf, task, 0);
                printbuf_indent_sub(&i->buf, 2);
                prt_newline(&i->buf);
 
 
                  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
 );
 
+DECLARE_EVENT_CLASS(trans_str_nocaller,
+       TP_PROTO(struct btree_trans *trans, const char *str),
+       TP_ARGS(trans, str),
+
+       TP_STRUCT__entry(
+               __field(dev_t,          dev                     )
+               __array(char,           trans_fn, 32            )
+               __string(str,           str                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = trans->c->dev;
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __assign_str(str, str);
+       ),
+
+       TP_printk("%d,%d %s %s",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->trans_fn, __get_str(str))
+);
+
 DECLARE_EVENT_CLASS(btree_node_nofs,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b),
        TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(trans_str, trans_restart_would_deadlock,
+DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
        TP_PROTO(struct btree_trans *trans,
-                unsigned long caller_ip,
                 const char *cycle),
-       TP_ARGS(trans, caller_ip, cycle)
+       TP_ARGS(trans, cycle)
 );
 
 DEFINE_EVENT(transaction_event,        trans_restart_would_deadlock_recursion_limit,
 
        console_unlock();
 }
 
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task)
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr)
 {
 #ifdef CONFIG_STACKTRACE
        unsigned nr_entries = 0;
                return -1;
 
        do {
-               nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, 0);
+               nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, skipnr + 1);
        } while (nr_entries == stack->size &&
                 !(ret = darray_make_room(stack, stack->size * 2)));
 
        }
 }
 
-int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task)
+int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr)
 {
        bch_stacktrace stack = { 0 };
-       int ret = bch2_save_backtrace(&stack, task);
+       int ret = bch2_save_backtrace(&stack, task, skipnr + 1);
 
        bch2_prt_backtrace(out, &stack);
        darray_exit(&stack);
 
 void bch2_print_string_as_lines(const char *prefix, const char *lines);
 
 typedef DARRAY(unsigned long) bch_stacktrace;
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *);
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned);
 void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
-int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *);
+int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned);
 
 #define NR_QUANTILES   15
 #define QUANTILE_IDX(i)        inorder_to_eytzinger0(i, NR_QUANTILES)