#include <linux/freezer.h>
 #include <linux/workqueue.h>
 #include "async-thread.h"
+#include "ctree.h"
 
 #define WORK_DONE_BIT 0
 #define WORK_ORDER_DONE_BIT 1
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
+               trace_btrfs_ordered_sched(work);
                spin_unlock_irqrestore(lock, flags);
                work->ordered_func(work);
 
                 * with the lock held though
                 */
                work->ordered_free(work);
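+               /*
+                * The work item may already be freed by ordered_free()
+                * above; btrfs_all_work_done records only the pointer
+                * value and never dereferences it.
+                */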
+               trace_btrfs_all_work_done(work);
        }
        spin_unlock_irqrestore(lock, flags);
 }
                need_order = 1;
        wq = work->wq;
 
+       trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq);
        }
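+       /*
+        * Ordered work reports completion from run_ordered_work() once
+        * ordered_free has run; only unordered work is traced here.
+        */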
+       if (!need_order)
+               trace_btrfs_all_work_done(work);
 }
 
 void btrfs_init_work(struct btrfs_work *work,
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
        queue_work(wq->normal_wq, &work->normal_work);
+       trace_btrfs_work_queued(work);
 }
 
 void btrfs_queue_work(struct btrfs_workqueue *wq,
 
 struct btrfs_free_cluster;
 struct map_lookup;
 struct extent_buffer;
+struct btrfs_work;
 
 #define show_ref_type(type)                                            \
        __print_symbolic(type,                                          \
                  (void *)__entry->ip)
 );
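+
+/*
+ * btrfs_work lifecycle tracepoints: the common class below records the
+ * work item together with its workqueue and callback pointers.
+ */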
 
+DECLARE_EVENT_CLASS(btrfs__work,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field(        void *, work                    )
+               __field(        void *, wq                      )
+               __field(        void *, func                    )
+               __field(        void *, ordered_func            )
+               __field(        void *, ordered_free            )
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+               __entry->wq             = work->wq;
+               __entry->func           = work->func;
+               __entry->ordered_func   = work->ordered_func;
+               __entry->ordered_free   = work->ordered_free;
+       ),
+
+       TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
+                 __entry->work, __entry->wq, __entry->func,
+                 __entry->ordered_func, __entry->ordered_free)
+);
+
+/* For situations where the work is freed */
+DECLARE_EVENT_CLASS(btrfs__work__done,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field(        void *, work                    )
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+       ),
+
+       TP_printk("work->%p", __entry->work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_queued,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_sched,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */