]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
btrfs: Add ftrace for btrfs_workqueue
authorQu Wenruo <quwenruo@cn.fujitsu.com>
Thu, 6 Mar 2014 04:19:50 +0000 (04:19 +0000)
committerJosef Bacik <jbacik@fb.com>
Mon, 10 Mar 2014 19:17:21 +0000 (15:17 -0400)
Add ftrace for btrfs_workqueue for further workqueue tuning.
This patch needs to be applied after the workqueue replacement patchset.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
fs/btrfs/async-thread.c
include/trace/events/btrfs.h

index d8c07e5c1f24bbc24c1dcd62ad125d9734849e5d..00623dd16b81467ab2f164b445bf74981f5c0a6f 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/freezer.h>
 #include <linux/workqueue.h>
 #include "async-thread.h"
+#include "ctree.h"
 
 #define WORK_DONE_BIT 0
 #define WORK_ORDER_DONE_BIT 1
@@ -210,6 +211,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
+               trace_btrfs_ordered_sched(work);
                spin_unlock_irqrestore(lock, flags);
                work->ordered_func(work);
 
@@ -223,6 +225,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
                 * with the lock held though
                 */
                work->ordered_free(work);
+               trace_btrfs_all_work_done(work);
        }
        spin_unlock_irqrestore(lock, flags);
 }
@@ -246,12 +249,15 @@ static void normal_work_helper(struct work_struct *arg)
                need_order = 1;
        wq = work->wq;
 
+       trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq);
        }
+       if (!need_order)
+               trace_btrfs_all_work_done(work);
 }
 
 void btrfs_init_work(struct btrfs_work *work,
@@ -280,6 +286,7 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
        queue_work(wq->normal_wq, &work->normal_work);
+       trace_btrfs_work_queued(work);
 }
 
 void btrfs_queue_work(struct btrfs_workqueue *wq,
index 3176cdc32937f4b62cbcd11e878310f1b086f15e..c346919254a907eff9b862057d606a13c259f1dd 100644 (file)
@@ -21,6 +21,7 @@ struct btrfs_block_group_cache;
 struct btrfs_free_cluster;
 struct map_lookup;
 struct extent_buffer;
+struct btrfs_work;
 
 #define show_ref_type(type)                                            \
        __print_symbolic(type,                                          \
@@ -982,6 +983,87 @@ TRACE_EVENT(free_extent_state,
                  (void *)__entry->ip)
 );
 
+DECLARE_EVENT_CLASS(btrfs__work,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field(        void *, work                    )
+               __field(        void *, wq                      )
+               __field(        void *, func                    )
+               __field(        void *, ordered_func            )
+               __field(        void *, ordered_free            )
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+               __entry->wq             = work->wq;
+               __entry->func           = work->func;
+               __entry->ordered_func   = work->ordered_func;
+               __entry->ordered_free   = work->ordered_free;
+       ),
+
+       TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
+                 __entry->work, __entry->wq, __entry->func,
+                 __entry->ordered_func, __entry->ordered_free)
+);
+
+/* For situations where the work is freed */
+DECLARE_EVENT_CLASS(btrfs__work__done,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field(        void *, work                    )
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+       ),
+
+       TP_printk("work->%p", __entry->work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_queued,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_sched,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
+
+       TP_PROTO(struct btrfs_work *work),
+
+       TP_ARGS(work)
+);
+
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */