/*
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 */
7 #include <linux/module.h>
9 #include <linux/debugfs.h>
10 #include <linux/kallsyms.h>
11 #include <linux/uaccess.h>
12 #include <linux/ftrace.h>
13 #include <trace/sched.h>
17 static struct trace_array *ctx_trace;
18 static int __read_mostly tracer_enabled;
19 static atomic_t sched_ref;
22 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
23 struct task_struct *next)
25 struct trace_array_cpu *data;
30 if (!atomic_read(&sched_ref))
33 tracing_record_cmdline(prev);
34 tracing_record_cmdline(next);
39 local_irq_save(flags);
40 cpu = raw_smp_processor_id();
41 data = ctx_trace->data[cpu];
42 disabled = atomic_inc_return(&data->disabled);
44 if (likely(disabled == 1))
45 tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);
47 atomic_dec(&data->disabled);
48 local_irq_restore(flags);
52 probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
54 struct trace_array_cpu *data;
59 if (!likely(tracer_enabled))
62 tracing_record_cmdline(current);
64 local_irq_save(flags);
65 cpu = raw_smp_processor_id();
66 data = ctx_trace->data[cpu];
67 disabled = atomic_inc_return(&data->disabled);
69 if (likely(disabled == 1))
70 tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
73 atomic_dec(&data->disabled);
74 local_irq_restore(flags);
77 static void sched_switch_reset(struct trace_array *tr)
81 tr->time_start = ftrace_now(tr->cpu);
83 for_each_online_cpu(cpu)
84 tracing_reset(tr->data[cpu]);
87 static int tracing_sched_register(void)
91 ret = register_trace_sched_wakeup(probe_sched_wakeup);
93 pr_info("wakeup trace: Couldn't activate tracepoint"
94 " probe to kernel_sched_wakeup\n");
98 ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
100 pr_info("wakeup trace: Couldn't activate tracepoint"
101 " probe to kernel_sched_wakeup_new\n");
105 ret = register_trace_sched_switch(probe_sched_switch);
107 pr_info("sched trace: Couldn't activate tracepoint"
108 " probe to kernel_sched_schedule\n");
109 goto fail_deprobe_wake_new;
113 fail_deprobe_wake_new:
114 unregister_trace_sched_wakeup_new(probe_sched_wakeup);
116 unregister_trace_sched_wakeup(probe_sched_wakeup);
120 static void tracing_sched_unregister(void)
122 unregister_trace_sched_switch(probe_sched_switch);
123 unregister_trace_sched_wakeup_new(probe_sched_wakeup);
124 unregister_trace_sched_wakeup(probe_sched_wakeup);
127 static void tracing_start_sched_switch(void)
131 ref = atomic_inc_return(&sched_ref);
133 tracing_sched_register();
136 static void tracing_stop_sched_switch(void)
140 ref = atomic_dec_and_test(&sched_ref);
142 tracing_sched_unregister();
/* Public hook: start pid->comm recording (piggybacks on the probes). */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
/* Public hook: stop pid->comm recording. */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
155 static void start_sched_trace(struct trace_array *tr)
157 sched_switch_reset(tr);
158 tracing_start_cmdline_record();
162 static void stop_sched_trace(struct trace_array *tr)
165 tracing_stop_cmdline_record();
168 static void sched_switch_trace_init(struct trace_array *tr)
173 start_sched_trace(tr);
176 static void sched_switch_trace_reset(struct trace_array *tr)
179 stop_sched_trace(tr);
182 static void sched_switch_trace_ctrl_update(struct trace_array *tr)
184 /* When starting a new trace, reset the buffers */
186 start_sched_trace(tr);
188 stop_sched_trace(tr);
191 static struct tracer sched_switch_trace __read_mostly =
193 .name = "sched_switch",
194 .init = sched_switch_trace_init,
195 .reset = sched_switch_trace_reset,
196 .ctrl_update = sched_switch_trace_ctrl_update,
197 #ifdef CONFIG_FTRACE_SELFTEST
198 .selftest = trace_selftest_startup_sched_switch,
202 __init static int init_sched_switch_trace(void)
206 if (atomic_read(&sched_ref))
207 ret = tracing_sched_register();
209 pr_info("error registering scheduler trace\n");
212 return register_tracer(&sched_switch_trace);
214 device_initcall(init_sched_switch_trace);