/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/pstore.h>
#include <linux/fs.h>

#include "trace.h"
/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;
static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
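
/*
 * Tracer life cycle: init() runs when this tracer is selected via
 * current_tracer; it starts cmdline recording (so PIDs resolve to comm
 * names in the output) and registers the function-entry callback with
 * ftrace. reset() undoes both in the reverse order.
 */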
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
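
/*
 * Callback used when the TRACE_ITER_PREEMPTONLY flag is set: recursion
 * on this CPU is fended off by disabling preemption only, rather than
 * by disabling interrupts as function_trace_call() below does.
 */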
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
	TRACE_FUNC_OPT_PSTORE	= 0x2,
};

static struct tracer_flags func_flags;
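
/*
 * Default entry callback: records one function-entry event per call.
 * The per-CPU "disabled" counter makes recording non-reentrant: if the
 * callback fires again on this CPU before the first instance finishes
 * (e.g. from an NMI), the nested call is dropped.
 */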
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/*
		 * So far tracing doesn't support multiple buffers, so
		 * we make an explicit call for now.
		 */
		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
			pstore_ftrace_call(ip, parent_ip);
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
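
/*
 * Stack-tracing variant, registered in place of function_trace_call()
 * when the func_stack_trace option is set: after the function-entry
 * event it also records a snapshot of the kernel stack.
 */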
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call,
		 *    ftrace_list_func,
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};
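
/*
 * Tracer-specific options. While the function tracer is current they
 * appear under the tracing options/ directory and can be toggled at
 * runtime, e.g.:
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */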
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
	{ } /* Always set a last empty entry */
};
static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}
static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}
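
/*
 * Called when a tracer option is flipped at runtime. Toggling
 * func_stack_trace swaps which ftrace_ops is registered, so the change
 * takes effect without restarting the tracer.
 */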
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;
		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}
		return 0;
	} else if (bit == TRACE_FUNC_OPT_PSTORE) {
		return 0;
	}

	return -EINVAL;
}
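
/*
 * The tracer definition itself. Once registered it is listed in
 * available_tracers and can be selected with:
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 */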
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
#ifdef CONFIG_DYNAMIC_FTRACE
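/*
 * The traceon/traceoff function probes below let a traced function
 * switch the ring buffer on or off, with an optional hit count, e.g.:
 *
 *	echo 'schedule:traceoff:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * switches the ring buffer off the first five times schedule() is hit
 * while tracing is on.
 */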
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	/* nothing to do if tracing is already on or the count is spent */
	if (tracing_is_on() || !*count)
		return;
	if (*count != -1)	/* -1 means unlimited */
		(*count)--;
	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on() || !*count)
		return;
	if (*count != -1)
		(*count)--;
	tracing_off();
}
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func	= ftrace_traceon,
	.print	= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func	= ftrace_traceoff,
	.print	= ftrace_trace_onoff_print,
};
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}
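
/*
 * Parses "function:traceon[:count]" / "function:traceoff[:count]" as
 * written to set_ftrace_filter; a leading '!' unregisters the probe.
 */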
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");
	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name	= "traceon",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name	= "traceoff",
	.func	= ftrace_trace_onoff_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	/* register traceoff first; roll it back if traceon fails */
	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);