kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

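/*
 * Selected at runtime through the standard tracing debugfs interface,
 * e.g. with debugfs mounted at /sys/kernel/debug:
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 */
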
/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

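/*
 * Called when this tracer is selected: remember the trace array, note
 * the CPU we initialized on, and start both cmdline (PID -> comm)
 * recording and the function entry callback.
 */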
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

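/* The ->start callback: clear the per-CPU buffers before tracing restarts. */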
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

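/*
 * Variant used when the preempt-only trace option is set: disable
 * preemption (not interrupts) around the record, so functions hit in
 * interrupt context are still traced.  The per-CPU ->disabled counter
 * keeps a nested entry on the same CPU from recursing into the buffer.
 */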
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

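/*
 * Default entry callback: interrupts are disabled around the record,
 * with the same per-CPU ->disabled recursion guard as above.
 */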
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

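/*
 * Like function_trace_call(), but also record a stack trace with each
 * entry.  Used when the func_stack_trace option is set.
 */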
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

/* Tracer option bits */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

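/*
 * Pick the entry callback that matches the current trace options and
 * register the corresponding ftrace_ops.  The stack trace option can
 * also be flipped while the tracer is running (see func_set_flag()
 * below):
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */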
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

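/*
 * Runtime toggle for the stack trace option: swap which ftrace_ops
 * is registered so the change takes effect immediately.
 */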
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
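/*
 * With dynamic ftrace, "traceon" and "traceoff" probes may be attached
 * to individual functions through set_ftrace_filter, e.g.:
 *
 *	echo 'schedule:traceoff:5' > set_ftrace_filter
 *
 * Each hit turns the ring buffer on or off; the probe data carries the
 * remaining hit count, where -1 means unlimited.
 */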
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

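/*
 * Show a registered probe in the set_ftrace_filter listing, e.g.
 * "schedule:traceoff:count=5", or ":unlimited" when no count was given.
 */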
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

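/*
 * Handle the '!' negation form, e.g.
 *
 *	echo '!schedule:traceoff' > set_ftrace_filter
 *
 * which removes a previously registered probe.
 */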
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

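/*
 * Parse "<function>:traceon[:count]" or "<function>:traceoff[:count]"
 * written to set_ftrace_filter and register the matching probe.  A
 * missing or empty count registers an unlimited probe.
 */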
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

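/*
 * Register both commands; if the second registration fails, back out
 * the first so the pair is never left half installed.
 */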
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

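/* Register the tracer (and the traceon/traceoff commands) at boot. */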
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);