1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/setup.h>
36
37 #include "trace_output.h"
38 #include "trace_stat.h"
39
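/*
 * On an unexpected condition, warn (once for the _ONCE variant) and then
 * call ftrace_kill() to shut ftrace down, since continuing to patch code
 * after an anomaly could corrupt kernel text.  The condition's value is
 * returned so callers can still branch on it.
 */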
40 #define FTRACE_WARN_ON(cond)                    \
41         ({                                      \
42                 int ___r = cond;                \
43                 if (WARN_ON(___r))              \
44                         ftrace_kill();          \
45                 ___r;                           \
46         })
47
48 #define FTRACE_WARN_ON_ONCE(cond)               \
49         ({                                      \
50                 int ___r = cond;                \
51                 if (WARN_ON_ONCE(___r))         \
52                         ftrace_kill();          \
53                 ___r;                           \
54         })
55
56 /* hash bits for specific function selection */
57 #define FTRACE_HASH_BITS 7
58 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
59 #define FTRACE_HASH_DEFAULT_BITS 10
60 #define FTRACE_HASH_MAX_BITS 12
61
62 /* ftrace_enabled is the runtime switch to turn ftrace on or off */
63 int ftrace_enabled __read_mostly;
64 static int last_ftrace_enabled;
65
66 /* Quick disabling of function tracer. */
67 int function_trace_stop;
68
69 /* List for set_ftrace_pid's pids. */
70 LIST_HEAD(ftrace_pids);
71 struct ftrace_pid {
72         struct list_head list;
73         struct pid *pid;
74 };
75
76 /*
77  * ftrace_disabled is set when an anomaly is discovered.
78  * ftrace_disabled is much stronger than ftrace_enabled.
79  */
80 static int ftrace_disabled __read_mostly;
81
82 static DEFINE_MUTEX(ftrace_lock);
83
84 static struct ftrace_ops ftrace_list_end __read_mostly = {
85         .func           = ftrace_stub,
86 };
87
88 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
89 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
90 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
91 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
92 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
93 static struct ftrace_ops global_ops;
94
95 static void
96 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
97
98 /*
99  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
100  * can use rcu_dereference_raw() is that elements removed from this list
101  * are simply leaked, so there is no need to interact with a grace-period
102  * mechanism.  The rcu_dereference_raw() calls are needed to handle
103  * concurrent insertions into the ftrace_global_list.
104  *
105  * Silly Alpha and silly pointer-speculation compiler optimizations!
106  */
107 static void ftrace_global_list_func(unsigned long ip,
108                                     unsigned long parent_ip)
109 {
110         struct ftrace_ops *op;
111
112         if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
113                 return;
114
115         trace_recursion_set(TRACE_GLOBAL_BIT);
116         op = rcu_dereference_raw(ftrace_global_list); /*see above*/
117         while (op != &ftrace_list_end) {
118                 op->func(ip, parent_ip);
119                 op = rcu_dereference_raw(op->next); /*see above*/
120         }
121         trace_recursion_clear(TRACE_GLOBAL_BIT);
122 }
123
124 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
125 {
126         if (!test_tsk_trace_trace(current))
127                 return;
128
129         ftrace_pid_function(ip, parent_ip);
130 }
131
132 static void set_ftrace_pid_function(ftrace_func_t func)
133 {
134         /* do not set ftrace_pid_function to itself! */
135         if (func != ftrace_pid_func)
136                 ftrace_pid_function = func;
137 }
138
139 /**
140  * clear_ftrace_function - reset the ftrace function
141  *
142  * This NULLs the ftrace function and in essence stops
143  * tracing. There may be a brief lag before all CPUs stop calling the old function.
144  */
145 void clear_ftrace_function(void)
146 {
147         ftrace_trace_function = ftrace_stub;
148         __ftrace_trace_function = ftrace_stub;
149         ftrace_pid_function = ftrace_stub;
150 }
151
152 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
153 /*
154  * For those archs that do not test function_trace_stop in their
155  * mcount call site, we need to do it from C.
156  */
157 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
158 {
159         if (function_trace_stop)
160                 return;
161
162         __ftrace_trace_function(ip, parent_ip);
163 }
164 #endif
165
166 static void update_global_ops(void)
167 {
168         ftrace_func_t func;
169
170         /*
171          * If there's only one function registered, then call that
172          * function directly. Otherwise, we need to iterate over the
173          * registered callers.
174          */
175         if (ftrace_global_list == &ftrace_list_end ||
176             ftrace_global_list->next == &ftrace_list_end)
177                 func = ftrace_global_list->func;
178         else
179                 func = ftrace_global_list_func;
180
181         /* If we filter on pids, update to use the pid function */
182         if (!list_empty(&ftrace_pids)) {
183                 set_ftrace_pid_function(func);
184                 func = ftrace_pid_func;
185         }
186
187         global_ops.func = func;
188 }
189
190 static void update_ftrace_function(void)
191 {
192         ftrace_func_t func;
193
194         update_global_ops();
195
196         /*
197          * If we are at the end of the list and this ops is
198          * not dynamic, then have the mcount trampoline call
199          * the function directly
200          */
201         if (ftrace_ops_list == &ftrace_list_end ||
202             (ftrace_ops_list->next == &ftrace_list_end &&
203              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
204                 func = ftrace_ops_list->func;
205         else
206                 func = ftrace_ops_list_func;
207
208 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
209         ftrace_trace_function = func;
210 #else
211         __ftrace_trace_function = func;
212         ftrace_trace_function = ftrace_test_stop_func;
213 #endif
214 }
215
216 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
217 {
218         ops->next = *list;
219         /*
220          * We are entering ops into the list but another
221          * CPU might be walking that list. We need to make sure
222          * the ops->next pointer is valid before another CPU sees
223          * the ops pointer inserted into the list.
224          */
225         rcu_assign_pointer(*list, ops);
226 }
227
228 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
229 {
230         struct ftrace_ops **p;
231
232         /*
233          * If we are removing the last function, then simply point
234          * to the ftrace_stub.
235          */
236         if (*list == ops && ops->next == &ftrace_list_end) {
237                 *list = &ftrace_list_end;
238                 return 0;
239         }
240
241         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
242                 if (*p == ops)
243                         break;
244
245         if (*p != ops)
246                 return -1;
247
248         *p = (*p)->next;
249         return 0;
250 }
251
252 static int __register_ftrace_function(struct ftrace_ops *ops)
253 {
254         if (ftrace_disabled)
255                 return -ENODEV;
256
257         if (FTRACE_WARN_ON(ops == &global_ops))
258                 return -EINVAL;
259
260         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
261                 return -EBUSY;
262
263         if (!core_kernel_data((unsigned long)ops))
264                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
265
266         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
267                 int first = ftrace_global_list == &ftrace_list_end;
268                 add_ftrace_ops(&ftrace_global_list, ops);
269                 ops->flags |= FTRACE_OPS_FL_ENABLED;
270                 if (first)
271                         add_ftrace_ops(&ftrace_ops_list, &global_ops);
272         } else
273                 add_ftrace_ops(&ftrace_ops_list, ops);
274
275         if (ftrace_enabled)
276                 update_ftrace_function();
277
278         return 0;
279 }
280
281 static int __unregister_ftrace_function(struct ftrace_ops *ops)
282 {
283         int ret;
284
285         if (ftrace_disabled)
286                 return -ENODEV;
287
288         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
289                 return -EBUSY;
290
291         if (FTRACE_WARN_ON(ops == &global_ops))
292                 return -EINVAL;
293
294         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
295                 ret = remove_ftrace_ops(&ftrace_global_list, ops);
296                 if (!ret && ftrace_global_list == &ftrace_list_end)
297                         ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
298                 if (!ret)
299                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
300         } else
301                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
302
303         if (ret < 0)
304                 return ret;
305
306         if (ftrace_enabled)
307                 update_ftrace_function();
308
309         /*
310          * Dynamic ops may be freed; we must make sure that all
311          * callers are done before leaving this function.
312          */
313         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
314                 synchronize_sched();
315
316         return 0;
317 }
318
319 static void ftrace_update_pid_func(void)
320 {
321         /* Only do something if we are tracing something */
322         if (ftrace_trace_function == ftrace_stub)
323                 return;
324
325         update_ftrace_function();
326 }
327
328 #ifdef CONFIG_FUNCTION_PROFILER
329 struct ftrace_profile {
330         struct hlist_node               node;
331         unsigned long                   ip;
332         unsigned long                   counter;
333 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
334         unsigned long long              time;
335         unsigned long long              time_squared;
336 #endif
337 };
338
339 struct ftrace_profile_page {
340         struct ftrace_profile_page      *next;
341         unsigned long                   index;
342         struct ftrace_profile           records[];
343 };
344
345 struct ftrace_profile_stat {
346         atomic_t                        disabled;
347         struct hlist_head               *hash;
348         struct ftrace_profile_page      *pages;
349         struct ftrace_profile_page      *start;
350         struct tracer_stat              stat;
351 };
352
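/*
 * Each ftrace_profile_page occupies exactly one page: the header fields
 * are followed by as many ftrace_profile records as fit in the remainder.
 */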
353 #define PROFILE_RECORDS_SIZE                                            \
354         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
355
356 #define PROFILES_PER_PAGE                                       \
357         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
358
359 static int ftrace_profile_bits __read_mostly;
360 static int ftrace_profile_enabled __read_mostly;
361
362 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
363 static DEFINE_MUTEX(ftrace_profile_lock);
364
365 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
366
367 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
368
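/*
 * Advance to the next used profile record, following the page list when
 * the end of the current page is reached.  Returns NULL past the last page.
 */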
369 static void *
370 function_stat_next(void *v, int idx)
371 {
372         struct ftrace_profile *rec = v;
373         struct ftrace_profile_page *pg;
374
375         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
376
377  again:
378         if (idx != 0)
379                 rec++;
380
381         if ((void *)rec >= (void *)&pg->records[pg->index]) {
382                 pg = pg->next;
383                 if (!pg)
384                         return NULL;
385                 rec = &pg->records[0];
386                 if (!rec->counter)
387                         goto again;
388         }
389
390         return rec;
391 }
392
393 static void *function_stat_start(struct tracer_stat *trace)
394 {
395         struct ftrace_profile_stat *stat =
396                 container_of(trace, struct ftrace_profile_stat, stat);
397
398         if (!stat || !stat->start)
399                 return NULL;
400
401         return function_stat_next(&stat->start->records[0], 0);
402 }
403
404 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
405 /* with the function graph tracer, compare by total time */
406 static int function_stat_cmp(void *p1, void *p2)
407 {
408         struct ftrace_profile *a = p1;
409         struct ftrace_profile *b = p2;
410
411         if (a->time < b->time)
412                 return -1;
413         if (a->time > b->time)
414                 return 1;
415         else
416                 return 0;
417 }
418 #else
419 /* without the function graph tracer, compare by hit count */
420 static int function_stat_cmp(void *p1, void *p2)
421 {
422         struct ftrace_profile *a = p1;
423         struct ftrace_profile *b = p2;
424
425         if (a->counter < b->counter)
426                 return -1;
427         if (a->counter > b->counter)
428                 return 1;
429         else
430                 return 0;
431 }
432 #endif
433
434 static int function_stat_headers(struct seq_file *m)
435 {
436 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
437         seq_printf(m, "  Function                               "
438                    "Hit    Time            Avg             s^2\n"
439                       "  --------                               "
440                    "---    ----            ---             ---\n");
441 #else
442         seq_printf(m, "  Function                               Hit\n"
443                       "  --------                               ---\n");
444 #endif
445         return 0;
446 }
447
448 static int function_stat_show(struct seq_file *m, void *v)
449 {
450         struct ftrace_profile *rec = v;
451         char str[KSYM_SYMBOL_LEN];
452         int ret = 0;
453 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
454         static struct trace_seq s;
455         unsigned long long avg;
456         unsigned long long stddev;
457 #endif
458         mutex_lock(&ftrace_profile_lock);
459
460         /* we raced with function_profile_reset() */
461         if (unlikely(rec->counter == 0)) {
462                 ret = -EBUSY;
463                 goto out;
464         }
465
466         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
467         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
468
469 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
470         seq_printf(m, "    ");
471         avg = rec->time;
472         do_div(avg, rec->counter);
473
474         /* Sample variance (s^2) */
475         if (rec->counter <= 1)
476                 stddev = 0;
477         else {
478                 stddev = rec->time_squared - rec->counter * avg * avg;
479                 /*
480                  * Divide by only 1000 for the ns^2 -> us^2 conversion;
481                  * trace_print_graph_duration() will divide by 1000 again.
482                  */
483                 do_div(stddev, (rec->counter - 1) * 1000);
484         }
485
486         trace_seq_init(&s);
487         trace_print_graph_duration(rec->time, &s);
488         trace_seq_puts(&s, "    ");
489         trace_print_graph_duration(avg, &s);
490         trace_seq_puts(&s, "    ");
491         trace_print_graph_duration(stddev, &s);
492         trace_print_seq(m, &s);
493 #endif
494         seq_putc(m, '\n');
495 out:
496         mutex_unlock(&ftrace_profile_lock);
497
498         return ret;
499 }
500
501 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
502 {
503         struct ftrace_profile_page *pg;
504
505         pg = stat->pages = stat->start;
506
507         while (pg) {
508                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
509                 pg->index = 0;
510                 pg = pg->next;
511         }
512
513         memset(stat->hash, 0,
514                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
515 }
516
517 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
518 {
519         struct ftrace_profile_page *pg;
520         int functions;
521         int pages;
522         int i;
523
524         /* If we already allocated, do nothing */
525         if (stat->pages)
526                 return 0;
527
528         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
529         if (!stat->pages)
530                 return -ENOMEM;
531
532 #ifdef CONFIG_DYNAMIC_FTRACE
533         functions = ftrace_update_tot_cnt;
534 #else
535         /*
536          * We do not know the number of functions that exist because
537          * dynamic tracing is what counts them. From past experience
538          * we have around 20K functions. That should be more than enough.
539          * It is highly unlikely we will execute every function in
540          * the kernel.
541          */
542         functions = 20000;
543 #endif
544
545         pg = stat->start = stat->pages;
546
547         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
548
549         for (i = 0; i < pages; i++) {
550                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
551                 if (!pg->next)
552                         goto out_free;
553                 pg = pg->next;
554         }
555
556         return 0;
557
558  out_free:
559         pg = stat->start;
560         while (pg) {
561                 unsigned long tmp = (unsigned long)pg;
562
563                 pg = pg->next;
564                 free_page(tmp);
565         }
566
567         free_page((unsigned long)stat->pages);
568         stat->pages = NULL;
569         stat->start = NULL;
570
571         return -ENOMEM;
572 }
573
574 static int ftrace_profile_init_cpu(int cpu)
575 {
576         struct ftrace_profile_stat *stat;
577         int size;
578
579         stat = &per_cpu(ftrace_profile_stats, cpu);
580
581         if (stat->hash) {
582                 /* If the profile is already created, simply reset it */
583                 ftrace_profile_reset(stat);
584                 return 0;
585         }
586
587         /*
588          * We are profiling all functions, but usually only a few thousand
589          * functions are hit. We'll make a hash of 1024 items.
590          */
591         size = FTRACE_PROFILE_HASH_SIZE;
592
593         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
594
595         if (!stat->hash)
596                 return -ENOMEM;
597
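        /*
         * ftrace_profile_bits is log2(FTRACE_PROFILE_HASH_SIZE); compute it
         * once, it is the shift used by hash_long() when indexing stat->hash.
         */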
598         if (!ftrace_profile_bits) {
599                 size--;
600
601                 for (; size; size >>= 1)
602                         ftrace_profile_bits++;
603         }
604
605         /* Preallocate the function profiling pages */
606         if (ftrace_profile_pages_init(stat) < 0) {
607                 kfree(stat->hash);
608                 stat->hash = NULL;
609                 return -ENOMEM;
610         }
611
612         return 0;
613 }
614
615 static int ftrace_profile_init(void)
616 {
617         int cpu;
618         int ret = 0;
619
620         for_each_online_cpu(cpu) {
621                 ret = ftrace_profile_init_cpu(cpu);
622                 if (ret)
623                         break;
624         }
625
626         return ret;
627 }
628
629 /* interrupts must be disabled */
630 static struct ftrace_profile *
631 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
632 {
633         struct ftrace_profile *rec;
634         struct hlist_head *hhd;
635         struct hlist_node *n;
636         unsigned long key;
637
638         key = hash_long(ip, ftrace_profile_bits);
639         hhd = &stat->hash[key];
640
641         if (hlist_empty(hhd))
642                 return NULL;
643
644         hlist_for_each_entry_rcu(rec, n, hhd, node) {
645                 if (rec->ip == ip)
646                         return rec;
647         }
648
649         return NULL;
650 }
651
652 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
653                                struct ftrace_profile *rec)
654 {
655         unsigned long key;
656
657         key = hash_long(rec->ip, ftrace_profile_bits);
658         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
659 }
660
661 /*
662  * The memory is already allocated; this simply finds a new record to use.
663  */
664 static struct ftrace_profile *
665 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
666 {
667         struct ftrace_profile *rec = NULL;
668
669         /* prevent recursion (from NMIs) */
670         if (atomic_inc_return(&stat->disabled) != 1)
671                 goto out;
672
673         /*
674          * Try to find the function again since an NMI
675          * could have added it
676          */
677         rec = ftrace_find_profiled_func(stat, ip);
678         if (rec)
679                 goto out;
680
681         if (stat->pages->index == PROFILES_PER_PAGE) {
682                 if (!stat->pages->next)
683                         goto out;
684                 stat->pages = stat->pages->next;
685         }
686
687         rec = &stat->pages->records[stat->pages->index++];
688         rec->ip = ip;
689         ftrace_add_profile(stat, rec);
690
691  out:
692         atomic_dec(&stat->disabled);
693
694         return rec;
695 }
696
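/*
 * Function tracer callback for the profiler: look up (or allocate) the
 * record for this ip on the local CPU and bump its hit counter.  Interrupts
 * are disabled around the per-cpu accesses.
 */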
697 static void
698 function_profile_call(unsigned long ip, unsigned long parent_ip)
699 {
700         struct ftrace_profile_stat *stat;
701         struct ftrace_profile *rec;
702         unsigned long flags;
703
704         if (!ftrace_profile_enabled)
705                 return;
706
707         local_irq_save(flags);
708
709         stat = &__get_cpu_var(ftrace_profile_stats);
710         if (!stat->hash || !ftrace_profile_enabled)
711                 goto out;
712
713         rec = ftrace_find_profiled_func(stat, ip);
714         if (!rec) {
715                 rec = ftrace_profile_alloc(stat, ip);
716                 if (!rec)
717                         goto out;
718         }
719
720         rec->counter++;
721  out:
722         local_irq_restore(flags);
723 }
724
725 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
726 static int profile_graph_entry(struct ftrace_graph_ent *trace)
727 {
728         function_profile_call(trace->func, 0);
729         return 1;
730 }
731
732 static void profile_graph_return(struct ftrace_graph_ret *trace)
733 {
734         struct ftrace_profile_stat *stat;
735         unsigned long long calltime;
736         struct ftrace_profile *rec;
737         unsigned long flags;
738
739         local_irq_save(flags);
740         stat = &__get_cpu_var(ftrace_profile_stats);
741         if (!stat->hash || !ftrace_profile_enabled)
742                 goto out;
743
744         /* If the calltime was zero'd, ignore it */
745         if (!trace->calltime)
746                 goto out;
747
748         calltime = trace->rettime - trace->calltime;
749
750         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
751                 int index;
752
753                 index = trace->depth;
754
755                 /* Append this call time to the parent time to subtract */
756                 if (index)
757                         current->ret_stack[index - 1].subtime += calltime;
758
759                 if (current->ret_stack[index].subtime < calltime)
760                         calltime -= current->ret_stack[index].subtime;
761                 else
762                         calltime = 0;
763         }
764
765         rec = ftrace_find_profiled_func(stat, trace->func);
766         if (rec) {
767                 rec->time += calltime;
768                 rec->time_squared += calltime * calltime;
769         }
770
771  out:
772         local_irq_restore(flags);
773 }
774
775 static int register_ftrace_profiler(void)
776 {
777         return register_ftrace_graph(&profile_graph_return,
778                                      &profile_graph_entry);
779 }
780
781 static void unregister_ftrace_profiler(void)
782 {
783         unregister_ftrace_graph();
784 }
785 #else
786 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
787         .func           = function_profile_call,
788 };
789
790 static int register_ftrace_profiler(void)
791 {
792         return register_ftrace_function(&ftrace_profile_ops);
793 }
794
795 static void unregister_ftrace_profiler(void)
796 {
797         unregister_ftrace_function(&ftrace_profile_ops);
798 }
799 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
800
801 static ssize_t
802 ftrace_profile_write(struct file *filp, const char __user *ubuf,
803                      size_t cnt, loff_t *ppos)
804 {
805         unsigned long val;
806         int ret;
807
808         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
809         if (ret)
810                 return ret;
811
812         val = !!val;
813
814         mutex_lock(&ftrace_profile_lock);
815         if (ftrace_profile_enabled ^ val) {
816                 if (val) {
817                         ret = ftrace_profile_init();
818                         if (ret < 0) {
819                                 cnt = ret;
820                                 goto out;
821                         }
822
823                         ret = register_ftrace_profiler();
824                         if (ret < 0) {
825                                 cnt = ret;
826                                 goto out;
827                         }
828                         ftrace_profile_enabled = 1;
829                 } else {
830                         ftrace_profile_enabled = 0;
831                         /*
832                          * unregister_ftrace_profiler() calls stop_machine(),
833                          * so this acts like a synchronize_sched().
834                          */
835                         unregister_ftrace_profiler();
836                 }
837         }
838  out:
839         mutex_unlock(&ftrace_profile_lock);
840
841         *ppos += cnt;
842
843         return cnt;
844 }
845
846 static ssize_t
847 ftrace_profile_read(struct file *filp, char __user *ubuf,
848                      size_t cnt, loff_t *ppos)
849 {
850         char buf[64];           /* big enough to hold a number */
851         int r;
852
853         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
854         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
855 }
856
857 static const struct file_operations ftrace_profile_fops = {
858         .open           = tracing_open_generic,
859         .read           = ftrace_profile_read,
860         .write          = ftrace_profile_write,
861         .llseek         = default_llseek,
862 };
863
864 /* used to initialize the real stat files */
865 static struct tracer_stat function_stats __initdata = {
866         .name           = "functions",
867         .stat_start     = function_stat_start,
868         .stat_next      = function_stat_next,
869         .stat_cmp       = function_stat_cmp,
870         .stat_headers   = function_stat_headers,
871         .stat_show      = function_stat_show
872 };
873
874 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
875 {
876         struct ftrace_profile_stat *stat;
877         struct dentry *entry;
878         char *name;
879         int ret;
880         int cpu;
881
882         for_each_possible_cpu(cpu) {
883                 stat = &per_cpu(ftrace_profile_stats, cpu);
884
885                 /* allocate enough for function name + cpu number */
886                 name = kmalloc(32, GFP_KERNEL);
887                 if (!name) {
888                         /*
889                          * The files created are permanent; even if something
890                          * goes wrong here we still do not free memory.
891                          */
892                         WARN(1,
893                              "Could not allocate stat file for cpu %d\n",
894                              cpu);
895                         return;
896                 }
897                 stat->stat = function_stats;
898                 snprintf(name, 32, "function%d", cpu);
899                 stat->stat.name = name;
900                 ret = register_stat_tracer(&stat->stat);
901                 if (ret) {
902                         WARN(1,
903                              "Could not register function stat for cpu %d\n",
904                              cpu);
905                         kfree(name);
906                         return;
907                 }
908         }
909
910         entry = debugfs_create_file("function_profile_enabled", 0644,
911                                     d_tracer, NULL, &ftrace_profile_fops);
912         if (!entry)
913                 pr_warning("Could not create debugfs "
914                            "'function_profile_enabled' entry\n");
915 }
916
917 #else /* CONFIG_FUNCTION_PROFILER */
918 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
919 {
920 }
921 #endif /* CONFIG_FUNCTION_PROFILER */
922
923 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
924
925 #ifdef CONFIG_DYNAMIC_FTRACE
926
927 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
928 # error Dynamic ftrace depends on MCOUNT_RECORD
929 #endif
930
931 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
932
933 struct ftrace_func_probe {
934         struct hlist_node       node;
935         struct ftrace_probe_ops *ops;
936         unsigned long           flags;
937         unsigned long           ip;
938         void                    *data;
939         struct rcu_head         rcu;
940 };
941
942 enum {
943         FTRACE_ENABLE_CALLS             = (1 << 0),
944         FTRACE_DISABLE_CALLS            = (1 << 1),
945         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
946         FTRACE_START_FUNC_RET           = (1 << 3),
947         FTRACE_STOP_FUNC_RET            = (1 << 4),
948 };
949 struct ftrace_func_entry {
950         struct hlist_node hlist;
951         unsigned long ip;
952 };
953
954 struct ftrace_hash {
955         unsigned long           size_bits;
956         struct hlist_head       *buckets;
957         unsigned long           count;
958         struct rcu_head         rcu;
959 };
960
961 /*
962  * We make these constant because no one should touch them,
963  * but they are used as the default "empty hash", to avoid allocating
964  * it all the time. These are in a read only section such that if
965  * anyone does try to modify it, it will cause an exception.
966  */
967 static const struct hlist_head empty_buckets[1];
968 static const struct ftrace_hash empty_hash = {
969         .buckets = (struct hlist_head *)empty_buckets,
970 };
971 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
972
973 static struct ftrace_ops global_ops = {
974         .func                   = ftrace_stub,
975         .notrace_hash           = EMPTY_HASH,
976         .filter_hash            = EMPTY_HASH,
977 };
978
979 static struct dyn_ftrace *ftrace_new_addrs;
980
981 static DEFINE_MUTEX(ftrace_regex_lock);
982
983 struct ftrace_page {
984         struct ftrace_page      *next;
985         int                     index;
986         struct dyn_ftrace       records[];
987 };
988
989 #define ENTRIES_PER_PAGE \
990   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
991
992 /* estimate from running different kernels */
993 #define NR_TO_INIT              10000
994
995 static struct ftrace_page       *ftrace_pages_start;
996 static struct ftrace_page       *ftrace_pages;
997
998 static struct dyn_ftrace *ftrace_free_records;
999
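/*
 * Look up @ip in @hash and return the matching entry, or NULL if the hash
 * is empty or does not contain the ip.
 */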
1000 static struct ftrace_func_entry *
1001 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1002 {
1003         unsigned long key;
1004         struct ftrace_func_entry *entry;
1005         struct hlist_head *hhd;
1006         struct hlist_node *n;
1007
1008         if (!hash->count)
1009                 return NULL;
1010
1011         if (hash->size_bits > 0)
1012                 key = hash_long(ip, hash->size_bits);
1013         else
1014                 key = 0;
1015
1016         hhd = &hash->buckets[key];
1017
1018         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1019                 if (entry->ip == ip)
1020                         return entry;
1021         }
1022         return NULL;
1023 }
1024
1025 static void __add_hash_entry(struct ftrace_hash *hash,
1026                              struct ftrace_func_entry *entry)
1027 {
1028         struct hlist_head *hhd;
1029         unsigned long key;
1030
1031         if (hash->size_bits)
1032                 key = hash_long(entry->ip, hash->size_bits);
1033         else
1034                 key = 0;
1035
1036         hhd = &hash->buckets[key];
1037         hlist_add_head(&entry->hlist, hhd);
1038         hash->count++;
1039 }
1040
1041 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1042 {
1043         struct ftrace_func_entry *entry;
1044
1045         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1046         if (!entry)
1047                 return -ENOMEM;
1048
1049         entry->ip = ip;
1050         __add_hash_entry(hash, entry);
1051
1052         return 0;
1053 }
1054
1055 static void
1056 free_hash_entry(struct ftrace_hash *hash,
1057                   struct ftrace_func_entry *entry)
1058 {
1059         hlist_del(&entry->hlist);
1060         kfree(entry);
1061         hash->count--;
1062 }
1063
1064 static void
1065 remove_hash_entry(struct ftrace_hash *hash,
1066                   struct ftrace_func_entry *entry)
1067 {
1068         hlist_del(&entry->hlist);
1069         hash->count--;
1070 }
1071
1072 static void ftrace_hash_clear(struct ftrace_hash *hash)
1073 {
1074         struct hlist_head *hhd;
1075         struct hlist_node *tp, *tn;
1076         struct ftrace_func_entry *entry;
1077         int size = 1 << hash->size_bits;
1078         int i;
1079
1080         if (!hash->count)
1081                 return;
1082
1083         for (i = 0; i < size; i++) {
1084                 hhd = &hash->buckets[i];
1085                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1086                         free_hash_entry(hash, entry);
1087         }
1088         FTRACE_WARN_ON(hash->count);
1089 }
1090
1091 static void free_ftrace_hash(struct ftrace_hash *hash)
1092 {
1093         if (!hash || hash == EMPTY_HASH)
1094                 return;
1095         ftrace_hash_clear(hash);
1096         kfree(hash->buckets);
1097         kfree(hash);
1098 }
1099
1100 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1101 {
1102         struct ftrace_hash *hash;
1103
1104         hash = container_of(rcu, struct ftrace_hash, rcu);
1105         free_ftrace_hash(hash);
1106 }
1107
1108 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1109 {
1110         if (!hash || hash == EMPTY_HASH)
1111                 return;
1112         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1113 }
1114
1115 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1116 {
1117         struct ftrace_hash *hash;
1118         int size;
1119
1120         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1121         if (!hash)
1122                 return NULL;
1123
1124         size = 1 << size_bits;
1125         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1126
1127         if (!hash->buckets) {
1128                 kfree(hash);
1129                 return NULL;
1130         }
1131
1132         hash->size_bits = size_bits;
1133
1134         return hash;
1135 }
1136
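/*
 * Allocate a new hash with 1 << size_bits buckets and copy every entry of
 * @hash into it.  Returns NULL on allocation failure.
 */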
1137 static struct ftrace_hash *
1138 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1139 {
1140         struct ftrace_func_entry *entry;
1141         struct ftrace_hash *new_hash;
1142         struct hlist_node *tp;
1143         int size;
1144         int ret;
1145         int i;
1146
1147         new_hash = alloc_ftrace_hash(size_bits);
1148         if (!new_hash)
1149                 return NULL;
1150
1151         /* Empty hash? */
1152         if (!hash || !hash->count)
1153                 return new_hash;
1154
1155         size = 1 << hash->size_bits;
1156         for (i = 0; i < size; i++) {
1157                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1158                         ret = add_hash_entry(new_hash, entry->ip);
1159                         if (ret < 0)
1160                                 goto free_hash;
1161                 }
1162         }
1163
1164         FTRACE_WARN_ON(new_hash->count != hash->count);
1165
1166         return new_hash;
1167
1168  free_hash:
1169         free_ftrace_hash(new_hash);
1170         return NULL;
1171 }
1172
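/*
 * Move all entries of @src into a freshly sized hash and publish it in
 * *@dst with rcu_assign_pointer(); the old *@dst is freed after a grace
 * period.  @src is left empty.
 */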
1173 static int
1174 ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1175 {
1176         struct ftrace_func_entry *entry;
1177         struct hlist_node *tp, *tn;
1178         struct hlist_head *hhd;
1179         struct ftrace_hash *old_hash;
1180         struct ftrace_hash *new_hash;
1181         unsigned long key;
1182         int size = src->count;
1183         int bits = 0;
1184         int i;
1185
1186         /*
1187          * If the new source is empty, just free dst and assign it
1188          * the empty_hash.
1189          */
1190         if (!src->count) {
1191                 free_ftrace_hash_rcu(*dst);
1192                 rcu_assign_pointer(*dst, EMPTY_HASH);
1193                 return 0;
1194         }
1195
1196         /*
1197          * Make the hash size about 1/2 the # found
1198          */
1199         for (size /= 2; size; size >>= 1)
1200                 bits++;
1201
1202         /* Don't allocate too much */
1203         if (bits > FTRACE_HASH_MAX_BITS)
1204                 bits = FTRACE_HASH_MAX_BITS;
1205
1206         new_hash = alloc_ftrace_hash(bits);
1207         if (!new_hash)
1208                 return -ENOMEM;
1209
1210         size = 1 << src->size_bits;
1211         for (i = 0; i < size; i++) {
1212                 hhd = &src->buckets[i];
1213                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1214                         if (bits > 0)
1215                                 key = hash_long(entry->ip, bits);
1216                         else
1217                                 key = 0;
1218                         remove_hash_entry(src, entry);
1219                         __add_hash_entry(new_hash, entry);
1220                 }
1221         }
1222
1223         old_hash = *dst;
1224         rcu_assign_pointer(*dst, new_hash);
1225         free_ftrace_hash_rcu(old_hash);
1226
1227         return 0;
1228 }
1229
1230 /*
1231  * Test the hashes for this ops to see if we want to call
1232  * the ops->func or not.
1233  *
1234  * It's a match if the ip is in the ops->filter_hash or
1235  * the filter_hash does not exist or is empty,
1236  *  AND
1237  * the ip is not in the ops->notrace_hash.
1238  *
1239  * This needs to be called with preemption disabled as
1240  * the hashes are freed with call_rcu_sched().
1241  */
1242 static int
1243 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1244 {
1245         struct ftrace_hash *filter_hash;
1246         struct ftrace_hash *notrace_hash;
1247         int ret;
1248
1249         filter_hash = rcu_dereference_raw(ops->filter_hash);
1250         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1251
1252         if ((!filter_hash || !filter_hash->count ||
1253              ftrace_lookup_ip(filter_hash, ip)) &&
1254             (!notrace_hash || !notrace_hash->count ||
1255              !ftrace_lookup_ip(notrace_hash, ip)))
1256                 ret = 1;
1257         else
1258                 ret = 0;
1259
1260         return ret;
1261 }
1262
1263 /*
1264  * This is a double for. Do not use 'break' to break out of the loop,
1265  * you must use a goto.
1266  */
1267 #define do_for_each_ftrace_rec(pg, rec)                                 \
1268         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1269                 int _____i;                                             \
1270                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1271                         rec = &pg->records[_____i];
1272
1273 #define while_for_each_ftrace_rec()             \
1274                 }                               \
1275         }
1276
1277 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1278                                      int filter_hash,
1279                                      bool inc)
1280 {
1281         struct ftrace_hash *hash;
1282         struct ftrace_hash *other_hash;
1283         struct ftrace_page *pg;
1284         struct dyn_ftrace *rec;
1285         int count = 0;
1286         int all = 0;
1287
1288         /* Only update if the ops has been registered */
1289         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1290                 return;
1291
1292         /*
1293          * In the filter_hash case:
1294          *   If the count is zero, we update all records.
1295          *   Otherwise we just update the items in the hash.
1296          *
1297          * In the notrace_hash case:
1298          *   We enable the update in the hash.
1299          *   As disabling notrace means enabling the tracing,
1300          *   and enabling notrace means disabling, the inc variable
1301          *   gets inverted.
1302          */
1303         if (filter_hash) {
1304                 hash = ops->filter_hash;
1305                 other_hash = ops->notrace_hash;
1306                 if (!hash || !hash->count)
1307                         all = 1;
1308         } else {
1309                 inc = !inc;
1310                 hash = ops->notrace_hash;
1311                 other_hash = ops->filter_hash;
1312                 /*
1313                  * If the notrace hash has no items,
1314                  * then there's nothing to do.
1315                  */
1316                 if (hash && !hash->count)
1317                         return;
1318         }
1319
1320         do_for_each_ftrace_rec(pg, rec) {
1321                 int in_other_hash = 0;
1322                 int in_hash = 0;
1323                 int match = 0;
1324
1325                 if (all) {
1326                         /*
1327                          * Only the filter_hash affects all records.
1328                          * Update if the record is not in the notrace hash.
1329                          */
1330                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1331                                 match = 1;
1332                 } else {
1333                         in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1334                         in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
1335
1336                         /*
1337                          * Match if the ip is in this hash and is not excluded by the other hash.
1338                          */
1339                         if (filter_hash && in_hash && !in_other_hash)
1340                                 match = 1;
1341                         else if (!filter_hash && in_hash &&
1342                                  (in_other_hash || !other_hash->count))
1343                                 match = 1;
1344                 }
1345                 if (!match)
1346                         continue;
1347
1348                 if (inc) {
1349                         rec->flags++;
1350                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1351                                 return;
1352                 } else {
1353                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1354                                 return;
1355                         rec->flags--;
1356                 }
1357                 count++;
1358                 /* Shortcut: if we handled all records, we are done. */
1359                 if (!all && count == hash->count)
1360                         return;
1361         } while_for_each_ftrace_rec();
1362 }
1363
1364 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1365                                     int filter_hash)
1366 {
1367         __ftrace_hash_rec_update(ops, filter_hash, 0);
1368 }
1369
1370 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1371                                    int filter_hash)
1372 {
1373         __ftrace_hash_rec_update(ops, filter_hash, 1);
1374 }
1375
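/* Return a dyn_ftrace record to the free list for later reuse */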
1376 static void ftrace_free_rec(struct dyn_ftrace *rec)
1377 {
1378         rec->freelist = ftrace_free_records;
1379         ftrace_free_records = rec;
1380         rec->flags |= FTRACE_FL_FREE;
1381 }
1382
1383 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1384 {
1385         struct dyn_ftrace *rec;
1386
1387         /* First check for freed records */
1388         if (ftrace_free_records) {
1389                 rec = ftrace_free_records;
1390
1391                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1392                         FTRACE_WARN_ON_ONCE(1);
1393                         ftrace_free_records = NULL;
1394                         return NULL;
1395                 }
1396
1397                 ftrace_free_records = rec->freelist;
1398                 memset(rec, 0, sizeof(*rec));
1399                 return rec;
1400         }
1401
1402         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1403                 if (!ftrace_pages->next) {
1404                         /* allocate another page */
1405                         ftrace_pages->next =
1406                                 (void *)get_zeroed_page(GFP_KERNEL);
1407                         if (!ftrace_pages->next)
1408                                 return NULL;
1409                 }
1410                 ftrace_pages = ftrace_pages->next;
1411         }
1412
1413         return &ftrace_pages->records[ftrace_pages->index++];
1414 }
1415
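/*
 * Record a newly discovered mcount call site.  The record is queued on
 * ftrace_new_addrs and converted to a NOP later by ftrace_update_code().
 */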
1416 static struct dyn_ftrace *
1417 ftrace_record_ip(unsigned long ip)
1418 {
1419         struct dyn_ftrace *rec;
1420
1421         if (ftrace_disabled)
1422                 return NULL;
1423
1424         rec = ftrace_alloc_dyn_node(ip);
1425         if (!rec)
1426                 return NULL;
1427
1428         rec->ip = ip;
1429         rec->newlist = ftrace_new_addrs;
1430         ftrace_new_addrs = rec;
1431
1432         return rec;
1433 }
1434
1435 static void print_ip_ins(const char *fmt, unsigned char *p)
1436 {
1437         int i;
1438
1439         printk(KERN_CONT "%s", fmt);
1440
1441         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1442                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1443 }
1444
1445 static void ftrace_bug(int failed, unsigned long ip)
1446 {
1447         switch (failed) {
1448         case -EFAULT:
1449                 FTRACE_WARN_ON_ONCE(1);
1450                 pr_info("ftrace faulted on modifying ");
1451                 print_ip_sym(ip);
1452                 break;
1453         case -EINVAL:
1454                 FTRACE_WARN_ON_ONCE(1);
1455                 pr_info("ftrace failed to modify ");
1456                 print_ip_sym(ip);
1457                 print_ip_ins(" actual: ", (unsigned char *)ip);
1458                 printk(KERN_CONT "\n");
1459                 break;
1460         case -EPERM:
1461                 FTRACE_WARN_ON_ONCE(1);
1462                 pr_info("ftrace faulted on writing ");
1463                 print_ip_sym(ip);
1464                 break;
1465         default:
1466                 FTRACE_WARN_ON_ONCE(1);
1467                 pr_info("ftrace faulted on unknown error ");
1468                 print_ip_sym(ip);
1469         }
1470 }
1471
1472
1473 /* Return 1 if the address range is reserved for ftrace */
1474 int ftrace_text_reserved(void *start, void *end)
1475 {
1476         struct dyn_ftrace *rec;
1477         struct ftrace_page *pg;
1478
1479         do_for_each_ftrace_rec(pg, rec) {
1480                 if (rec->ip <= (unsigned long)end &&
1481                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1482                         return 1;
1483         } while_for_each_ftrace_rec();
1484         return 0;
1485 }
1486
1487
1488 static int
1489 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1490 {
1491         unsigned long ftrace_addr;
1492         unsigned long flag = 0UL;
1493
1494         ftrace_addr = (unsigned long)FTRACE_ADDR;
1495
1496         /*
1497          * If we are enabling tracing:
1498          *
1499          *   If the record has a ref count, then we need to enable it
1500          *   because someone is using it.
1501          *
1502          *   Otherwise we make sure it's disabled.
1503          *
1504          * If we are disabling tracing, then disable all records that
1505          * are enabled.
1506          */
1507         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1508                 flag = FTRACE_FL_ENABLED;
1509
1510         /* If the state of this record hasn't changed, then do nothing */
1511         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1512                 return 0;
1513
1514         if (flag) {
1515                 rec->flags |= FTRACE_FL_ENABLED;
1516                 return ftrace_make_call(rec, ftrace_addr);
1517         }
1518
1519         rec->flags &= ~FTRACE_FL_ENABLED;
1520         return ftrace_make_nop(NULL, rec, ftrace_addr);
1521 }
1522
1523 static void ftrace_replace_code(int enable)
1524 {
1525         struct dyn_ftrace *rec;
1526         struct ftrace_page *pg;
1527         int failed;
1528
1529         if (unlikely(ftrace_disabled))
1530                 return;
1531
1532         do_for_each_ftrace_rec(pg, rec) {
1533                 /* Skip over free records */
1534                 if (rec->flags & FTRACE_FL_FREE)
1535                         continue;
1536
1537                 failed = __ftrace_replace_code(rec, enable);
1538                 if (failed) {
1539                         ftrace_bug(failed, rec->ip);
1540                         /* Stop processing */
1541                         return;
1542                 }
1543         } while_for_each_ftrace_rec();
1544 }
1545
1546 static int
1547 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1548 {
1549         unsigned long ip;
1550         int ret;
1551
1552         ip = rec->ip;
1553
1554         if (unlikely(ftrace_disabled))
1555                 return 0;
1556
1557         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1558         if (ret) {
1559                 ftrace_bug(ret, ip);
1560                 return 0;
1561         }
1562         return 1;
1563 }
1564
1565 /*
1566  * archs can override this function if they must do something
1567  * before the modifying code is performed.
1568  */
1569 int __weak ftrace_arch_code_modify_prepare(void)
1570 {
1571         return 0;
1572 }
1573
1574 /*
1575  * archs can override this function if they must do something
1576  * after the modifying code is performed.
1577  */
1578 int __weak ftrace_arch_code_modify_post_process(void)
1579 {
1580         return 0;
1581 }
1582
1583 static int __ftrace_modify_code(void *data)
1584 {
1585         int *command = data;
1586
1587         if (*command & FTRACE_ENABLE_CALLS)
1588                 ftrace_replace_code(1);
1589         else if (*command & FTRACE_DISABLE_CALLS)
1590                 ftrace_replace_code(0);
1591
1592         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1593                 ftrace_update_ftrace_func(ftrace_trace_function);
1594
1595         if (*command & FTRACE_START_FUNC_RET)
1596                 ftrace_enable_ftrace_graph_caller();
1597         else if (*command & FTRACE_STOP_FUNC_RET)
1598                 ftrace_disable_ftrace_graph_caller();
1599
1600         return 0;
1601 }
1602
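/*
 * Apply the requested code modifications.  stop_machine() runs
 * __ftrace_modify_code() with all other CPUs held, so the kernel text can
 * be patched without another CPU executing it mid-update.
 */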
1603 static void ftrace_run_update_code(int command)
1604 {
1605         int ret;
1606
1607         ret = ftrace_arch_code_modify_prepare();
1608         FTRACE_WARN_ON(ret);
1609         if (ret)
1610                 return;
1611
1612         stop_machine(__ftrace_modify_code, &command, NULL);
1613
1614         ret = ftrace_arch_code_modify_post_process();
1615         FTRACE_WARN_ON(ret);
1616 }
1617
1618 static ftrace_func_t saved_ftrace_func;
1619 static int ftrace_start_up;
1620 static int global_start_up;
1621
1622 static void ftrace_startup_enable(int command)
1623 {
1624         if (saved_ftrace_func != ftrace_trace_function) {
1625                 saved_ftrace_func = ftrace_trace_function;
1626                 command |= FTRACE_UPDATE_TRACE_FUNC;
1627         }
1628
1629         if (!command || !ftrace_enabled)
1630                 return;
1631
1632         ftrace_run_update_code(command);
1633 }
1634
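/*
 * Start tracing for @ops: bump the ref counts of the records it selects,
 * mark it enabled and patch the call sites if needed.
 */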
1635 static int ftrace_startup(struct ftrace_ops *ops, int command)
1636 {
1637         bool hash_enable = true;
1638
1639         if (unlikely(ftrace_disabled))
1640                 return -ENODEV;
1641
1642         ftrace_start_up++;
1643         command |= FTRACE_ENABLE_CALLS;
1644
1645         /* ops marked global share the filter hashes */
1646         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1647                 ops = &global_ops;
1648                 /* Don't update hash if global is already set */
1649                 if (global_start_up)
1650                         hash_enable = false;
1651                 global_start_up++;
1652         }
1653
1654         ops->flags |= FTRACE_OPS_FL_ENABLED;
1655         if (hash_enable)
1656                 ftrace_hash_rec_enable(ops, 1);
1657
1658         ftrace_startup_enable(command);
1659
1660         return 0;
1661 }
1662
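/*
 * Stop tracing for @ops: drop the record ref counts and, once no users
 * remain, NOP out the call sites again.
 */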
1663 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1664 {
1665         bool hash_disable = true;
1666
1667         if (unlikely(ftrace_disabled))
1668                 return;
1669
1670         ftrace_start_up--;
1671         /*
1672          * Just warn in case of unbalance; no need to kill ftrace, it's not
1673          * critical, but the ftrace_call callers may never be nopped again after
1674          * further ftrace uses.
1675          */
1676         WARN_ON_ONCE(ftrace_start_up < 0);
1677
1678         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1679                 ops = &global_ops;
1680                 global_start_up--;
1681                 WARN_ON_ONCE(global_start_up < 0);
1682                 /* Don't update hash if global still has users */
1683                 if (global_start_up) {
1684                         WARN_ON_ONCE(!ftrace_start_up);
1685                         hash_disable = false;
1686                 }
1687         }
1688
1689         if (hash_disable)
1690                 ftrace_hash_rec_disable(ops, 1);
1691
1692         if (ops != &global_ops || !global_start_up)
1693                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1694
1695         if (!ftrace_start_up)
1696                 command |= FTRACE_DISABLE_CALLS;
1697
1698         if (saved_ftrace_func != ftrace_trace_function) {
1699                 saved_ftrace_func = ftrace_trace_function;
1700                 command |= FTRACE_UPDATE_TRACE_FUNC;
1701         }
1702
1703         if (!command || !ftrace_enabled)
1704                 return;
1705
1706         ftrace_run_update_code(command);
1707 }
1708
1709 static void ftrace_startup_sysctl(void)
1710 {
1711         if (unlikely(ftrace_disabled))
1712                 return;
1713
1714         /* Force update next time */
1715         saved_ftrace_func = NULL;
1716         /* ftrace_start_up is true if we want ftrace running */
1717         if (ftrace_start_up)
1718                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1719 }
1720
1721 static void ftrace_shutdown_sysctl(void)
1722 {
1723         if (unlikely(ftrace_disabled))
1724                 return;
1725
1726         /* ftrace_start_up is true if ftrace is running */
1727         if (ftrace_start_up)
1728                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1729 }
1730
1731 static cycle_t          ftrace_update_time;
1732 static unsigned long    ftrace_update_cnt;
1733 unsigned long           ftrace_update_tot_cnt;
1734
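/*
 * Convert all newly discovered mcount call sites (ftrace_new_addrs)
 * into NOPs, re-enabling them right away if tracing is already
 * running, and account how long the conversion took.
 */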
1735 static int ftrace_update_code(struct module *mod)
1736 {
1737         struct dyn_ftrace *p;
1738         cycle_t start, stop;
1739
1740         start = ftrace_now(raw_smp_processor_id());
1741         ftrace_update_cnt = 0;
1742
1743         while (ftrace_new_addrs) {
1744
1745                 /* If something went wrong, bail without enabling anything */
1746                 if (unlikely(ftrace_disabled))
1747                         return -1;
1748
1749                 p = ftrace_new_addrs;
1750                 ftrace_new_addrs = p->newlist;
1751                 p->flags = 0L;
1752
1753                 /*
1754                  * Do the initial record conversion from mcount jump
1755                  * to the NOP instructions.
1756                  */
1757                 if (!ftrace_code_disable(mod, p)) {
1758                         ftrace_free_rec(p);
1759                         /* Game over */
1760                         break;
1761                 }
1762
1763                 ftrace_update_cnt++;
1764
1765                 /*
1766                  * If the tracing is enabled, go ahead and enable the record.
1767                  *
1768                  * The reason the record is not enabled immediately is the
1769                  * inherent check in ftrace_make_nop/ftrace_make_call for
1770                  * correct previous instructions.  Doing the NOP conversion
1771                  * first puts the module into the correct state, and thus
1772                  * it passes the ftrace_make_call check.
1773                  */
1774                 if (ftrace_start_up) {
1775                         int failed = __ftrace_replace_code(p, 1);
1776                         if (failed) {
1777                                 ftrace_bug(failed, p->ip);
1778                                 ftrace_free_rec(p);
1779                         }
1780                 }
1781         }
1782
1783         stop = ftrace_now(raw_smp_processor_id());
1784         ftrace_update_time = stop - start;
1785         ftrace_update_tot_cnt += ftrace_update_cnt;
1786
1787         return 0;
1788 }
1789
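/*
 * Allocate the pages that hold the dyn_ftrace records for the
 * call sites discovered at boot.
 */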
1790 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1791 {
1792         struct ftrace_page *pg;
1793         int cnt;
1794         int i;
1795
1796         /* allocate a few pages */
1797         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1798         if (!ftrace_pages_start)
1799                 return -1;
1800
1801         /*
1802          * Allocate a few more pages.
1803          *
1804          * TODO: have some parser search vmlinux before
1805          *   final linking to find all calls to ftrace.
1806          *   Then we can:
1807          *    a) know how many pages to allocate.
1808          *     and/or
1809          *    b) set up the table then.
1810          *
1811          *  The dynamic code is still necessary for
1812          *  modules.
1813          */
1814
1815         pg = ftrace_pages = ftrace_pages_start;
1816
1817         cnt = num_to_init / ENTRIES_PER_PAGE;
1818         pr_info("ftrace: allocating %ld entries in %d pages\n",
1819                 num_to_init, cnt + 1);
1820
1821         for (i = 0; i < cnt; i++) {
1822                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1823
1824                 /* If we fail, we'll try later anyway */
1825                 if (!pg->next)
1826                         break;
1827
1828                 pg = pg->next;
1829         }
1830
1831         return 0;
1832 }
1833
1834 enum {
1835         FTRACE_ITER_FILTER      = (1 << 0),
1836         FTRACE_ITER_NOTRACE     = (1 << 1),
1837         FTRACE_ITER_PRINTALL    = (1 << 2),
1838         FTRACE_ITER_HASH        = (1 << 3),
1839         FTRACE_ITER_ENABLED     = (1 << 4),
1840 };
1841
1842 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1843
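/* State shared by the seq_file iterators below (t_start/t_next/t_show). */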
1844 struct ftrace_iterator {
1845         loff_t                          pos;
1846         loff_t                          func_pos;
1847         struct ftrace_page              *pg;
1848         struct dyn_ftrace               *func;
1849         struct ftrace_func_probe        *probe;
1850         struct trace_parser             parser;
1851         struct ftrace_hash              *hash;
1852         struct ftrace_ops               *ops;
1853         int                             hidx;
1854         int                             idx;
1855         unsigned                        flags;
1856 };
1857
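/* Advance to the next entry in the function probe hash (ftrace_func_hash). */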
1858 static void *
1859 t_hash_next(struct seq_file *m, loff_t *pos)
1860 {
1861         struct ftrace_iterator *iter = m->private;
1862         struct hlist_node *hnd = NULL;
1863         struct hlist_head *hhd;
1864
1865         (*pos)++;
1866         iter->pos = *pos;
1867
1868         if (iter->probe)
1869                 hnd = &iter->probe->node;
1870  retry:
1871         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1872                 return NULL;
1873
1874         hhd = &ftrace_func_hash[iter->hidx];
1875
1876         if (hlist_empty(hhd)) {
1877                 iter->hidx++;
1878                 hnd = NULL;
1879                 goto retry;
1880         }
1881
1882         if (!hnd)
1883                 hnd = hhd->first;
1884         else {
1885                 hnd = hnd->next;
1886                 if (!hnd) {
1887                         iter->hidx++;
1888                         goto retry;
1889                 }
1890         }
1891
1892         if (WARN_ON_ONCE(!hnd))
1893                 return NULL;
1894
1895         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1896
1897         return iter;
1898 }
1899
1900 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1901 {
1902         struct ftrace_iterator *iter = m->private;
1903         void *p = NULL;
1904         loff_t l;
1905
1906         if (iter->func_pos > *pos)
1907                 return NULL;
1908
1909         iter->hidx = 0;
1910         for (l = 0; l <= (*pos - iter->func_pos); ) {
1911                 p = t_hash_next(m, &l);
1912                 if (!p)
1913                         break;
1914         }
1915         if (!p)
1916                 return NULL;
1917
1918         /* Only set this if we have an item */
1919         iter->flags |= FTRACE_ITER_HASH;
1920
1921         return iter;
1922 }
1923
1924 static int
1925 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1926 {
1927         struct ftrace_func_probe *rec;
1928
1929         rec = iter->probe;
1930         if (WARN_ON_ONCE(!rec))
1931                 return -EIO;
1932
1933         if (rec->ops->print)
1934                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1935
1936         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1937
1938         if (rec->data)
1939                 seq_printf(m, ":%p", rec->data);
1940         seq_putc(m, '\n');
1941
1942         return 0;
1943 }
1944
1945 static void *
1946 t_next(struct seq_file *m, void *v, loff_t *pos)
1947 {
1948         struct ftrace_iterator *iter = m->private;
1949         struct ftrace_ops *ops = &global_ops;
1950         struct dyn_ftrace *rec = NULL;
1951
1952         if (unlikely(ftrace_disabled))
1953                 return NULL;
1954
1955         if (iter->flags & FTRACE_ITER_HASH)
1956                 return t_hash_next(m, pos);
1957
1958         (*pos)++;
1959         iter->pos = iter->func_pos = *pos;
1960
1961         if (iter->flags & FTRACE_ITER_PRINTALL)
1962                 return t_hash_start(m, pos);
1963
1964  retry:
1965         if (iter->idx >= iter->pg->index) {
1966                 if (iter->pg->next) {
1967                         iter->pg = iter->pg->next;
1968                         iter->idx = 0;
1969                         goto retry;
1970                 }
1971         } else {
1972                 rec = &iter->pg->records[iter->idx++];
1973                 if ((rec->flags & FTRACE_FL_FREE) ||
1974
1975                     ((iter->flags & FTRACE_ITER_FILTER) &&
1976                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
1977
1978                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
1979                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
1980
1981                     ((iter->flags & FTRACE_ITER_ENABLED) &&
1982                      !(rec->flags & ~FTRACE_FL_MASK))) {
1983
1984                         rec = NULL;
1985                         goto retry;
1986                 }
1987         }
1988
1989         if (!rec)
1990                 return t_hash_start(m, pos);
1991
1992         iter->func = rec;
1993
1994         return iter;
1995 }
1996
1997 static void reset_iter_read(struct ftrace_iterator *iter)
1998 {
1999         iter->pos = 0;
2000         iter->func_pos = 0;
2001         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2002 }
2003
2004 static void *t_start(struct seq_file *m, loff_t *pos)
2005 {
2006         struct ftrace_iterator *iter = m->private;
2007         struct ftrace_ops *ops = &global_ops;
2008         void *p = NULL;
2009         loff_t l;
2010
2011         mutex_lock(&ftrace_lock);
2012
2013         if (unlikely(ftrace_disabled))
2014                 return NULL;
2015
2016         /*
2017          * If an lseek was done, then reset and start from the beginning.
2018          */
2019         if (*pos < iter->pos)
2020                 reset_iter_read(iter);
2021
2022         /*
2023          * For set_ftrace_filter reading, if we have the filter
2024          * off, we can short cut and just print out that all
2025          * functions are enabled.
2026          */
2027         if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
2028                 if (*pos > 0)
2029                         return t_hash_start(m, pos);
2030                 iter->flags |= FTRACE_ITER_PRINTALL;
2031                 /* reset in case of seek/pread */
2032                 iter->flags &= ~FTRACE_ITER_HASH;
2033                 return iter;
2034         }
2035
2036         if (iter->flags & FTRACE_ITER_HASH)
2037                 return t_hash_start(m, pos);
2038
2039         /*
2040          * Unfortunately, we need to restart at ftrace_pages_start
2041          * every time we let go of ftrace_lock. This is because
2042          * those pointers can change without the lock.
2043          */
2044         iter->pg = ftrace_pages_start;
2045         iter->idx = 0;
2046         for (l = 0; l <= *pos; ) {
2047                 p = t_next(m, p, &l);
2048                 if (!p)
2049                         break;
2050         }
2051
2052         if (!p) {
2053                 if (iter->flags & FTRACE_ITER_FILTER)
2054                         return t_hash_start(m, pos);
2055
2056                 return NULL;
2057         }
2058
2059         return iter;
2060 }
2061
2062 static void t_stop(struct seq_file *m, void *p)
2063 {
2064         mutex_unlock(&ftrace_lock);
2065 }
2066
2067 static int t_show(struct seq_file *m, void *v)
2068 {
2069         struct ftrace_iterator *iter = m->private;
2070         struct dyn_ftrace *rec;
2071
2072         if (iter->flags & FTRACE_ITER_HASH)
2073                 return t_hash_show(m, iter);
2074
2075         if (iter->flags & FTRACE_ITER_PRINTALL) {
2076                 seq_printf(m, "#### all functions enabled ####\n");
2077                 return 0;
2078         }
2079
2080         rec = iter->func;
2081
2082         if (!rec)
2083                 return 0;
2084
2085         seq_printf(m, "%ps", (void *)rec->ip);
2086         if (iter->flags & FTRACE_ITER_ENABLED)
2087                 seq_printf(m, " (%ld)",
2088                            rec->flags & ~FTRACE_FL_MASK);
2089         seq_printf(m, "\n");
2090
2091         return 0;
2092 }
2093
2094 static const struct seq_operations show_ftrace_seq_ops = {
2095         .start = t_start,
2096         .next = t_next,
2097         .stop = t_stop,
2098         .show = t_show,
2099 };
2100
2101 static int
2102 ftrace_avail_open(struct inode *inode, struct file *file)
2103 {
2104         struct ftrace_iterator *iter;
2105         int ret;
2106
2107         if (unlikely(ftrace_disabled))
2108                 return -ENODEV;
2109
2110         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2111         if (!iter)
2112                 return -ENOMEM;
2113
2114         iter->pg = ftrace_pages_start;
2115
2116         ret = seq_open(file, &show_ftrace_seq_ops);
2117         if (!ret) {
2118                 struct seq_file *m = file->private_data;
2119
2120                 m->private = iter;
2121         } else {
2122                 kfree(iter);
2123         }
2124
2125         return ret;
2126 }
2127
2128 static int
2129 ftrace_enabled_open(struct inode *inode, struct file *file)
2130 {
2131         struct ftrace_iterator *iter;
2132         int ret;
2133
2134         if (unlikely(ftrace_disabled))
2135                 return -ENODEV;
2136
2137         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2138         if (!iter)
2139                 return -ENOMEM;
2140
2141         iter->pg = ftrace_pages_start;
2142         iter->flags = FTRACE_ITER_ENABLED;
2143
2144         ret = seq_open(file, &show_ftrace_seq_ops);
2145         if (!ret) {
2146                 struct seq_file *m = file->private_data;
2147
2148                 m->private = iter;
2149         } else {
2150                 kfree(iter);
2151         }
2152
2153         return ret;
2154 }
2155
2156 static void ftrace_filter_reset(struct ftrace_hash *hash)
2157 {
2158         mutex_lock(&ftrace_lock);
2159         ftrace_hash_clear(hash);
2160         mutex_unlock(&ftrace_lock);
2161 }
2162
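/*
 * Common open routine for set_ftrace_filter and set_ftrace_notrace.
 * Writers get a private copy of the current hash; the copy only
 * replaces the ops hash when the file is released.
 */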
2163 static int
2164 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2165                   struct inode *inode, struct file *file)
2166 {
2167         struct ftrace_iterator *iter;
2168         struct ftrace_hash *hash;
2169         int ret = 0;
2170
2171         if (unlikely(ftrace_disabled))
2172                 return -ENODEV;
2173
2174         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2175         if (!iter)
2176                 return -ENOMEM;
2177
2178         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2179                 kfree(iter);
2180                 return -ENOMEM;
2181         }
2182
2183         if (flag & FTRACE_ITER_NOTRACE)
2184                 hash = ops->notrace_hash;
2185         else
2186                 hash = ops->filter_hash;
2187
2188         iter->ops = ops;
2189         iter->flags = flag;
2190
2191         if (file->f_mode & FMODE_WRITE) {
2192                 mutex_lock(&ftrace_lock);
2193                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2194                 mutex_unlock(&ftrace_lock);
2195
2196                 if (!iter->hash) {
2197                         trace_parser_put(&iter->parser);
2198                         kfree(iter);
2199                         return -ENOMEM;
2200                 }
2201         }
2202
2203         mutex_lock(&ftrace_regex_lock);
2204
2205         if ((file->f_mode & FMODE_WRITE) &&
2206             (file->f_flags & O_TRUNC))
2207                 ftrace_filter_reset(iter->hash);
2208
2209         if (file->f_mode & FMODE_READ) {
2210                 iter->pg = ftrace_pages_start;
2211
2212                 ret = seq_open(file, &show_ftrace_seq_ops);
2213                 if (!ret) {
2214                         struct seq_file *m = file->private_data;
2215                         m->private = iter;
2216                 } else {
2217                         /* Failed */
2218                         free_ftrace_hash(iter->hash);
2219                         trace_parser_put(&iter->parser);
2220                         kfree(iter);
2221                 }
2222         } else
2223                 file->private_data = iter;
2224         mutex_unlock(&ftrace_regex_lock);
2225
2226         return ret;
2227 }
2228
2229 static int
2230 ftrace_filter_open(struct inode *inode, struct file *file)
2231 {
2232         return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2233                                  inode, file);
2234 }
2235
2236 static int
2237 ftrace_notrace_open(struct inode *inode, struct file *file)
2238 {
2239         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2240                                  inode, file);
2241 }
2242
2243 static loff_t
2244 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2245 {
2246         loff_t ret;
2247
2248         if (file->f_mode & FMODE_READ)
2249                 ret = seq_lseek(file, offset, origin);
2250         else
2251                 file->f_pos = ret = 1;
2252
2253         return ret;
2254 }
2255
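/* Match @str against @regex using the glob type from filter_parse_regex(). */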
2256 static int ftrace_match(char *str, char *regex, int len, int type)
2257 {
2258         int matched = 0;
2259         int slen;
2260
2261         switch (type) {
2262         case MATCH_FULL:
2263                 if (strcmp(str, regex) == 0)
2264                         matched = 1;
2265                 break;
2266         case MATCH_FRONT_ONLY:
2267                 if (strncmp(str, regex, len) == 0)
2268                         matched = 1;
2269                 break;
2270         case MATCH_MIDDLE_ONLY:
2271                 if (strstr(str, regex))
2272                         matched = 1;
2273                 break;
2274         case MATCH_END_ONLY:
2275                 slen = strlen(str);
2276                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2277                         matched = 1;
2278                 break;
2279         }
2280
2281         return matched;
2282 }
2283
2284 static int
2285 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2286 {
2287         struct ftrace_func_entry *entry;
2288         int ret = 0;
2289
2290         entry = ftrace_lookup_ip(hash, rec->ip);
2291         if (not) {
2292                 /* Do nothing if it doesn't exist */
2293                 if (!entry)
2294                         return 0;
2295
2296                 free_hash_entry(hash, entry);
2297         } else {
2298                 /* Do nothing if it exists */
2299                 if (entry)
2300                         return 0;
2301
2302                 ret = add_hash_entry(hash, rec->ip);
2303         }
2304         return ret;
2305 }
2306
2307 static int
2308 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2309                     char *regex, int len, int type)
2310 {
2311         char str[KSYM_SYMBOL_LEN];
2312         char *modname;
2313
2314         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2315
2316         if (mod) {
2317                 /* module lookup requires matching the module */
2318                 if (!modname || strcmp(modname, mod))
2319                         return 0;
2320
2321                 /* blank search means to match all funcs in the mod */
2322                 if (!len)
2323                         return 1;
2324         }
2325
2326         return ftrace_match(str, regex, len, type);
2327 }
2328
2329 static int
2330 match_records(struct ftrace_hash *hash, char *buff,
2331               int len, char *mod, int not)
2332 {
2333         unsigned search_len = 0;
2334         struct ftrace_page *pg;
2335         struct dyn_ftrace *rec;
2336         int type = MATCH_FULL;
2337         char *search = buff;
2338         int found = 0;
2339         int ret;
2340
2341         if (len) {
2342                 type = filter_parse_regex(buff, len, &search, &not);
2343                 search_len = strlen(search);
2344         }
2345
2346         mutex_lock(&ftrace_lock);
2347
2348         if (unlikely(ftrace_disabled))
2349                 goto out_unlock;
2350
2351         do_for_each_ftrace_rec(pg, rec) {
2352
2353                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2354                         ret = enter_record(hash, rec, not);
2355                         if (ret < 0) {
2356                                 found = ret;
2357                                 goto out_unlock;
2358                         }
2359                         found = 1;
2360                 }
2361         } while_for_each_ftrace_rec();
2362  out_unlock:
2363         mutex_unlock(&ftrace_lock);
2364
2365         return found;
2366 }
2367
2368 static int
2369 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2370 {
2371         return match_records(hash, buff, len, NULL, 0);
2372 }
2373
2374 static int
2375 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2376 {
2377         int not = 0;
2378
2379         /* blank or '*' mean the same */
2380         if (strcmp(buff, "*") == 0)
2381                 buff[0] = 0;
2382
2383         /* handle the case of 'don't filter this module' */
2384         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2385                 buff[0] = 0;
2386                 not = 1;
2387         }
2388
2389         return match_records(hash, buff, strlen(buff), mod, not);
2390 }
2391
2392 /*
2393  * We register the module command as a template to show others how
2394  * to register a command as well.
2395  */
2396
2397 static int
2398 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
2399 {
2400         struct ftrace_ops *ops = &global_ops;
2401         struct ftrace_hash *hash;
2402         char *mod;
2403         int ret = -EINVAL;
2404
2405         /*
2406          * cmd == 'mod' because we only registered this func
2407          * for the 'mod' ftrace_func_command.
2408          * But if you register one func with multiple commands,
2409          * you can tell which command was used by the cmd
2410          * parameter.
2411          */
2412
2413         /* we must have a module name */
2414         if (!param)
2415                 return ret;
2416
2417         mod = strsep(&param, ":");
2418         if (!strlen(mod))
2419                 return ret;
2420
2421         if (enable)
2422                 hash = ops->filter_hash;
2423         else
2424                 hash = ops->notrace_hash;
2425
2426         ret = ftrace_match_module_records(hash, func, mod);
2427         if (!ret)
2428                 ret = -EINVAL;
2429         if (ret < 0)
2430                 return ret;
2431
2432         return 0;
2433 }
2434
2435 static struct ftrace_func_command ftrace_mod_cmd = {
2436         .name                   = "mod",
2437         .func                   = ftrace_mod_callback,
2438 };
2439
2440 static int __init ftrace_mod_cmd_init(void)
2441 {
2442         return register_ftrace_command(&ftrace_mod_cmd);
2443 }
2444 device_initcall(ftrace_mod_cmd_init);
2445
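/*
 * Handler of trace_probe_ops: called for every traced function while
 * probes are registered, invoking the probes whose ip matches.
 */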
2446 static void
2447 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2448 {
2449         struct ftrace_func_probe *entry;
2450         struct hlist_head *hhd;
2451         struct hlist_node *n;
2452         unsigned long key;
2453
2454         key = hash_long(ip, FTRACE_HASH_BITS);
2455
2456         hhd = &ftrace_func_hash[key];
2457
2458         if (hlist_empty(hhd))
2459                 return;
2460
2461         /*
2462          * Disable preemption for these calls to prevent an RCU grace
2463          * period from completing. This syncs the hash iteration with the
2464          * freeing of items on the hash. rcu_read_lock is too dangerous here.
2465          */
2466         preempt_disable_notrace();
2467         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2468                 if (entry->ip == ip)
2469                         entry->ops->func(ip, parent_ip, &entry->data);
2470         }
2471         preempt_enable_notrace();
2472 }
2473
2474 static struct ftrace_ops trace_probe_ops __read_mostly =
2475 {
2476         .func           = function_trace_probe_call,
2477 };
2478
2479 static int ftrace_probe_registered;
2480
2481 static void __enable_ftrace_function_probe(void)
2482 {
2483         int ret;
2484         int i;
2485
2486         if (ftrace_probe_registered)
2487                 return;
2488
2489         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2490                 struct hlist_head *hhd = &ftrace_func_hash[i];
2491                 if (hhd->first)
2492                         break;
2493         }
2494         /* Nothing registered? */
2495         if (i == FTRACE_FUNC_HASHSIZE)
2496                 return;
2497
2498         ret = __register_ftrace_function(&trace_probe_ops);
2499         if (!ret)
2500                 ret = ftrace_startup(&trace_probe_ops, 0);
2501
2502         ftrace_probe_registered = 1;
2503 }
2504
2505 static void __disable_ftrace_function_probe(void)
2506 {
2507         int ret;
2508         int i;
2509
2510         if (!ftrace_probe_registered)
2511                 return;
2512
2513         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2514                 struct hlist_head *hhd = &ftrace_func_hash[i];
2515                 if (hhd->first)
2516                         return;
2517         }
2518
2519         /* no more funcs left */
2520         ret = __unregister_ftrace_function(&trace_probe_ops);
2521         if (!ret)
2522                 ftrace_shutdown(&trace_probe_ops, 0);
2523
2524         ftrace_probe_registered = 0;
2525 }
2526
2527
2528 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2529 {
2530         struct ftrace_func_probe *entry =
2531                 container_of(rhp, struct ftrace_func_probe, rcu);
2532
2533         if (entry->ops->free)
2534                 entry->ops->free(&entry->data);
2535         kfree(entry);
2536 }
2537
2538
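/*
 * Attach @ops and @data as a probe to every function matching @glob.
 * Returns the number of functions hooked, or a negative error.
 */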
2539 int
2540 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2541                               void *data)
2542 {
2543         struct ftrace_func_probe *entry;
2544         struct ftrace_page *pg;
2545         struct dyn_ftrace *rec;
2546         int type, len, not;
2547         unsigned long key;
2548         int count = 0;
2549         char *search;
2550
2551         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2552         len = strlen(search);
2553
2554         /* we do not support '!' for function probes */
2555         if (WARN_ON(not))
2556                 return -EINVAL;
2557
2558         mutex_lock(&ftrace_lock);
2559
2560         if (unlikely(ftrace_disabled))
2561                 goto out_unlock;
2562
2563         do_for_each_ftrace_rec(pg, rec) {
2564
2565                 if (!ftrace_match_record(rec, NULL, search, len, type))
2566                         continue;
2567
2568                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2569                 if (!entry) {
2570                         /* If we did not process any, then return error */
2571                         if (!count)
2572                                 count = -ENOMEM;
2573                         goto out_unlock;
2574                 }
2575
2576                 count++;
2577
2578                 entry->data = data;
2579
2580                 /*
2581                  * The caller might want to do something special
2582                  * for each function we find. We call the callback
2583                  * to give the caller an opportunity to do so.
2584                  */
2585                 if (ops->callback) {
2586                         if (ops->callback(rec->ip, &entry->data) < 0) {
2587                                 /* caller does not like this func */
2588                                 kfree(entry);
2589                                 continue;
2590                         }
2591                 }
2592
2593                 entry->ops = ops;
2594                 entry->ip = rec->ip;
2595
2596                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2597                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2598
2599         } while_for_each_ftrace_rec();
2600         __enable_ftrace_function_probe();
2601
2602  out_unlock:
2603         mutex_unlock(&ftrace_lock);
2604
2605         return count;
2606 }
2607
2608 enum {
2609         PROBE_TEST_FUNC         = 1,
2610         PROBE_TEST_DATA         = 2
2611 };
2612
2613 static void
2614 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2615                                   void *data, int flags)
2616 {
2617         struct ftrace_func_probe *entry;
2618         struct hlist_node *n, *tmp;
2619         char str[KSYM_SYMBOL_LEN];
2620         int type = MATCH_FULL;
2621         int i, len = 0;
2622         char *search;
2623
2624         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2625                 glob = NULL;
2626         else if (glob) {
2627                 int not;
2628
2629                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2630                 len = strlen(search);
2631
2632                 /* we do not support '!' for function probes */
2633                 if (WARN_ON(not))
2634                         return;
2635         }
2636
2637         mutex_lock(&ftrace_lock);
2638         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2639                 struct hlist_head *hhd = &ftrace_func_hash[i];
2640
2641                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2642
2643                         /* break up if statements for readability */
2644                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2645                                 continue;
2646
2647                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2648                                 continue;
2649
2650                         /* do this last, since it is the most expensive */
2651                         if (glob) {
2652                                 kallsyms_lookup(entry->ip, NULL, NULL,
2653                                                 NULL, str);
2654                                 if (!ftrace_match(str, glob, len, type))
2655                                         continue;
2656                         }
2657
2658                         hlist_del(&entry->node);
2659                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2660                 }
2661         }
2662         __disable_ftrace_function_probe();
2663         mutex_unlock(&ftrace_lock);
2664 }
2665
2666 void
2667 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2668                                 void *data)
2669 {
2670         __unregister_ftrace_function_probe(glob, ops, data,
2671                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2672 }
2673
2674 void
2675 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2676 {
2677         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2678 }
2679
2680 void unregister_ftrace_function_probe_all(char *glob)
2681 {
2682         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2683 }
2684
2685 static LIST_HEAD(ftrace_commands);
2686 static DEFINE_MUTEX(ftrace_cmd_mutex);
2687
2688 int register_ftrace_command(struct ftrace_func_command *cmd)
2689 {
2690         struct ftrace_func_command *p;
2691         int ret = 0;
2692
2693         mutex_lock(&ftrace_cmd_mutex);
2694         list_for_each_entry(p, &ftrace_commands, list) {
2695                 if (strcmp(cmd->name, p->name) == 0) {
2696                         ret = -EBUSY;
2697                         goto out_unlock;
2698                 }
2699         }
2700         list_add(&cmd->list, &ftrace_commands);
2701  out_unlock:
2702         mutex_unlock(&ftrace_cmd_mutex);
2703
2704         return ret;
2705 }
2706
2707 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2708 {
2709         struct ftrace_func_command *p, *n;
2710         int ret = -ENODEV;
2711
2712         mutex_lock(&ftrace_cmd_mutex);
2713         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2714                 if (strcmp(cmd->name, p->name) == 0) {
2715                         ret = 0;
2716                         list_del_init(&p->list);
2717                         goto out_unlock;
2718                 }
2719         }
2720  out_unlock:
2721         mutex_unlock(&ftrace_cmd_mutex);
2722
2723         return ret;
2724 }
2725
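/*
 * Parse one entry written to set_ftrace_filter/set_ftrace_notrace.
 * A plain pattern updates the hash directly; "func:command[:param]"
 * is dispatched to a registered ftrace_func_command.
 */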
2726 static int ftrace_process_regex(struct ftrace_hash *hash,
2727                                 char *buff, int len, int enable)
2728 {
2729         char *func, *command, *next = buff;
2730         struct ftrace_func_command *p;
2731         int ret = -EINVAL;
2732
2733         func = strsep(&next, ":");
2734
2735         if (!next) {
2736                 ret = ftrace_match_records(hash, func, len);
2737                 if (!ret)
2738                         ret = -EINVAL;
2739                 if (ret < 0)
2740                         return ret;
2741                 return 0;
2742         }
2743
2744         /* command found */
2745
2746         command = strsep(&next, ":");
2747
2748         mutex_lock(&ftrace_cmd_mutex);
2749         list_for_each_entry(p, &ftrace_commands, list) {
2750                 if (strcmp(p->name, command) == 0) {
2751                         ret = p->func(func, command, next, enable);
2752                         goto out_unlock;
2753                 }
2754         }
2755  out_unlock:
2756         mutex_unlock(&ftrace_cmd_mutex);
2757
2758         return ret;
2759 }
2760
2761 static ssize_t
2762 ftrace_regex_write(struct file *file, const char __user *ubuf,
2763                    size_t cnt, loff_t *ppos, int enable)
2764 {
2765         struct ftrace_iterator *iter;
2766         struct trace_parser *parser;
2767         ssize_t ret, read;
2768
2769         if (!cnt)
2770                 return 0;
2771
2772         mutex_lock(&ftrace_regex_lock);
2773
2774         ret = -ENODEV;
2775         if (unlikely(ftrace_disabled))
2776                 goto out_unlock;
2777
2778         if (file->f_mode & FMODE_READ) {
2779                 struct seq_file *m = file->private_data;
2780                 iter = m->private;
2781         } else
2782                 iter = file->private_data;
2783
2784         parser = &iter->parser;
2785         read = trace_get_user(parser, ubuf, cnt, ppos);
2786
2787         if (read >= 0 && trace_parser_loaded(parser) &&
2788             !trace_parser_cont(parser)) {
2789                 ret = ftrace_process_regex(iter->hash, parser->buffer,
2790                                            parser->idx, enable);
2791                 trace_parser_clear(parser);
2792                 if (ret)
2793                         goto out_unlock;
2794         }
2795
2796         ret = read;
2797 out_unlock:
2798         mutex_unlock(&ftrace_regex_lock);
2799
2800         return ret;
2801 }
2802
2803 static ssize_t
2804 ftrace_filter_write(struct file *file, const char __user *ubuf,
2805                     size_t cnt, loff_t *ppos)
2806 {
2807         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2808 }
2809
2810 static ssize_t
2811 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2812                      size_t cnt, loff_t *ppos)
2813 {
2814         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2815 }
2816
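/*
 * Apply @buf to the filter or notrace hash of @ops.  The update is
 * performed on a copy and then swapped in under ftrace_lock.
 */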
2817 static int
2818 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2819                  int reset, int enable)
2820 {
2821         struct ftrace_hash **orig_hash;
2822         struct ftrace_hash *hash;
2823         int ret;
2824
2825         /* All global ops use the global ops filters */
2826         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2827                 ops = &global_ops;
2828
2829         if (unlikely(ftrace_disabled))
2830                 return -ENODEV;
2831
2832         if (enable)
2833                 orig_hash = &ops->filter_hash;
2834         else
2835                 orig_hash = &ops->notrace_hash;
2836
2837         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2838         if (!hash)
2839                 return -ENOMEM;
2840
2841         mutex_lock(&ftrace_regex_lock);
2842         if (reset)
2843                 ftrace_filter_reset(hash);
2844         if (buf)
2845                 ftrace_match_records(hash, buf, len);
2846
2847         mutex_lock(&ftrace_lock);
2848         ret = ftrace_hash_move(orig_hash, hash);
2849         mutex_unlock(&ftrace_lock);
2850
2851         mutex_unlock(&ftrace_regex_lock);
2852
2853         free_ftrace_hash(hash);
2854         return ret;
2855 }
2856
2857 /**
2858  * ftrace_set_filter - set a function to filter on in ftrace
2859  * @ops - the ops to set the filter with
2860  * @buf - the string that holds the function filter text.
2861  * @len - the length of the string.
2862  * @reset - non zero to reset all filters before applying this filter.
2863  *
2864  * Filters denote which functions should be enabled when tracing is enabled.
2865  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2866  */
2867 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2868                        int len, int reset)
2869 {
2870         ftrace_set_regex(ops, buf, len, reset, 1);
2871 }
2872 EXPORT_SYMBOL_GPL(ftrace_set_filter);
2873
2874 /**
2875  * ftrace_set_notrace - set a function to not trace in ftrace
2876  * @ops - the ops to set the notrace filter with
2877  * @buf - the string that holds the function notrace text.
2878  * @len - the length of the string.
2879  * @reset - non zero to reset all filters before applying this filter.
2880  *
2881  * Notrace Filters denote which functions should not be enabled when tracing
2882  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2883  * for tracing.
2884  */
2885 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2886                         int len, int reset)
2887 {
2888         ftrace_set_regex(ops, buf, len, reset, 0);
2889 }
2890 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
2891 /**
2892  * ftrace_set_global_filter - set a function to filter on in ftrace
2893  *                            using the global ops
2894  * @buf - the string that holds the function filter text.
2895  * @len - the length of the string.
2896  * @reset - non zero to reset all filters before applying this filter.
2897  *
2898  * Filters denote which functions should be enabled when tracing is enabled.
2899  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2900  */
2901 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2902 {
2903         ftrace_set_regex(&global_ops, buf, len, reset, 1);
2904 }
2905 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2906
2907 /**
2908  * ftrace_set_global_notrace - set a function to not trace in ftrace
2909  *                             using the global ops
2910  * @buf - the string that holds the function notrace text.
2911  * @len - the length of the string.
2912  * @reset - non zero to reset all filters before applying this filter.
2913  *
2914  * Notrace Filters denote which functions should not be enabled when tracing
2915  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2916  * for tracing.
2917  */
2918 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
2919 {
2920         ftrace_set_regex(&global_ops, buf, len, reset, 0);
2921 }
2922 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
2923
2924 /*
2925  * command line interface to allow users to set filters on boot up.
2926  */
2927 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2928 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2929 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2930
2931 static int __init set_ftrace_notrace(char *str)
2932 {
2933         strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2934         return 1;
2935 }
2936 __setup("ftrace_notrace=", set_ftrace_notrace);
2937
2938 static int __init set_ftrace_filter(char *str)
2939 {
2940         strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2941         return 1;
2942 }
2943 __setup("ftrace_filter=", set_ftrace_filter);
2944
2945 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2946 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2947 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2948
2949 static int __init set_graph_function(char *str)
2950 {
2951         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2952         return 1;
2953 }
2954 __setup("ftrace_graph_filter=", set_graph_function);
2955
2956 static void __init set_ftrace_early_graph(char *buf)
2957 {
2958         int ret;
2959         char *func;
2960
2961         while (buf) {
2962                 func = strsep(&buf, ",");
2963                 /* we allow only one expression at a time */
2964                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2965                                       func);
2966                 if (ret)
2967                         printk(KERN_DEBUG "ftrace: function %s not "
2968                                           "traceable\n", func);
2969         }
2970 }
2971 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2972
2973 static void __init
2974 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2975 {
2976         char *func;
2977
2978         while (buf) {
2979                 func = strsep(&buf, ",");
2980                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2981         }
2982 }
2983
2984 static void __init set_ftrace_early_filters(void)
2985 {
2986         if (ftrace_filter_buf[0])
2987                 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
2988         if (ftrace_notrace_buf[0])
2989                 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
2990 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2991         if (ftrace_graph_buf[0])
2992                 set_ftrace_early_graph(ftrace_graph_buf);
2993 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2994 }
2995
2996 static int
2997 ftrace_regex_release(struct inode *inode, struct file *file)
2998 {
2999         struct seq_file *m = (struct seq_file *)file->private_data;
3000         struct ftrace_iterator *iter;
3001         struct ftrace_hash **orig_hash;
3002         struct trace_parser *parser;
3003         int filter_hash;
3004         int ret;
3005
3006         mutex_lock(&ftrace_regex_lock);
3007         if (file->f_mode & FMODE_READ) {
3008                 iter = m->private;
3009
3010                 seq_release(inode, file);
3011         } else
3012                 iter = file->private_data;
3013
3014         parser = &iter->parser;
3015         if (trace_parser_loaded(parser)) {
3016                 parser->buffer[parser->idx] = 0;
3017                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3018         }
3019
3020         trace_parser_put(parser);
3021
3022         if (file->f_mode & FMODE_WRITE) {
3023                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3024
3025                 if (filter_hash)
3026                         orig_hash = &iter->ops->filter_hash;
3027                 else
3028                         orig_hash = &iter->ops->notrace_hash;
3029
3030                 mutex_lock(&ftrace_lock);
3031                 /*
3032                  * Remove the current set, update the hash and add
3033                  * them back.
3034                  */
3035                 ftrace_hash_rec_disable(iter->ops, filter_hash);
3036                 ret = ftrace_hash_move(orig_hash, iter->hash);
3037                 if (!ret) {
3038                         ftrace_hash_rec_enable(iter->ops, filter_hash);
3039                         if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
3040                             && ftrace_enabled)
3041                                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3042                 }
3043                 mutex_unlock(&ftrace_lock);
3044         }
3045         free_ftrace_hash(iter->hash);
3046         kfree(iter);
3047
3048         mutex_unlock(&ftrace_regex_lock);
3049         return 0;
3050 }
3051
3052 static const struct file_operations ftrace_avail_fops = {
3053         .open = ftrace_avail_open,
3054         .read = seq_read,
3055         .llseek = seq_lseek,
3056         .release = seq_release_private,
3057 };
3058
3059 static const struct file_operations ftrace_enabled_fops = {
3060         .open = ftrace_enabled_open,
3061         .read = seq_read,
3062         .llseek = seq_lseek,
3063         .release = seq_release_private,
3064 };
3065
3066 static const struct file_operations ftrace_filter_fops = {
3067         .open = ftrace_filter_open,
3068         .read = seq_read,
3069         .write = ftrace_filter_write,
3070         .llseek = ftrace_regex_lseek,
3071         .release = ftrace_regex_release,
3072 };
3073
3074 static const struct file_operations ftrace_notrace_fops = {
3075         .open = ftrace_notrace_open,
3076         .read = seq_read,
3077         .write = ftrace_notrace_write,
3078         .llseek = ftrace_regex_lseek,
3079         .release = ftrace_regex_release,
3080 };
3081
3082 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3083
3084 static DEFINE_MUTEX(graph_lock);
3085
3086 int ftrace_graph_count;
3087 int ftrace_graph_filter_enabled;
3088 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3089
3090 static void *
3091 __g_next(struct seq_file *m, loff_t *pos)
3092 {
3093         if (*pos >= ftrace_graph_count)
3094                 return NULL;
3095         return &ftrace_graph_funcs[*pos];
3096 }
3097
3098 static void *
3099 g_next(struct seq_file *m, void *v, loff_t *pos)
3100 {
3101         (*pos)++;
3102         return __g_next(m, pos);
3103 }
3104
3105 static void *g_start(struct seq_file *m, loff_t *pos)
3106 {
3107         mutex_lock(&graph_lock);
3108
3109         /* Nothing is set; tell g_show to print that all functions are enabled */
3110         if (!ftrace_graph_filter_enabled && !*pos)
3111                 return (void *)1;
3112
3113         return __g_next(m, pos);
3114 }
3115
3116 static void g_stop(struct seq_file *m, void *p)
3117 {
3118         mutex_unlock(&graph_lock);
3119 }
3120
3121 static int g_show(struct seq_file *m, void *v)
3122 {
3123         unsigned long *ptr = v;
3124
3125         if (!ptr)
3126                 return 0;
3127
3128         if (ptr == (unsigned long *)1) {
3129                 seq_printf(m, "#### all functions enabled ####\n");
3130                 return 0;
3131         }
3132
3133         seq_printf(m, "%ps\n", (void *)*ptr);
3134
3135         return 0;
3136 }
3137
3138 static const struct seq_operations ftrace_graph_seq_ops = {
3139         .start = g_start,
3140         .next = g_next,
3141         .stop = g_stop,
3142         .show = g_show,
3143 };
3144
3145 static int
3146 ftrace_graph_open(struct inode *inode, struct file *file)
3147 {
3148         int ret = 0;
3149
3150         if (unlikely(ftrace_disabled))
3151                 return -ENODEV;
3152
3153         mutex_lock(&graph_lock);
3154         if ((file->f_mode & FMODE_WRITE) &&
3155             (file->f_flags & O_TRUNC)) {
3156                 ftrace_graph_filter_enabled = 0;
3157                 ftrace_graph_count = 0;
3158                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3159         }
3160         mutex_unlock(&graph_lock);
3161
3162         if (file->f_mode & FMODE_READ)
3163                 ret = seq_open(file, &ftrace_graph_seq_ops);
3164
3165         return ret;
3166 }
3167
3168 static int
3169 ftrace_graph_release(struct inode *inode, struct file *file)
3170 {
3171         if (file->f_mode & FMODE_READ)
3172                 seq_release(inode, file);
3173         return 0;
3174 }
3175
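/*
 * Add (or, for a '!' pattern, remove) matching functions in the
 * set_graph_function array.  Returns -EBUSY when the array is full
 * and -EINVAL when nothing matched.
 */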
3176 static int
3177 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3178 {
3179         struct dyn_ftrace *rec;
3180         struct ftrace_page *pg;
3181         int search_len;
3182         int fail = 1;
3183         int type, not;
3184         char *search;
3185         bool exists;
3186         int i;
3187
3188         /* decode regex */
3189         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3190         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3191                 return -EBUSY;
3192
3193         search_len = strlen(search);
3194
3195         mutex_lock(&ftrace_lock);
3196
3197         if (unlikely(ftrace_disabled)) {
3198                 mutex_unlock(&ftrace_lock);
3199                 return -ENODEV;
3200         }
3201
3202         do_for_each_ftrace_rec(pg, rec) {
3203
3204                 if (rec->flags & FTRACE_FL_FREE)
3205                         continue;
3206
3207                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3208                         /* if it is in the array */
3209                         exists = false;
3210                         for (i = 0; i < *idx; i++) {
3211                                 if (array[i] == rec->ip) {
3212                                         exists = true;
3213                                         break;
3214                                 }
3215                         }
3216
3217                         if (!not) {
3218                                 fail = 0;
3219                                 if (!exists) {
3220                                         array[(*idx)++] = rec->ip;
3221                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3222                                                 goto out;
3223                                 }
3224                         } else {
3225                                 if (exists) {
3226                                         array[i] = array[--(*idx)];
3227                                         array[*idx] = 0;
3228                                         fail = 0;
3229                                 }
3230                         }
3231                 }
3232         } while_for_each_ftrace_rec();
3233 out:
3234         mutex_unlock(&ftrace_lock);
3235
3236         if (fail)
3237                 return -EINVAL;
3238
3239         ftrace_graph_filter_enabled = 1;
3240         return 0;
3241 }
3242
3243 static ssize_t
3244 ftrace_graph_write(struct file *file, const char __user *ubuf,
3245                    size_t cnt, loff_t *ppos)
3246 {
3247         struct trace_parser parser;
3248         ssize_t read, ret;
3249
3250         if (!cnt)
3251                 return 0;
3252
3253         mutex_lock(&graph_lock);
3254
3255         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3256                 ret = -ENOMEM;
3257                 goto out_unlock;
3258         }
3259
3260         read = trace_get_user(&parser, ubuf, cnt, ppos);
3261
3262         if (read >= 0 && trace_parser_loaded((&parser))) {
3263                 parser.buffer[parser.idx] = 0;
3264
3265                 /* we allow only one expression at a time */
3266                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3267                                         parser.buffer);
3268                 if (ret)
3269                         goto out_free;
3270         }
3271
3272         ret = read;
3273
3274 out_free:
3275         trace_parser_put(&parser);
3276 out_unlock:
3277         mutex_unlock(&graph_lock);
3278
3279         return ret;
3280 }
3281
3282 static const struct file_operations ftrace_graph_fops = {
3283         .open           = ftrace_graph_open,
3284         .read           = seq_read,
3285         .write          = ftrace_graph_write,
3286         .release        = ftrace_graph_release,
3287         .llseek         = seq_lseek,
3288 };
3289 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3290
3291 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3292 {
3293
3294         trace_create_file("available_filter_functions", 0444,
3295                         d_tracer, NULL, &ftrace_avail_fops);
3296
3297         trace_create_file("enabled_functions", 0444,
3298                         d_tracer, NULL, &ftrace_enabled_fops);
3299
3300         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3301                         NULL, &ftrace_filter_fops);
3302
3303         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3304                                     NULL, &ftrace_notrace_fops);
3305
3306 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3307         trace_create_file("set_graph_function", 0444, d_tracer,
3308                                     NULL,
3309                                     &ftrace_graph_fops);
3310 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3311
3312         return 0;
3313 }
3314
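/*
 * Record every mcount call-site address between @start and @end and
 * convert those sites to NOPs via ftrace_update_code().
 */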
3315 static int ftrace_process_locs(struct module *mod,
3316                                unsigned long *start,
3317                                unsigned long *end)
3318 {
3319         unsigned long *p;
3320         unsigned long addr;
3321         unsigned long flags = 0; /* Shut up gcc */
3322
3323         mutex_lock(&ftrace_lock);
3324         p = start;
3325         while (p < end) {
3326                 addr = ftrace_call_adjust(*p++);
3327                 /*
3328                  * Some architecture linkers will pad between
3329                  * the different mcount_loc sections of different
3330                  * object files to satisfy alignments.
3331                  * Skip any NULL pointers.
3332                  */
3333                 if (!addr)
3334                         continue;
3335                 ftrace_record_ip(addr);
3336         }
3337
3338         /*
3339          * We only need to disable interrupts on start up
3340          * because we are modifying code that an interrupt
3341          * may execute, and the modification is not atomic.
3342          * But for modules, nothing runs the code we modify
3343          * until we are finished with it, and there's no
3344          * reason to cause large interrupt latencies while we do it.
3345          */
3346         if (!mod)
3347                 local_irq_save(flags);
3348         ftrace_update_code(mod);
3349         if (!mod)
3350                 local_irq_restore(flags);
3351         mutex_unlock(&ftrace_lock);
3352
3353         return 0;
3354 }
3355
3356 #ifdef CONFIG_MODULES
3357 void ftrace_release_mod(struct module *mod)
3358 {
3359         struct dyn_ftrace *rec;
3360         struct ftrace_page *pg;
3361
3362         mutex_lock(&ftrace_lock);
3363
3364         if (ftrace_disabled)
3365                 goto out_unlock;
3366
3367         do_for_each_ftrace_rec(pg, rec) {
3368                 if (within_module_core(rec->ip, mod)) {
3369                         /*
3370                          * rec->ip is changed in ftrace_free_rec(), so a record
3371                          * that was freed should no longer be within the module here.
3372                          */
3373                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3374                         ftrace_free_rec(rec);
3375                 }
3376         } while_for_each_ftrace_rec();
3377  out_unlock:
3378         mutex_unlock(&ftrace_lock);
3379 }
3380
3381 static void ftrace_init_module(struct module *mod,
3382                                unsigned long *start, unsigned long *end)
3383 {
3384         if (ftrace_disabled || start == end)
3385                 return;
3386         ftrace_process_locs(mod, start, end);
3387 }
3388
3389 static int ftrace_module_notify(struct notifier_block *self,
3390                                 unsigned long val, void *data)
3391 {
3392         struct module *mod = data;
3393
3394         switch (val) {
3395         case MODULE_STATE_COMING:
3396                 ftrace_init_module(mod, mod->ftrace_callsites,
3397                                    mod->ftrace_callsites +
3398                                    mod->num_ftrace_callsites);
3399                 break;
3400         case MODULE_STATE_GOING:
3401                 ftrace_release_mod(mod);
3402                 break;
3403         }
3404
3405         return 0;
3406 }
3407 #else
3408 static int ftrace_module_notify(struct notifier_block *self,
3409                                 unsigned long val, void *data)
3410 {
3411         return 0;
3412 }
3413 #endif /* CONFIG_MODULES */
3414
3415 struct notifier_block ftrace_module_nb = {
3416         .notifier_call = ftrace_module_notify,
3417         .priority = 0,
3418 };
3419
3420 extern unsigned long __start_mcount_loc[];
3421 extern unsigned long __stop_mcount_loc[];
3422
3423 void __init ftrace_init(void)
3424 {
3425         unsigned long count, addr, flags;
3426         int ret;
3427
3428         /* Keep the ftrace pointer to the stub */
3429         addr = (unsigned long)ftrace_stub;
3430
3431         local_irq_save(flags);
3432         ftrace_dyn_arch_init(&addr);
3433         local_irq_restore(flags);
3434
3435         /* ftrace_dyn_arch_init places the return code in addr */
3436         if (addr)
3437                 goto failed;
3438
3439         count = __stop_mcount_loc - __start_mcount_loc;
3440
3441         ret = ftrace_dyn_table_alloc(count);
3442         if (ret)
3443                 goto failed;
3444
3445         last_ftrace_enabled = ftrace_enabled = 1;
3446
3447         ret = ftrace_process_locs(NULL,
3448                                   __start_mcount_loc,
3449                                   __stop_mcount_loc);
3450
3451         ret = register_module_notifier(&ftrace_module_nb);
3452         if (ret)
3453                 pr_warning("Failed to register ftrace module notifier\n");
3454
3455         set_ftrace_early_filters();
3456
3457         return;
3458  failed:
3459         ftrace_disabled = 1;
3460 }
3461
3462 #else
3463
3464 static struct ftrace_ops global_ops = {
3465         .func                   = ftrace_stub,
3466 };
3467
3468 static int __init ftrace_nodyn_init(void)
3469 {
3470         ftrace_enabled = 1;
3471         return 0;
3472 }
3473 device_initcall(ftrace_nodyn_init);
3474
3475 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3476 static inline void ftrace_startup_enable(int command) { }
3477 /* Keep as macros so we do not need to define the commands */
3478 # define ftrace_startup(ops, command)                   \
3479         ({                                              \
3480                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
3481                 0;                                      \
3482         })
3483 # define ftrace_shutdown(ops, command)  do { } while (0)
3484 # define ftrace_startup_sysctl()        do { } while (0)
3485 # define ftrace_shutdown_sysctl()       do { } while (0)
3486
3487 static inline int
3488 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3489 {
3490         return 1;
3491 }
3492
3493 #endif /* CONFIG_DYNAMIC_FTRACE */
3494
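/*
 * List dispatcher: installed as the trace callback when more than one
 * ftrace_ops is registered. It walks ftrace_ops_list and calls every
 * ops whose filter accepts this ip (see ftrace_ops_test()).
 */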
3495 static void
3496 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3497 {
3498         struct ftrace_ops *op;
3499
3500         if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3501                 return;
3502
3503         trace_recursion_set(TRACE_INTERNAL_BIT);
3504         /*
3505          * Some of the ops may be dynamically allocated,
3506          * they must be freed after a synchronize_sched().
3507          */
3508         preempt_disable_notrace();
3509         op = rcu_dereference_raw(ftrace_ops_list);
3510         while (op != &ftrace_list_end) {
3511                 if (ftrace_ops_test(op, ip))
3512                         op->func(ip, parent_ip);
3513                 op = rcu_dereference_raw(op->next);
3514         }
3515         preempt_enable_notrace();
3516         trace_recursion_clear(TRACE_INTERNAL_BIT);
3517 }
3518
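/*
 * Helpers used by the set_ftrace_pid filter to flag tasks for tracing.
 * The swapper case has to walk every online CPU's idle task explicitly,
 * since each CPU has its own idle task and they all share pid 0.
 */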
3519 static void clear_ftrace_swapper(void)
3520 {
3521         struct task_struct *p;
3522         int cpu;
3523
3524         get_online_cpus();
3525         for_each_online_cpu(cpu) {
3526                 p = idle_task(cpu);
3527                 clear_tsk_trace_trace(p);
3528         }
3529         put_online_cpus();
3530 }
3531
3532 static void set_ftrace_swapper(void)
3533 {
3534         struct task_struct *p;
3535         int cpu;
3536
3537         get_online_cpus();
3538         for_each_online_cpu(cpu) {
3539                 p = idle_task(cpu);
3540                 set_tsk_trace_trace(p);
3541         }
3542         put_online_cpus();
3543 }
3544
3545 static void clear_ftrace_pid(struct pid *pid)
3546 {
3547         struct task_struct *p;
3548
3549         rcu_read_lock();
3550         do_each_pid_task(pid, PIDTYPE_PID, p) {
3551                 clear_tsk_trace_trace(p);
3552         } while_each_pid_task(pid, PIDTYPE_PID, p);
3553         rcu_read_unlock();
3554
3555         put_pid(pid);
3556 }
3557
3558 static void set_ftrace_pid(struct pid *pid)
3559 {
3560         struct task_struct *p;
3561
3562         rcu_read_lock();
3563         do_each_pid_task(pid, PIDTYPE_PID, p) {
3564                 set_tsk_trace_trace(p);
3565         } while_each_pid_task(pid, PIDTYPE_PID, p);
3566         rcu_read_unlock();
3567 }
3568
3569 static void clear_ftrace_pid_task(struct pid *pid)
3570 {
3571         if (pid == ftrace_swapper_pid)
3572                 clear_ftrace_swapper();
3573         else
3574                 clear_ftrace_pid(pid);
3575 }
3576
3577 static void set_ftrace_pid_task(struct pid *pid)
3578 {
3579         if (pid == ftrace_swapper_pid)
3580                 set_ftrace_swapper();
3581         else
3582                 set_ftrace_pid(pid);
3583 }
3584
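/*
 * Add one pid to the set_ftrace_pid filter. A value of 0 selects the
 * per-cpu idle (swapper) tasks via ftrace_swapper_pid; any other value
 * must name an existing pid, otherwise -EINVAL is returned.
 */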
3585 static int ftrace_pid_add(int p)
3586 {
3587         struct pid *pid;
3588         struct ftrace_pid *fpid;
3589         int ret = -EINVAL;
3590
3591         mutex_lock(&ftrace_lock);
3592
3593         if (!p)
3594                 pid = ftrace_swapper_pid;
3595         else
3596                 pid = find_get_pid(p);
3597
3598         if (!pid)
3599                 goto out;
3600
3601         ret = 0;
3602
3603         list_for_each_entry(fpid, &ftrace_pids, list)
3604                 if (fpid->pid == pid)
3605                         goto out_put;
3606
3607         ret = -ENOMEM;
3608
3609         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3610         if (!fpid)
3611                 goto out_put;
3612
3613         list_add(&fpid->list, &ftrace_pids);
3614         fpid->pid = pid;
3615
3616         set_ftrace_pid_task(pid);
3617
3618         ftrace_update_pid_func();
3619         ftrace_startup_enable(0);
3620
3621         mutex_unlock(&ftrace_lock);
3622         return 0;
3623
3624 out_put:
3625         if (pid != ftrace_swapper_pid)
3626                 put_pid(pid);
3627
3628 out:
3629         mutex_unlock(&ftrace_lock);
3630         return ret;
3631 }
3632
3633 static void ftrace_pid_reset(void)
3634 {
3635         struct ftrace_pid *fpid, *safe;
3636
3637         mutex_lock(&ftrace_lock);
3638         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3639                 struct pid *pid = fpid->pid;
3640
3641                 clear_ftrace_pid_task(pid);
3642
3643                 list_del(&fpid->list);
3644                 kfree(fpid);
3645         }
3646
3647         ftrace_update_pid_func();
3648         ftrace_startup_enable(0);
3649
3650         mutex_unlock(&ftrace_lock);
3651 }
3652
3653 static void *fpid_start(struct seq_file *m, loff_t *pos)
3654 {
3655         mutex_lock(&ftrace_lock);
3656
3657         if (list_empty(&ftrace_pids) && (!*pos))
3658                 return (void *) 1;
3659
3660         return seq_list_start(&ftrace_pids, *pos);
3661 }
3662
3663 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3664 {
3665         if (v == (void *)1)
3666                 return NULL;
3667
3668         return seq_list_next(v, &ftrace_pids, pos);
3669 }
3670
3671 static void fpid_stop(struct seq_file *m, void *p)
3672 {
3673         mutex_unlock(&ftrace_lock);
3674 }
3675
3676 static int fpid_show(struct seq_file *m, void *v)
3677 {
3678         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3679
3680         if (v == (void *)1) {
3681                 seq_printf(m, "no pid\n");
3682                 return 0;
3683         }
3684
3685         if (fpid->pid == ftrace_swapper_pid)
3686                 seq_printf(m, "swapper tasks\n");
3687         else
3688                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3689
3690         return 0;
3691 }
3692
3693 static const struct seq_operations ftrace_pid_sops = {
3694         .start = fpid_start,
3695         .next = fpid_next,
3696         .stop = fpid_stop,
3697         .show = fpid_show,
3698 };
3699
3700 static int
3701 ftrace_pid_open(struct inode *inode, struct file *file)
3702 {
3703         int ret = 0;
3704
3705         if ((file->f_mode & FMODE_WRITE) &&
3706             (file->f_flags & O_TRUNC))
3707                 ftrace_pid_reset();
3708
3709         if (file->f_mode & FMODE_READ)
3710                 ret = seq_open(file, &ftrace_pid_sops);
3711
3712         return ret;
3713 }
3714
3715 static ssize_t
3716 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3717                    size_t cnt, loff_t *ppos)
3718 {
3719         char buf[64], *tmp;
3720         long val;
3721         int ret;
3722
3723         if (cnt >= sizeof(buf))
3724                 return -EINVAL;
3725
3726         if (copy_from_user(&buf, ubuf, cnt))
3727                 return -EFAULT;
3728
3729         buf[cnt] = 0;
3730
3731         /*
3732          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3733          * to clear the filter quietly.
3734          */
3735         tmp = strstrip(buf);
3736         if (strlen(tmp) == 0)
3737                 return 1;
3738
3739         ret = strict_strtol(tmp, 10, &val);
3740         if (ret < 0)
3741                 return ret;
3742
3743         ret = ftrace_pid_add(val);
3744
3745         return ret ? ret : cnt;
3746 }
3747
3748 static int
3749 ftrace_pid_release(struct inode *inode, struct file *file)
3750 {
3751         if (file->f_mode & FMODE_READ)
3752                 seq_release(inode, file);
3753
3754         return 0;
3755 }
3756
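/*
 * File operations for the set_ftrace_pid file created in
 * ftrace_init_debugfs() below: writing a pid restricts function tracing
 * to that task, writing 0 selects the idle (swapper) tasks, and opening
 * the file with O_TRUNC (e.g. "echo > set_ftrace_pid") clears the filter.
 */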
3757 static const struct file_operations ftrace_pid_fops = {
3758         .open           = ftrace_pid_open,
3759         .write          = ftrace_pid_write,
3760         .read           = seq_read,
3761         .llseek         = seq_lseek,
3762         .release        = ftrace_pid_release,
3763 };
3764
3765 static __init int ftrace_init_debugfs(void)
3766 {
3767         struct dentry *d_tracer;
3768
3769         d_tracer = tracing_init_dentry();
3770         if (!d_tracer)
3771                 return 0;
3772
3773         ftrace_init_dyn_debugfs(d_tracer);
3774
3775         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3776                             NULL, &ftrace_pid_fops);
3777
3778         ftrace_profile_debugfs(d_tracer);
3779
3780         return 0;
3781 }
3782 fs_initcall(ftrace_init_debugfs);
3783
3784 /**
3785  * ftrace_kill - kill ftrace
3786  *
3787  * This function should be used by panic code. It stops ftrace
3788  * but in a not so nice way: it simply sets the disabled flags and
3789  * clears the trace callback, so it is safe to call from any context.
3790  */
3791 void ftrace_kill(void)
3792 {
3793         ftrace_disabled = 1;
3794         ftrace_enabled = 0;
3795         clear_ftrace_function();
3796 }
3797
3798 /**
3799  * register_ftrace_function - register a function for profiling
3800  * @ops: ops structure that holds the function for profiling.
3801  *
3802  * Register a function to be called by all functions in the
3803  * kernel.
3804  *
3805  * Note: @ops->func and all the functions it calls must be labeled
3806  *       with "notrace", otherwise it will go into a
3807  *       recursive loop.
3808  */
3809 int register_ftrace_function(struct ftrace_ops *ops)
3810 {
3811         int ret = -1;
3812
3813         mutex_lock(&ftrace_lock);
3814
3815         if (unlikely(ftrace_disabled))
3816                 goto out_unlock;
3817
3818         ret = __register_ftrace_function(ops);
3819         if (!ret)
3820                 ret = ftrace_startup(ops, 0);
3821
3822
3823  out_unlock:
3824         mutex_unlock(&ftrace_lock);
3825         return ret;
3826 }
3827 EXPORT_SYMBOL_GPL(register_ftrace_function);
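/*
 * A minimal usage sketch (illustrative names, not part of this file):
 * the callback must itself be notrace so that it is not traced and
 * recursed into, and it must not sleep, since it can be called from
 * almost any kernel context.
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		// ip is the traced function, parent_ip its caller
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */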
3828
3829 /**
3830  * unregister_ftrace_function - unregister a function for profiling.
3831  * @ops: ops structure that holds the function to unregister
3832  *
3833  * Unregister a function that was added to be called by ftrace profiling.
3834  */
3835 int unregister_ftrace_function(struct ftrace_ops *ops)
3836 {
3837         int ret;
3838
3839         mutex_lock(&ftrace_lock);
3840         ret = __unregister_ftrace_function(ops);
3841         if (!ret)
3842                 ftrace_shutdown(ops, 0);
3843         mutex_unlock(&ftrace_lock);
3844
3845         return ret;
3846 }
3847 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3848
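/*
 * proc handler for the kernel.ftrace_enabled sysctl
 * (/proc/sys/kernel/ftrace_enabled): writing 0 sends all traced calls
 * to ftrace_stub, writing 1 re-installs the registered callbacks.
 */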
3849 int
3850 ftrace_enable_sysctl(struct ctl_table *table, int write,
3851                      void __user *buffer, size_t *lenp,
3852                      loff_t *ppos)
3853 {
3854         int ret = -ENODEV;
3855
3856         mutex_lock(&ftrace_lock);
3857
3858         if (unlikely(ftrace_disabled))
3859                 goto out;
3860
3861         ret = proc_dointvec(table, write, buffer, lenp, ppos);
3862
3863         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3864                 goto out;
3865
3866         last_ftrace_enabled = !!ftrace_enabled;
3867
3868         if (ftrace_enabled) {
3869
3870                 ftrace_startup_sysctl();
3871
3872                 /* we are starting ftrace again */
3873                 if (ftrace_ops_list != &ftrace_list_end) {
3874                         if (ftrace_ops_list->next == &ftrace_list_end)
3875                                 ftrace_trace_function = ftrace_ops_list->func;
3876                         else
3877                                 ftrace_trace_function = ftrace_ops_list_func;
3878                 }
3879
3880         } else {
3881                 /* stopping ftrace calls (just send to ftrace_stub) */
3882                 ftrace_trace_function = ftrace_stub;
3883
3884                 ftrace_shutdown_sysctl();
3885         }
3886
3887  out:
3888         mutex_unlock(&ftrace_lock);
3889         return ret;
3890 }
3891
3892 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3893
3894 static int ftrace_graph_active;
3895 static struct notifier_block ftrace_suspend_notifier;
3896
3897 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3898 {
3899         return 0;
3900 }
3901
3902 /* The callbacks that hook a function */
3903 trace_func_graph_ret_t ftrace_graph_return =
3904                         (trace_func_graph_ret_t)ftrace_stub;
3905 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3906
3907 /* Try to assign return stacks to at most FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
3908 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3909 {
3910         int i;
3911         int ret = 0;
3912         unsigned long flags;
3913         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3914         struct task_struct *g, *t;
3915
3916         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3917                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3918                                         * sizeof(struct ftrace_ret_stack),
3919                                         GFP_KERNEL);
3920                 if (!ret_stack_list[i]) {
3921                         start = 0;
3922                         end = i;
3923                         ret = -ENOMEM;
3924                         goto free;
3925                 }
3926         }
3927
3928         read_lock_irqsave(&tasklist_lock, flags);
3929         do_each_thread(g, t) {
3930                 if (start == end) {
3931                         ret = -EAGAIN;
3932                         goto unlock;
3933                 }
3934
3935                 if (t->ret_stack == NULL) {
3936                         atomic_set(&t->tracing_graph_pause, 0);
3937                         atomic_set(&t->trace_overrun, 0);
3938                         t->curr_ret_stack = -1;
3939                         /* Make sure the tasks see the -1 first: */
3940                         smp_wmb();
3941                         t->ret_stack = ret_stack_list[start++];
3942                 }
3943         } while_each_thread(g, t);
3944
3945 unlock:
3946         read_unlock_irqrestore(&tasklist_lock, flags);
3947 free:
3948         for (i = start; i < end; i++)
3949                 kfree(ret_stack_list[i]);
3950         return ret;
3951 }
3952
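/*
 * sched_switch probe, used only when sleep time is not being counted:
 * it timestamps the task being scheduled out and, when that task runs
 * again, pushes every pending calltime forward by the time it spent
 * sleeping, so the sleep does not inflate function graph durations.
 */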
3953 static void
3954 ftrace_graph_probe_sched_switch(void *ignore,
3955                         struct task_struct *prev, struct task_struct *next)
3956 {
3957         unsigned long long timestamp;
3958         int index;
3959
3960         /*
3961          * If the user wants to count the time a function was asleep,
3962          * do not update the time stamps.
3963          */
3964         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3965                 return;
3966
3967         timestamp = trace_clock_local();
3968
3969         prev->ftrace_timestamp = timestamp;
3970
3971         /* only process tasks that we timestamped */
3972         if (!next->ftrace_timestamp)
3973                 return;
3974
3975         /*
3976          * Update all the counters in next to make up for the
3977          * time next was sleeping.
3978          */
3979         timestamp -= next->ftrace_timestamp;
3980
3981         for (index = next->curr_ret_stack; index >= 0; index--)
3982                 next->ret_stack[index].calltime += timestamp;
3983 }
3984
3985 /* Allocate a return stack for each task */
3986 static int start_graph_tracing(void)
3987 {
3988         struct ftrace_ret_stack **ret_stack_list;
3989         int ret, cpu;
3990
3991         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3992                                 sizeof(struct ftrace_ret_stack *),
3993                                 GFP_KERNEL);
3994
3995         if (!ret_stack_list)
3996                 return -ENOMEM;
3997
3998         /* The cpu_boot init_task->ret_stack will never be freed */
3999         for_each_online_cpu(cpu) {
4000                 if (!idle_task(cpu)->ret_stack)
4001                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4002         }
4003
4004         do {
4005                 ret = alloc_retstack_tasklist(ret_stack_list);
4006         } while (ret == -EAGAIN);
4007
4008         if (!ret) {
4009                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4010                 if (ret)
4011                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4012                                 " probe to kernel_sched_switch\n");
4013         }
4014
4015         kfree(ret_stack_list);
4016         return ret;
4017 }
4018
4019 /*
4020  * Hibernation protection.
4021  * The state of the current task is too unstable during
4022  * suspend/restore to disk. We want to protect against that.
4023  */
4024 static int
4025 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4026                                                         void *unused)
4027 {
4028         switch (state) {
4029         case PM_HIBERNATION_PREPARE:
4030                 pause_graph_tracing();
4031                 break;
4032
4033         case PM_POST_HIBERNATION:
4034                 unpause_graph_tracing();
4035                 break;
4036         }
4037         return NOTIFY_DONE;
4038 }
4039
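/*
 * A minimal usage sketch (illustrative names, not part of this file):
 * the entry callback returns nonzero to trace this function's return,
 * zero to skip it; only one graph tracer may be registered at a time.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */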
4040 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4041                         trace_func_graph_ent_t entryfunc)
4042 {
4043         int ret = 0;
4044
4045         mutex_lock(&ftrace_lock);
4046
4047         /* we currently allow only one tracer registered at a time */
4048         if (ftrace_graph_active) {
4049                 ret = -EBUSY;
4050                 goto out;
4051         }
4052
4053         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4054         register_pm_notifier(&ftrace_suspend_notifier);
4055
4056         ftrace_graph_active++;
4057         ret = start_graph_tracing();
4058         if (ret) {
4059                 ftrace_graph_active--;
4060                 goto out;
4061         }
4062
4063         ftrace_graph_return = retfunc;
4064         ftrace_graph_entry = entryfunc;
4065
4066         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4067
4068 out:
4069         mutex_unlock(&ftrace_lock);
4070         return ret;
4071 }
4072
4073 void unregister_ftrace_graph(void)
4074 {
4075         mutex_lock(&ftrace_lock);
4076
4077         if (unlikely(!ftrace_graph_active))
4078                 goto out;
4079
4080         ftrace_graph_active--;
4081         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4082         ftrace_graph_entry = ftrace_graph_entry_stub;
4083         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4084         unregister_pm_notifier(&ftrace_suspend_notifier);
4085         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4086
4087  out:
4088         mutex_unlock(&ftrace_lock);
4089 }
4090
4091 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4092
4093 static void
4094 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4095 {
4096         atomic_set(&t->tracing_graph_pause, 0);
4097         atomic_set(&t->trace_overrun, 0);
4098         t->ftrace_timestamp = 0;
4099         /* make curr_ret_stack visible before we add the ret_stack */
4100         smp_wmb();
4101         t->ret_stack = ret_stack;
4102 }
4103
4104 /*
4105  * Allocate a return stack for the idle task. May be the first
4106  * time through, or it may be done by CPU hotplug online.
4107  */
4108 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4109 {
4110         t->curr_ret_stack = -1;
4111         /*
4112          * The idle task has no parent; it either has its own
4113          * stack or no stack at all.
4114          */
4115         if (t->ret_stack)
4116                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4117
4118         if (ftrace_graph_active) {
4119                 struct ftrace_ret_stack *ret_stack;
4120
4121                 ret_stack = per_cpu(idle_ret_stack, cpu);
4122                 if (!ret_stack) {
4123                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4124                                             * sizeof(struct ftrace_ret_stack),
4125                                             GFP_KERNEL);
4126                         if (!ret_stack)
4127                                 return;
4128                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4129                 }
4130                 graph_init_task(t, ret_stack);
4131         }
4132 }
4133
4134 /* Allocate a return stack for newly created task */
4135 void ftrace_graph_init_task(struct task_struct *t)
4136 {
4137         /* Make sure we do not use the parent ret_stack */
4138         t->ret_stack = NULL;
4139         t->curr_ret_stack = -1;
4140
4141         if (ftrace_graph_active) {
4142                 struct ftrace_ret_stack *ret_stack;
4143
4144                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4145                                 * sizeof(struct ftrace_ret_stack),
4146                                 GFP_KERNEL);
4147                 if (!ret_stack)
4148                         return;
4149                 graph_init_task(t, ret_stack);
4150         }
4151 }
4152
4153 void ftrace_graph_exit_task(struct task_struct *t)
4154 {
4155         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4156
4157         t->ret_stack = NULL;
4158         /* NULL must become visible to IRQs before we free it: */
4159         barrier();
4160
4161         kfree(ret_stack);
4162 }
4163
4164 void ftrace_graph_stop(void)
4165 {
4166         ftrace_stop();
4167 }
4168 #endif