]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - kernel/trace/ftrace.c
bna: add missing iounmap() on error in bnad_init()
[karo-tx-linux.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35
36 #include <trace/events/sched.h>
37
38 #include <asm/setup.h>
39
40 #include "trace_output.h"
41 #include "trace_stat.h"
42
43 #define FTRACE_WARN_ON(cond)                    \
44         ({                                      \
45                 int ___r = cond;                \
46                 if (WARN_ON(___r))              \
47                         ftrace_kill();          \
48                 ___r;                           \
49         })
50
51 #define FTRACE_WARN_ON_ONCE(cond)               \
52         ({                                      \
53                 int ___r = cond;                \
54                 if (WARN_ON_ONCE(___r))         \
55                         ftrace_kill();          \
56                 ___r;                           \
57         })
58
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66
67 static struct ftrace_ops ftrace_list_end __read_mostly = {
68         .func           = ftrace_stub,
69         .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
70 };
71
72 /* ftrace_enabled is a method to turn ftrace on or off */
73 int ftrace_enabled __read_mostly;
74 static int last_ftrace_enabled;
75
76 /* Quick disabling of function tracer. */
77 int function_trace_stop __read_mostly;
78
79 /* Current function tracing op */
80 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
81
82 /* List for set_ftrace_pid's pids. */
83 LIST_HEAD(ftrace_pids);
84 struct ftrace_pid {
85         struct list_head list;
86         struct pid *pid;
87 };
88
89 /*
90  * ftrace_disabled is set when an anomaly is discovered.
91  * ftrace_disabled is much stronger than ftrace_enabled.
92  */
93 static int ftrace_disabled __read_mostly;
94
95 static DEFINE_MUTEX(ftrace_lock);
96
97 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
98 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
99 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
100 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
101 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
102 static struct ftrace_ops global_ops;
103 static struct ftrace_ops control_ops;
104
105 #if ARCH_SUPPORTS_FTRACE_OPS
106 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
107                                  struct ftrace_ops *op, struct pt_regs *regs);
108 #else
109 /* See comment below, where ftrace_ops_list_func is defined */
110 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
111 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
112 #endif
113
114 /*
115  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
116  * can use rcu_dereference_raw() is that elements removed from this list
117  * are simply leaked, so there is no need to interact with a grace-period
118  * mechanism.  The rcu_dereference_raw() calls are needed to handle
119  * concurrent insertions into the ftrace_global_list.
120  *
121  * Silly Alpha and silly pointer-speculation compiler optimizations!
122  */
123 #define do_for_each_ftrace_op(op, list)                 \
124         op = rcu_dereference_raw(list);                 \
125         do
126
127 /*
128  * Optimized for just a single item in the list (as that is the normal case).
129  */
130 #define while_for_each_ftrace_op(op)                            \
131         while (likely(op = rcu_dereference_raw((op)->next)) &&  \
132                unlikely((op) != &ftrace_list_end))
133
134 /**
135  * ftrace_nr_registered_ops - return number of ops registered
136  *
137  * Returns the number of ftrace_ops registered and tracing functions
138  */
139 int ftrace_nr_registered_ops(void)
140 {
141         struct ftrace_ops *ops;
142         int cnt = 0;
143
144         mutex_lock(&ftrace_lock);
145
146         for (ops = ftrace_ops_list;
147              ops != &ftrace_list_end; ops = ops->next)
148                 cnt++;
149
150         mutex_unlock(&ftrace_lock);
151
152         return cnt;
153 }
154
155 static void
156 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
157                         struct ftrace_ops *op, struct pt_regs *regs)
158 {
159         int bit;
160
161         bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
162         if (bit < 0)
163                 return;
164
165         do_for_each_ftrace_op(op, ftrace_global_list) {
166                 op->func(ip, parent_ip, op, regs);
167         } while_for_each_ftrace_op(op);
168
169         trace_clear_recursion(bit);
170 }
171
172 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
173                             struct ftrace_ops *op, struct pt_regs *regs)
174 {
175         if (!test_tsk_trace_trace(current))
176                 return;
177
178         ftrace_pid_function(ip, parent_ip, op, regs);
179 }
180
181 static void set_ftrace_pid_function(ftrace_func_t func)
182 {
183         /* do not set ftrace_pid_function to itself! */
184         if (func != ftrace_pid_func)
185                 ftrace_pid_function = func;
186 }
187
188 /**
189  * clear_ftrace_function - reset the ftrace function
190  *
191  * This NULLs the ftrace function and in essence stops
192  * tracing.  There may be lag
193  */
194 void clear_ftrace_function(void)
195 {
196         ftrace_trace_function = ftrace_stub;
197         ftrace_pid_function = ftrace_stub;
198 }
199
200 static void control_ops_disable_all(struct ftrace_ops *ops)
201 {
202         int cpu;
203
204         for_each_possible_cpu(cpu)
205                 *per_cpu_ptr(ops->disabled, cpu) = 1;
206 }
207
208 static int control_ops_alloc(struct ftrace_ops *ops)
209 {
210         int __percpu *disabled;
211
212         disabled = alloc_percpu(int);
213         if (!disabled)
214                 return -ENOMEM;
215
216         ops->disabled = disabled;
217         control_ops_disable_all(ops);
218         return 0;
219 }
220
221 static void control_ops_free(struct ftrace_ops *ops)
222 {
223         free_percpu(ops->disabled);
224 }
225
226 static void update_global_ops(void)
227 {
228         ftrace_func_t func;
229
230         /*
231          * If there's only one function registered, then call that
232          * function directly. Otherwise, we need to iterate over the
233          * registered callers.
234          */
235         if (ftrace_global_list == &ftrace_list_end ||
236             ftrace_global_list->next == &ftrace_list_end) {
237                 func = ftrace_global_list->func;
238                 /*
239                  * As we are calling the function directly.
240                  * If it does not have recursion protection,
241                  * the function_trace_op needs to be updated
242                  * accordingly.
243                  */
244                 if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
245                         global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
246                 else
247                         global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
248         } else {
249                 func = ftrace_global_list_func;
250                 /* The list has its own recursion protection. */
251                 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
252         }
253
254
255         /* If we filter on pids, update to use the pid function */
256         if (!list_empty(&ftrace_pids)) {
257                 set_ftrace_pid_function(func);
258                 func = ftrace_pid_func;
259         }
260
261         global_ops.func = func;
262 }
263
264 static void update_ftrace_function(void)
265 {
266         ftrace_func_t func;
267
268         update_global_ops();
269
270         /*
271          * If we are at the end of the list and this ops is
272          * recursion safe and not dynamic and the arch supports passing ops,
273          * then have the mcount trampoline call the function directly.
274          */
275         if (ftrace_ops_list == &ftrace_list_end ||
276             (ftrace_ops_list->next == &ftrace_list_end &&
277              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
278              (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
279              !FTRACE_FORCE_LIST_FUNC)) {
280                 /* Set the ftrace_ops that the arch callback uses */
281                 if (ftrace_ops_list == &global_ops)
282                         function_trace_op = ftrace_global_list;
283                 else
284                         function_trace_op = ftrace_ops_list;
285                 func = ftrace_ops_list->func;
286         } else {
287                 /* Just use the default ftrace_ops */
288                 function_trace_op = &ftrace_list_end;
289                 func = ftrace_ops_list_func;
290         }
291
292         ftrace_trace_function = func;
293 }
294
295 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
296 {
297         ops->next = *list;
298         /*
299          * We are entering ops into the list but another
300          * CPU might be walking that list. We need to make sure
301          * the ops->next pointer is valid before another CPU sees
302          * the ops pointer included into the list.
303          */
304         rcu_assign_pointer(*list, ops);
305 }
306
307 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
308 {
309         struct ftrace_ops **p;
310
311         /*
312          * If we are removing the last function, then simply point
313          * to the ftrace_stub.
314          */
315         if (*list == ops && ops->next == &ftrace_list_end) {
316                 *list = &ftrace_list_end;
317                 return 0;
318         }
319
320         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
321                 if (*p == ops)
322                         break;
323
324         if (*p != ops)
325                 return -1;
326
327         *p = (*p)->next;
328         return 0;
329 }
330
331 static void add_ftrace_list_ops(struct ftrace_ops **list,
332                                 struct ftrace_ops *main_ops,
333                                 struct ftrace_ops *ops)
334 {
335         int first = *list == &ftrace_list_end;
336         add_ftrace_ops(list, ops);
337         if (first)
338                 add_ftrace_ops(&ftrace_ops_list, main_ops);
339 }
340
341 static int remove_ftrace_list_ops(struct ftrace_ops **list,
342                                   struct ftrace_ops *main_ops,
343                                   struct ftrace_ops *ops)
344 {
345         int ret = remove_ftrace_ops(list, ops);
346         if (!ret && *list == &ftrace_list_end)
347                 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
348         return ret;
349 }
350
351 static int __register_ftrace_function(struct ftrace_ops *ops)
352 {
353         if (unlikely(ftrace_disabled))
354                 return -ENODEV;
355
356         if (FTRACE_WARN_ON(ops == &global_ops))
357                 return -EINVAL;
358
359         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
360                 return -EBUSY;
361
362         /* We don't support both control and global flags set. */
363         if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
364                 return -EINVAL;
365
366 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
367         /*
368          * If the ftrace_ops specifies SAVE_REGS, then it only can be used
369          * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
370          * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
371          */
372         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
373             !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
374                 return -EINVAL;
375
376         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
377                 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
378 #endif
379
380         if (!core_kernel_data((unsigned long)ops))
381                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
382
383         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
384                 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
385                 ops->flags |= FTRACE_OPS_FL_ENABLED;
386         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
387                 if (control_ops_alloc(ops))
388                         return -ENOMEM;
389                 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
390         } else
391                 add_ftrace_ops(&ftrace_ops_list, ops);
392
393         if (ftrace_enabled)
394                 update_ftrace_function();
395
396         return 0;
397 }
398
399 static int __unregister_ftrace_function(struct ftrace_ops *ops)
400 {
401         int ret;
402
403         if (ftrace_disabled)
404                 return -ENODEV;
405
406         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
407                 return -EBUSY;
408
409         if (FTRACE_WARN_ON(ops == &global_ops))
410                 return -EINVAL;
411
412         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
413                 ret = remove_ftrace_list_ops(&ftrace_global_list,
414                                              &global_ops, ops);
415                 if (!ret)
416                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
417         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
418                 ret = remove_ftrace_list_ops(&ftrace_control_list,
419                                              &control_ops, ops);
420                 if (!ret) {
421                         /*
422                          * The ftrace_ops is now removed from the list,
423                          * so there'll be no new users. We must ensure
424                          * all current users are done before we free
425                          * the control data.
426                          */
427                         synchronize_sched();
428                         control_ops_free(ops);
429                 }
430         } else
431                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
432
433         if (ret < 0)
434                 return ret;
435
436         if (ftrace_enabled)
437                 update_ftrace_function();
438
439         /*
440          * Dynamic ops may be freed, we must make sure that all
441          * callers are done before leaving this function.
442          */
443         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
444                 synchronize_sched();
445
446         return 0;
447 }
448
449 static void ftrace_update_pid_func(void)
450 {
451         /* Only do something if we are tracing something */
452         if (ftrace_trace_function == ftrace_stub)
453                 return;
454
455         update_ftrace_function();
456 }
457
458 #ifdef CONFIG_FUNCTION_PROFILER
459 struct ftrace_profile {
460         struct hlist_node               node;
461         unsigned long                   ip;
462         unsigned long                   counter;
463 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
464         unsigned long long              time;
465         unsigned long long              time_squared;
466 #endif
467 };
468
469 struct ftrace_profile_page {
470         struct ftrace_profile_page      *next;
471         unsigned long                   index;
472         struct ftrace_profile           records[];
473 };
474
475 struct ftrace_profile_stat {
476         atomic_t                        disabled;
477         struct hlist_head               *hash;
478         struct ftrace_profile_page      *pages;
479         struct ftrace_profile_page      *start;
480         struct tracer_stat              stat;
481 };
482
483 #define PROFILE_RECORDS_SIZE                                            \
484         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
485
486 #define PROFILES_PER_PAGE                                       \
487         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
488
489 static int ftrace_profile_enabled __read_mostly;
490
491 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
492 static DEFINE_MUTEX(ftrace_profile_lock);
493
494 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
495
496 #define FTRACE_PROFILE_HASH_BITS 10
497 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
498
499 static void *
500 function_stat_next(void *v, int idx)
501 {
502         struct ftrace_profile *rec = v;
503         struct ftrace_profile_page *pg;
504
505         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
506
507  again:
508         if (idx != 0)
509                 rec++;
510
511         if ((void *)rec >= (void *)&pg->records[pg->index]) {
512                 pg = pg->next;
513                 if (!pg)
514                         return NULL;
515                 rec = &pg->records[0];
516                 if (!rec->counter)
517                         goto again;
518         }
519
520         return rec;
521 }
522
523 static void *function_stat_start(struct tracer_stat *trace)
524 {
525         struct ftrace_profile_stat *stat =
526                 container_of(trace, struct ftrace_profile_stat, stat);
527
528         if (!stat || !stat->start)
529                 return NULL;
530
531         return function_stat_next(&stat->start->records[0], 0);
532 }
533
534 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
535 /* function graph compares on total time */
536 static int function_stat_cmp(void *p1, void *p2)
537 {
538         struct ftrace_profile *a = p1;
539         struct ftrace_profile *b = p2;
540
541         if (a->time < b->time)
542                 return -1;
543         if (a->time > b->time)
544                 return 1;
545         else
546                 return 0;
547 }
548 #else
549 /* not function graph compares against hits */
550 static int function_stat_cmp(void *p1, void *p2)
551 {
552         struct ftrace_profile *a = p1;
553         struct ftrace_profile *b = p2;
554
555         if (a->counter < b->counter)
556                 return -1;
557         if (a->counter > b->counter)
558                 return 1;
559         else
560                 return 0;
561 }
562 #endif
563
564 static int function_stat_headers(struct seq_file *m)
565 {
566 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
567         seq_printf(m, "  Function                               "
568                    "Hit    Time            Avg             s^2\n"
569                       "  --------                               "
570                    "---    ----            ---             ---\n");
571 #else
572         seq_printf(m, "  Function                               Hit\n"
573                       "  --------                               ---\n");
574 #endif
575         return 0;
576 }
577
578 static int function_stat_show(struct seq_file *m, void *v)
579 {
580         struct ftrace_profile *rec = v;
581         char str[KSYM_SYMBOL_LEN];
582         int ret = 0;
583 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
584         static struct trace_seq s;
585         unsigned long long avg;
586         unsigned long long stddev;
587 #endif
588         mutex_lock(&ftrace_profile_lock);
589
590         /* we raced with function_profile_reset() */
591         if (unlikely(rec->counter == 0)) {
592                 ret = -EBUSY;
593                 goto out;
594         }
595
596         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
597         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
598
599 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
600         seq_printf(m, "    ");
601         avg = rec->time;
602         do_div(avg, rec->counter);
603
604         /* Sample standard deviation (s^2) */
605         if (rec->counter <= 1)
606                 stddev = 0;
607         else {
608                 stddev = rec->time_squared - rec->counter * avg * avg;
609                 /*
610                  * Divide only 1000 for ns^2 -> us^2 conversion.
611                  * trace_print_graph_duration will divide 1000 again.
612                  */
613                 do_div(stddev, (rec->counter - 1) * 1000);
614         }
615
616         trace_seq_init(&s);
617         trace_print_graph_duration(rec->time, &s);
618         trace_seq_puts(&s, "    ");
619         trace_print_graph_duration(avg, &s);
620         trace_seq_puts(&s, "    ");
621         trace_print_graph_duration(stddev, &s);
622         trace_print_seq(m, &s);
623 #endif
624         seq_putc(m, '\n');
625 out:
626         mutex_unlock(&ftrace_profile_lock);
627
628         return ret;
629 }
630
631 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
632 {
633         struct ftrace_profile_page *pg;
634
635         pg = stat->pages = stat->start;
636
637         while (pg) {
638                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
639                 pg->index = 0;
640                 pg = pg->next;
641         }
642
643         memset(stat->hash, 0,
644                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
645 }
646
647 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
648 {
649         struct ftrace_profile_page *pg;
650         int functions;
651         int pages;
652         int i;
653
654         /* If we already allocated, do nothing */
655         if (stat->pages)
656                 return 0;
657
658         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
659         if (!stat->pages)
660                 return -ENOMEM;
661
662 #ifdef CONFIG_DYNAMIC_FTRACE
663         functions = ftrace_update_tot_cnt;
664 #else
665         /*
666          * We do not know the number of functions that exist because
667          * dynamic tracing is what counts them. With past experience
668          * we have around 20K functions. That should be more than enough.
669          * It is highly unlikely we will execute every function in
670          * the kernel.
671          */
672         functions = 20000;
673 #endif
674
675         pg = stat->start = stat->pages;
676
677         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
678
679         for (i = 1; i < pages; i++) {
680                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
681                 if (!pg->next)
682                         goto out_free;
683                 pg = pg->next;
684         }
685
686         return 0;
687
688  out_free:
689         pg = stat->start;
690         while (pg) {
691                 unsigned long tmp = (unsigned long)pg;
692
693                 pg = pg->next;
694                 free_page(tmp);
695         }
696
697         stat->pages = NULL;
698         stat->start = NULL;
699
700         return -ENOMEM;
701 }
702
703 static int ftrace_profile_init_cpu(int cpu)
704 {
705         struct ftrace_profile_stat *stat;
706         int size;
707
708         stat = &per_cpu(ftrace_profile_stats, cpu);
709
710         if (stat->hash) {
711                 /* If the profile is already created, simply reset it */
712                 ftrace_profile_reset(stat);
713                 return 0;
714         }
715
716         /*
717          * We are profiling all functions, but usually only a few thousand
718          * functions are hit. We'll make a hash of 1024 items.
719          */
720         size = FTRACE_PROFILE_HASH_SIZE;
721
722         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
723
724         if (!stat->hash)
725                 return -ENOMEM;
726
727         /* Preallocate the function profiling pages */
728         if (ftrace_profile_pages_init(stat) < 0) {
729                 kfree(stat->hash);
730                 stat->hash = NULL;
731                 return -ENOMEM;
732         }
733
734         return 0;
735 }
736
737 static int ftrace_profile_init(void)
738 {
739         int cpu;
740         int ret = 0;
741
742         for_each_online_cpu(cpu) {
743                 ret = ftrace_profile_init_cpu(cpu);
744                 if (ret)
745                         break;
746         }
747
748         return ret;
749 }
750
751 /* interrupts must be disabled */
752 static struct ftrace_profile *
753 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
754 {
755         struct ftrace_profile *rec;
756         struct hlist_head *hhd;
757         unsigned long key;
758
759         key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
760         hhd = &stat->hash[key];
761
762         if (hlist_empty(hhd))
763                 return NULL;
764
765         hlist_for_each_entry_rcu(rec, hhd, node) {
766                 if (rec->ip == ip)
767                         return rec;
768         }
769
770         return NULL;
771 }
772
773 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
774                                struct ftrace_profile *rec)
775 {
776         unsigned long key;
777
778         key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
779         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
780 }
781
782 /*
783  * The memory is already allocated, this simply finds a new record to use.
784  */
785 static struct ftrace_profile *
786 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
787 {
788         struct ftrace_profile *rec = NULL;
789
790         /* prevent recursion (from NMIs) */
791         if (atomic_inc_return(&stat->disabled) != 1)
792                 goto out;
793
794         /*
795          * Try to find the function again since an NMI
796          * could have added it
797          */
798         rec = ftrace_find_profiled_func(stat, ip);
799         if (rec)
800                 goto out;
801
802         if (stat->pages->index == PROFILES_PER_PAGE) {
803                 if (!stat->pages->next)
804                         goto out;
805                 stat->pages = stat->pages->next;
806         }
807
808         rec = &stat->pages->records[stat->pages->index++];
809         rec->ip = ip;
810         ftrace_add_profile(stat, rec);
811
812  out:
813         atomic_dec(&stat->disabled);
814
815         return rec;
816 }
817
818 static void
819 function_profile_call(unsigned long ip, unsigned long parent_ip,
820                       struct ftrace_ops *ops, struct pt_regs *regs)
821 {
822         struct ftrace_profile_stat *stat;
823         struct ftrace_profile *rec;
824         unsigned long flags;
825
826         if (!ftrace_profile_enabled)
827                 return;
828
829         local_irq_save(flags);
830
831         stat = &__get_cpu_var(ftrace_profile_stats);
832         if (!stat->hash || !ftrace_profile_enabled)
833                 goto out;
834
835         rec = ftrace_find_profiled_func(stat, ip);
836         if (!rec) {
837                 rec = ftrace_profile_alloc(stat, ip);
838                 if (!rec)
839                         goto out;
840         }
841
842         rec->counter++;
843  out:
844         local_irq_restore(flags);
845 }
846
847 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
848 static int profile_graph_entry(struct ftrace_graph_ent *trace)
849 {
850         function_profile_call(trace->func, 0, NULL, NULL);
851         return 1;
852 }
853
854 static void profile_graph_return(struct ftrace_graph_ret *trace)
855 {
856         struct ftrace_profile_stat *stat;
857         unsigned long long calltime;
858         struct ftrace_profile *rec;
859         unsigned long flags;
860
861         local_irq_save(flags);
862         stat = &__get_cpu_var(ftrace_profile_stats);
863         if (!stat->hash || !ftrace_profile_enabled)
864                 goto out;
865
866         /* If the calltime was zero'd ignore it */
867         if (!trace->calltime)
868                 goto out;
869
870         calltime = trace->rettime - trace->calltime;
871
872         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
873                 int index;
874
875                 index = trace->depth;
876
877                 /* Append this call time to the parent time to subtract */
878                 if (index)
879                         current->ret_stack[index - 1].subtime += calltime;
880
881                 if (current->ret_stack[index].subtime < calltime)
882                         calltime -= current->ret_stack[index].subtime;
883                 else
884                         calltime = 0;
885         }
886
887         rec = ftrace_find_profiled_func(stat, trace->func);
888         if (rec) {
889                 rec->time += calltime;
890                 rec->time_squared += calltime * calltime;
891         }
892
893  out:
894         local_irq_restore(flags);
895 }
896
897 static int register_ftrace_profiler(void)
898 {
899         return register_ftrace_graph(&profile_graph_return,
900                                      &profile_graph_entry);
901 }
902
903 static void unregister_ftrace_profiler(void)
904 {
905         unregister_ftrace_graph();
906 }
907 #else
908 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
909         .func           = function_profile_call,
910         .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
911 };
912
913 static int register_ftrace_profiler(void)
914 {
915         return register_ftrace_function(&ftrace_profile_ops);
916 }
917
918 static void unregister_ftrace_profiler(void)
919 {
920         unregister_ftrace_function(&ftrace_profile_ops);
921 }
922 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
923
924 static ssize_t
925 ftrace_profile_write(struct file *filp, const char __user *ubuf,
926                      size_t cnt, loff_t *ppos)
927 {
928         unsigned long val;
929         int ret;
930
931         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
932         if (ret)
933                 return ret;
934
935         val = !!val;
936
937         mutex_lock(&ftrace_profile_lock);
938         if (ftrace_profile_enabled ^ val) {
939                 if (val) {
940                         ret = ftrace_profile_init();
941                         if (ret < 0) {
942                                 cnt = ret;
943                                 goto out;
944                         }
945
946                         ret = register_ftrace_profiler();
947                         if (ret < 0) {
948                                 cnt = ret;
949                                 goto out;
950                         }
951                         ftrace_profile_enabled = 1;
952                 } else {
953                         ftrace_profile_enabled = 0;
954                         /*
955                          * unregister_ftrace_profiler calls stop_machine
956                          * so this acts like an synchronize_sched.
957                          */
958                         unregister_ftrace_profiler();
959                 }
960         }
961  out:
962         mutex_unlock(&ftrace_profile_lock);
963
964         *ppos += cnt;
965
966         return cnt;
967 }
968
969 static ssize_t
970 ftrace_profile_read(struct file *filp, char __user *ubuf,
971                      size_t cnt, loff_t *ppos)
972 {
973         char buf[64];           /* big enough to hold a number */
974         int r;
975
976         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
977         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
978 }
979
980 static const struct file_operations ftrace_profile_fops = {
981         .open           = tracing_open_generic,
982         .read           = ftrace_profile_read,
983         .write          = ftrace_profile_write,
984         .llseek         = default_llseek,
985 };
986
987 /* used to initialize the real stat files */
988 static struct tracer_stat function_stats __initdata = {
989         .name           = "functions",
990         .stat_start     = function_stat_start,
991         .stat_next      = function_stat_next,
992         .stat_cmp       = function_stat_cmp,
993         .stat_headers   = function_stat_headers,
994         .stat_show      = function_stat_show
995 };
996
997 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
998 {
999         struct ftrace_profile_stat *stat;
1000         struct dentry *entry;
1001         char *name;
1002         int ret;
1003         int cpu;
1004
1005         for_each_possible_cpu(cpu) {
1006                 stat = &per_cpu(ftrace_profile_stats, cpu);
1007
1008                 /* allocate enough for function name + cpu number */
1009                 name = kmalloc(32, GFP_KERNEL);
1010                 if (!name) {
1011                         /*
1012                          * The files created are permanent, if something happens
1013                          * we still do not free memory.
1014                          */
1015                         WARN(1,
1016                              "Could not allocate stat file for cpu %d\n",
1017                              cpu);
1018                         return;
1019                 }
1020                 stat->stat = function_stats;
1021                 snprintf(name, 32, "function%d", cpu);
1022                 stat->stat.name = name;
1023                 ret = register_stat_tracer(&stat->stat);
1024                 if (ret) {
1025                         WARN(1,
1026                              "Could not register function stat for cpu %d\n",
1027                              cpu);
1028                         kfree(name);
1029                         return;
1030                 }
1031         }
1032
1033         entry = debugfs_create_file("function_profile_enabled", 0644,
1034                                     d_tracer, NULL, &ftrace_profile_fops);
1035         if (!entry)
1036                 pr_warning("Could not create debugfs "
1037                            "'function_profile_enabled' entry\n");
1038 }
1039
1040 #else /* CONFIG_FUNCTION_PROFILER */
1041 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1042 {
1043 }
1044 #endif /* CONFIG_FUNCTION_PROFILER */
1045
1046 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1047
1048 loff_t
1049 ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
1050 {
1051         loff_t ret;
1052
1053         if (file->f_mode & FMODE_READ)
1054                 ret = seq_lseek(file, offset, whence);
1055         else
1056                 file->f_pos = ret = 1;
1057
1058         return ret;
1059 }
1060
1061 #ifdef CONFIG_DYNAMIC_FTRACE
1062
1063 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1064 # error Dynamic ftrace depends on MCOUNT_RECORD
1065 #endif
1066
1067 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1068
1069 struct ftrace_func_probe {
1070         struct hlist_node       node;
1071         struct ftrace_probe_ops *ops;
1072         unsigned long           flags;
1073         unsigned long           ip;
1074         void                    *data;
1075         struct list_head        free_list;
1076 };
1077
1078 struct ftrace_func_entry {
1079         struct hlist_node hlist;
1080         unsigned long ip;
1081 };
1082
1083 struct ftrace_hash {
1084         unsigned long           size_bits;
1085         struct hlist_head       *buckets;
1086         unsigned long           count;
1087         struct rcu_head         rcu;
1088 };
1089
1090 /*
1091  * We make these constant because no one should touch them,
1092  * but they are used as the default "empty hash", to avoid allocating
1093  * it all the time. These are in a read only section such that if
1094  * anyone does try to modify it, it will cause an exception.
1095  */
1096 static const struct hlist_head empty_buckets[1];
1097 static const struct ftrace_hash empty_hash = {
1098         .buckets = (struct hlist_head *)empty_buckets,
1099 };
1100 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
1101
1102 static struct ftrace_ops global_ops = {
1103         .func                   = ftrace_stub,
1104         .notrace_hash           = EMPTY_HASH,
1105         .filter_hash            = EMPTY_HASH,
1106         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
1107 };
1108
1109 static DEFINE_MUTEX(ftrace_regex_lock);
1110
1111 struct ftrace_page {
1112         struct ftrace_page      *next;
1113         struct dyn_ftrace       *records;
1114         int                     index;
1115         int                     size;
1116 };
1117
1118 static struct ftrace_page *ftrace_new_pgs;
1119
1120 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1121 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1122
1123 /* estimate from running different kernels */
1124 #define NR_TO_INIT              10000
1125
1126 static struct ftrace_page       *ftrace_pages_start;
1127 static struct ftrace_page       *ftrace_pages;
1128
1129 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1130 {
1131         return !hash || !hash->count;
1132 }
1133
1134 static struct ftrace_func_entry *
1135 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1136 {
1137         unsigned long key;
1138         struct ftrace_func_entry *entry;
1139         struct hlist_head *hhd;
1140
1141         if (ftrace_hash_empty(hash))
1142                 return NULL;
1143
1144         if (hash->size_bits > 0)
1145                 key = hash_long(ip, hash->size_bits);
1146         else
1147                 key = 0;
1148
1149         hhd = &hash->buckets[key];
1150
1151         hlist_for_each_entry_rcu(entry, hhd, hlist) {
1152                 if (entry->ip == ip)
1153                         return entry;
1154         }
1155         return NULL;
1156 }
1157
1158 static void __add_hash_entry(struct ftrace_hash *hash,
1159                              struct ftrace_func_entry *entry)
1160 {
1161         struct hlist_head *hhd;
1162         unsigned long key;
1163
1164         if (hash->size_bits)
1165                 key = hash_long(entry->ip, hash->size_bits);
1166         else
1167                 key = 0;
1168
1169         hhd = &hash->buckets[key];
1170         hlist_add_head(&entry->hlist, hhd);
1171         hash->count++;
1172 }
1173
1174 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1175 {
1176         struct ftrace_func_entry *entry;
1177
1178         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1179         if (!entry)
1180                 return -ENOMEM;
1181
1182         entry->ip = ip;
1183         __add_hash_entry(hash, entry);
1184
1185         return 0;
1186 }
1187
1188 static void
1189 free_hash_entry(struct ftrace_hash *hash,
1190                   struct ftrace_func_entry *entry)
1191 {
1192         hlist_del(&entry->hlist);
1193         kfree(entry);
1194         hash->count--;
1195 }
1196
1197 static void
1198 remove_hash_entry(struct ftrace_hash *hash,
1199                   struct ftrace_func_entry *entry)
1200 {
1201         hlist_del(&entry->hlist);
1202         hash->count--;
1203 }
1204
1205 static void ftrace_hash_clear(struct ftrace_hash *hash)
1206 {
1207         struct hlist_head *hhd;
1208         struct hlist_node *tn;
1209         struct ftrace_func_entry *entry;
1210         int size = 1 << hash->size_bits;
1211         int i;
1212
1213         if (!hash->count)
1214                 return;
1215
1216         for (i = 0; i < size; i++) {
1217                 hhd = &hash->buckets[i];
1218                 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1219                         free_hash_entry(hash, entry);
1220         }
1221         FTRACE_WARN_ON(hash->count);
1222 }
1223
1224 static void free_ftrace_hash(struct ftrace_hash *hash)
1225 {
1226         if (!hash || hash == EMPTY_HASH)
1227                 return;
1228         ftrace_hash_clear(hash);
1229         kfree(hash->buckets);
1230         kfree(hash);
1231 }
1232
1233 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1234 {
1235         struct ftrace_hash *hash;
1236
1237         hash = container_of(rcu, struct ftrace_hash, rcu);
1238         free_ftrace_hash(hash);
1239 }
1240
1241 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1242 {
1243         if (!hash || hash == EMPTY_HASH)
1244                 return;
1245         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1246 }
1247
1248 void ftrace_free_filter(struct ftrace_ops *ops)
1249 {
1250         free_ftrace_hash(ops->filter_hash);
1251         free_ftrace_hash(ops->notrace_hash);
1252 }
1253
1254 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1255 {
1256         struct ftrace_hash *hash;
1257         int size;
1258
1259         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1260         if (!hash)
1261                 return NULL;
1262
1263         size = 1 << size_bits;
1264         hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1265
1266         if (!hash->buckets) {
1267                 kfree(hash);
1268                 return NULL;
1269         }
1270
1271         hash->size_bits = size_bits;
1272
1273         return hash;
1274 }
1275
1276 static struct ftrace_hash *
1277 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1278 {
1279         struct ftrace_func_entry *entry;
1280         struct ftrace_hash *new_hash;
1281         int size;
1282         int ret;
1283         int i;
1284
1285         new_hash = alloc_ftrace_hash(size_bits);
1286         if (!new_hash)
1287                 return NULL;
1288
1289         /* Empty hash? */
1290         if (ftrace_hash_empty(hash))
1291                 return new_hash;
1292
1293         size = 1 << hash->size_bits;
1294         for (i = 0; i < size; i++) {
1295                 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1296                         ret = add_hash_entry(new_hash, entry->ip);
1297                         if (ret < 0)
1298                                 goto free_hash;
1299                 }
1300         }
1301
1302         FTRACE_WARN_ON(new_hash->count != hash->count);
1303
1304         return new_hash;
1305
1306  free_hash:
1307         free_ftrace_hash(new_hash);
1308         return NULL;
1309 }
1310
1311 static void
1312 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1313 static void
1314 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1315
1316 static int
1317 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1318                  struct ftrace_hash **dst, struct ftrace_hash *src)
1319 {
1320         struct ftrace_func_entry *entry;
1321         struct hlist_node *tn;
1322         struct hlist_head *hhd;
1323         struct ftrace_hash *old_hash;
1324         struct ftrace_hash *new_hash;
1325         int size = src->count;
1326         int bits = 0;
1327         int ret;
1328         int i;
1329
1330         /*
1331          * Remove the current set, update the hash and add
1332          * them back.
1333          */
1334         ftrace_hash_rec_disable(ops, enable);
1335
1336         /*
1337          * If the new source is empty, just free dst and assign it
1338          * the empty_hash.
1339          */
1340         if (!src->count) {
1341                 free_ftrace_hash_rcu(*dst);
1342                 rcu_assign_pointer(*dst, EMPTY_HASH);
1343                 /* still need to update the function records */
1344                 ret = 0;
1345                 goto out;
1346         }
1347
1348         /*
1349          * Make the hash size about 1/2 the # found
1350          */
1351         for (size /= 2; size; size >>= 1)
1352                 bits++;
1353
1354         /* Don't allocate too much */
1355         if (bits > FTRACE_HASH_MAX_BITS)
1356                 bits = FTRACE_HASH_MAX_BITS;
1357
1358         ret = -ENOMEM;
1359         new_hash = alloc_ftrace_hash(bits);
1360         if (!new_hash)
1361                 goto out;
1362
1363         size = 1 << src->size_bits;
1364         for (i = 0; i < size; i++) {
1365                 hhd = &src->buckets[i];
1366                 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1367                         remove_hash_entry(src, entry);
1368                         __add_hash_entry(new_hash, entry);
1369                 }
1370         }
1371
1372         old_hash = *dst;
1373         rcu_assign_pointer(*dst, new_hash);
1374         free_ftrace_hash_rcu(old_hash);
1375
1376         ret = 0;
1377  out:
1378         /*
1379          * Enable regardless of ret:
1380          *  On success, we enable the new hash.
1381          *  On failure, we re-enable the original hash.
1382          */
1383         ftrace_hash_rec_enable(ops, enable);
1384
1385         return ret;
1386 }
1387
1388 /*
1389  * Test the hashes for this ops to see if we want to call
1390  * the ops->func or not.
1391  *
1392  * It's a match if the ip is in the ops->filter_hash or
1393  * the filter_hash does not exist or is empty,
1394  *  AND
1395  * the ip is not in the ops->notrace_hash.
1396  *
1397  * This needs to be called with preemption disabled as
1398  * the hashes are freed with call_rcu_sched().
1399  */
1400 static int
1401 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1402 {
1403         struct ftrace_hash *filter_hash;
1404         struct ftrace_hash *notrace_hash;
1405         int ret;
1406
1407         filter_hash = rcu_dereference_raw(ops->filter_hash);
1408         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1409
1410         if ((ftrace_hash_empty(filter_hash) ||
1411              ftrace_lookup_ip(filter_hash, ip)) &&
1412             (ftrace_hash_empty(notrace_hash) ||
1413              !ftrace_lookup_ip(notrace_hash, ip)))
1414                 ret = 1;
1415         else
1416                 ret = 0;
1417
1418         return ret;
1419 }
1420
1421 /*
1422  * This is a double for. Do not use 'break' to break out of the loop,
1423  * you must use a goto.
1424  */
1425 #define do_for_each_ftrace_rec(pg, rec)                                 \
1426         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1427                 int _____i;                                             \
1428                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1429                         rec = &pg->records[_____i];
1430
1431 #define while_for_each_ftrace_rec()             \
1432                 }                               \
1433         }
1434
1435
1436 static int ftrace_cmp_recs(const void *a, const void *b)
1437 {
1438         const struct dyn_ftrace *key = a;
1439         const struct dyn_ftrace *rec = b;
1440
1441         if (key->flags < rec->ip)
1442                 return -1;
1443         if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1444                 return 1;
1445         return 0;
1446 }
1447
1448 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1449 {
1450         struct ftrace_page *pg;
1451         struct dyn_ftrace *rec;
1452         struct dyn_ftrace key;
1453
1454         key.ip = start;
1455         key.flags = end;        /* overload flags, as it is unsigned long */
1456
1457         for (pg = ftrace_pages_start; pg; pg = pg->next) {
1458                 if (end < pg->records[0].ip ||
1459                     start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1460                         continue;
1461                 rec = bsearch(&key, pg->records, pg->index,
1462                               sizeof(struct dyn_ftrace),
1463                               ftrace_cmp_recs);
1464                 if (rec)
1465                         return rec->ip;
1466         }
1467
1468         return 0;
1469 }
1470
1471 /**
1472  * ftrace_location - return true if the ip giving is a traced location
1473  * @ip: the instruction pointer to check
1474  *
1475  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1476  * That is, the instruction that is either a NOP or call to
1477  * the function tracer. It checks the ftrace internal tables to
1478  * determine if the address belongs or not.
1479  */
1480 unsigned long ftrace_location(unsigned long ip)
1481 {
1482         return ftrace_location_range(ip, ip);
1483 }
1484
1485 /**
1486  * ftrace_text_reserved - return true if range contains an ftrace location
1487  * @start: start of range to search
1488  * @end: end of range to search (inclusive). @end points to the last byte to check.
1489  *
1490  * Returns 1 if @start and @end contains a ftrace location.
1491  * That is, the instruction that is either a NOP or call to
1492  * the function tracer. It checks the ftrace internal tables to
1493  * determine if the address belongs or not.
1494  */
1495 int ftrace_text_reserved(void *start, void *end)
1496 {
1497         unsigned long ret;
1498
1499         ret = ftrace_location_range((unsigned long)start,
1500                                     (unsigned long)end);
1501
1502         return (int)!!ret;
1503 }
1504
1505 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1506                                      int filter_hash,
1507                                      bool inc)
1508 {
1509         struct ftrace_hash *hash;
1510         struct ftrace_hash *other_hash;
1511         struct ftrace_page *pg;
1512         struct dyn_ftrace *rec;
1513         int count = 0;
1514         int all = 0;
1515
1516         /* Only update if the ops has been registered */
1517         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1518                 return;
1519
1520         /*
1521          * In the filter_hash case:
1522          *   If the count is zero, we update all records.
1523          *   Otherwise we just update the items in the hash.
1524          *
1525          * In the notrace_hash case:
1526          *   We enable the update in the hash.
1527          *   As disabling notrace means enabling the tracing,
1528          *   and enabling notrace means disabling, the inc variable
1529          *   gets inversed.
1530          */
1531         if (filter_hash) {
1532                 hash = ops->filter_hash;
1533                 other_hash = ops->notrace_hash;
1534                 if (ftrace_hash_empty(hash))
1535                         all = 1;
1536         } else {
1537                 inc = !inc;
1538                 hash = ops->notrace_hash;
1539                 other_hash = ops->filter_hash;
1540                 /*
1541                  * If the notrace hash has no items,
1542                  * then there's nothing to do.
1543                  */
1544                 if (ftrace_hash_empty(hash))
1545                         return;
1546         }
1547
1548         do_for_each_ftrace_rec(pg, rec) {
1549                 int in_other_hash = 0;
1550                 int in_hash = 0;
1551                 int match = 0;
1552
1553                 if (all) {
1554                         /*
1555                          * Only the filter_hash affects all records.
1556                          * Update if the record is not in the notrace hash.
1557                          */
1558                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1559                                 match = 1;
1560                 } else {
1561                         in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1562                         in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1563
1564                         /*
1565                          *
1566                          */
1567                         if (filter_hash && in_hash && !in_other_hash)
1568                                 match = 1;
1569                         else if (!filter_hash && in_hash &&
1570                                  (in_other_hash || ftrace_hash_empty(other_hash)))
1571                                 match = 1;
1572                 }
1573                 if (!match)
1574                         continue;
1575
1576                 if (inc) {
1577                         rec->flags++;
1578                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1579                                 return;
1580                         /*
1581                          * If any ops wants regs saved for this function
1582                          * then all ops will get saved regs.
1583                          */
1584                         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1585                                 rec->flags |= FTRACE_FL_REGS;
1586                 } else {
1587                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1588                                 return;
1589                         rec->flags--;
1590                 }
1591                 count++;
1592                 /* Shortcut, if we handled all records, we are done. */
1593                 if (!all && count == hash->count)
1594                         return;
1595         } while_for_each_ftrace_rec();
1596 }
1597
1598 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1599                                     int filter_hash)
1600 {
1601         __ftrace_hash_rec_update(ops, filter_hash, 0);
1602 }
1603
1604 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1605                                    int filter_hash)
1606 {
1607         __ftrace_hash_rec_update(ops, filter_hash, 1);
1608 }
1609
1610 static void print_ip_ins(const char *fmt, unsigned char *p)
1611 {
1612         int i;
1613
1614         printk(KERN_CONT "%s", fmt);
1615
1616         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1617                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1618 }
1619
1620 /**
1621  * ftrace_bug - report and shutdown function tracer
1622  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1623  * @ip: The address that failed
1624  *
1625  * The arch code that enables or disables the function tracing
1626  * can call ftrace_bug() when it has detected a problem in
1627  * modifying the code. @failed should be one of either:
1628  * EFAULT - if the problem happens on reading the @ip address
1629  * EINVAL - if what is read at @ip is not what was expected
1630  * EPERM - if the problem happens on writting to the @ip address
1631  */
1632 void ftrace_bug(int failed, unsigned long ip)
1633 {
1634         switch (failed) {
1635         case -EFAULT:
1636                 FTRACE_WARN_ON_ONCE(1);
1637                 pr_info("ftrace faulted on modifying ");
1638                 print_ip_sym(ip);
1639                 break;
1640         case -EINVAL:
1641                 FTRACE_WARN_ON_ONCE(1);
1642                 pr_info("ftrace failed to modify ");
1643                 print_ip_sym(ip);
1644                 print_ip_ins(" actual: ", (unsigned char *)ip);
1645                 printk(KERN_CONT "\n");
1646                 break;
1647         case -EPERM:
1648                 FTRACE_WARN_ON_ONCE(1);
1649                 pr_info("ftrace faulted on writing ");
1650                 print_ip_sym(ip);
1651                 break;
1652         default:
1653                 FTRACE_WARN_ON_ONCE(1);
1654                 pr_info("ftrace faulted on unknown error ");
1655                 print_ip_sym(ip);
1656         }
1657 }
1658
1659 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1660 {
1661         unsigned long flag = 0UL;
1662
1663         /*
1664          * If we are updating calls:
1665          *
1666          *   If the record has a ref count, then we need to enable it
1667          *   because someone is using it.
1668          *
1669          *   Otherwise we make sure its disabled.
1670          *
1671          * If we are disabling calls, then disable all records that
1672          * are enabled.
1673          */
1674         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1675                 flag = FTRACE_FL_ENABLED;
1676
1677         /*
1678          * If enabling and the REGS flag does not match the REGS_EN, then
1679          * do not ignore this record. Set flags to fail the compare against
1680          * ENABLED.
1681          */
1682         if (flag &&
1683             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1684                 flag |= FTRACE_FL_REGS;
1685
1686         /* If the state of this record hasn't changed, then do nothing */
1687         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1688                 return FTRACE_UPDATE_IGNORE;
1689
1690         if (flag) {
1691                 /* Save off if rec is being enabled (for return value) */
1692                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1693
1694                 if (update) {
1695                         rec->flags |= FTRACE_FL_ENABLED;
1696                         if (flag & FTRACE_FL_REGS) {
1697                                 if (rec->flags & FTRACE_FL_REGS)
1698                                         rec->flags |= FTRACE_FL_REGS_EN;
1699                                 else
1700                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1701                         }
1702                 }
1703
1704                 /*
1705                  * If this record is being updated from a nop, then
1706                  *   return UPDATE_MAKE_CALL.
1707                  * Otherwise, if the EN flag is set, then return
1708                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1709                  *   from the non-save regs, to a save regs function.
1710                  * Otherwise,
1711                  *   return UPDATE_MODIFY_CALL to tell the caller to convert
1712                  *   from the save regs, to a non-save regs function.
1713                  */
1714                 if (flag & FTRACE_FL_ENABLED)
1715                         return FTRACE_UPDATE_MAKE_CALL;
1716                 else if (rec->flags & FTRACE_FL_REGS_EN)
1717                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1718                 else
1719                         return FTRACE_UPDATE_MODIFY_CALL;
1720         }
1721
1722         if (update) {
1723                 /* If there's no more users, clear all flags */
1724                 if (!(rec->flags & ~FTRACE_FL_MASK))
1725                         rec->flags = 0;
1726                 else
1727                         /* Just disable the record (keep REGS state) */
1728                         rec->flags &= ~FTRACE_FL_ENABLED;
1729         }
1730
1731         return FTRACE_UPDATE_MAKE_NOP;
1732 }
1733
1734 /**
1735  * ftrace_update_record, set a record that now is tracing or not
1736  * @rec: the record to update
1737  * @enable: set to 1 if the record is tracing, zero to force disable
1738  *
1739  * The records that represent all functions that can be traced need
1740  * to be updated when tracing has been enabled.
1741  */
1742 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1743 {
1744         return ftrace_check_record(rec, enable, 1);
1745 }
1746
1747 /**
1748  * ftrace_test_record, check if the record has been enabled or not
1749  * @rec: the record to test
1750  * @enable: set to 1 to check if enabled, 0 if it is disabled
1751  *
1752  * The arch code may need to test if a record is already set to
1753  * tracing to determine how to modify the function code that it
1754  * represents.
1755  */
1756 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1757 {
1758         return ftrace_check_record(rec, enable, 0);
1759 }
1760
1761 static int
1762 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1763 {
1764         unsigned long ftrace_old_addr;
1765         unsigned long ftrace_addr;
1766         int ret;
1767
1768         ret = ftrace_update_record(rec, enable);
1769
1770         if (rec->flags & FTRACE_FL_REGS)
1771                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1772         else
1773                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1774
1775         switch (ret) {
1776         case FTRACE_UPDATE_IGNORE:
1777                 return 0;
1778
1779         case FTRACE_UPDATE_MAKE_CALL:
1780                 return ftrace_make_call(rec, ftrace_addr);
1781
1782         case FTRACE_UPDATE_MAKE_NOP:
1783                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1784
1785         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1786         case FTRACE_UPDATE_MODIFY_CALL:
1787                 if (rec->flags & FTRACE_FL_REGS)
1788                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1789                 else
1790                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1791
1792                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1793         }
1794
1795         return -1; /* unknow ftrace bug */
1796 }
1797
1798 void __weak ftrace_replace_code(int enable)
1799 {
1800         struct dyn_ftrace *rec;
1801         struct ftrace_page *pg;
1802         int failed;
1803
1804         if (unlikely(ftrace_disabled))
1805                 return;
1806
1807         do_for_each_ftrace_rec(pg, rec) {
1808                 failed = __ftrace_replace_code(rec, enable);
1809                 if (failed) {
1810                         ftrace_bug(failed, rec->ip);
1811                         /* Stop processing */
1812                         return;
1813                 }
1814         } while_for_each_ftrace_rec();
1815 }
1816
1817 struct ftrace_rec_iter {
1818         struct ftrace_page      *pg;
1819         int                     index;
1820 };
1821
1822 /**
1823  * ftrace_rec_iter_start, start up iterating over traced functions
1824  *
1825  * Returns an iterator handle that is used to iterate over all
1826  * the records that represent address locations where functions
1827  * are traced.
1828  *
1829  * May return NULL if no records are available.
1830  */
1831 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1832 {
1833         /*
1834          * We only use a single iterator.
1835          * Protected by the ftrace_lock mutex.
1836          */
1837         static struct ftrace_rec_iter ftrace_rec_iter;
1838         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1839
1840         iter->pg = ftrace_pages_start;
1841         iter->index = 0;
1842
1843         /* Could have empty pages */
1844         while (iter->pg && !iter->pg->index)
1845                 iter->pg = iter->pg->next;
1846
1847         if (!iter->pg)
1848                 return NULL;
1849
1850         return iter;
1851 }
1852
1853 /**
1854  * ftrace_rec_iter_next, get the next record to process.
1855  * @iter: The handle to the iterator.
1856  *
1857  * Returns the next iterator after the given iterator @iter.
1858  */
1859 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1860 {
1861         iter->index++;
1862
1863         if (iter->index >= iter->pg->index) {
1864                 iter->pg = iter->pg->next;
1865                 iter->index = 0;
1866
1867                 /* Could have empty pages */
1868                 while (iter->pg && !iter->pg->index)
1869                         iter->pg = iter->pg->next;
1870         }
1871
1872         if (!iter->pg)
1873                 return NULL;
1874
1875         return iter;
1876 }
1877
1878 /**
1879  * ftrace_rec_iter_record, get the record at the iterator location
1880  * @iter: The current iterator location
1881  *
1882  * Returns the record that the current @iter is at.
1883  */
1884 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1885 {
1886         return &iter->pg->records[iter->index];
1887 }
1888
1889 static int
1890 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1891 {
1892         unsigned long ip;
1893         int ret;
1894
1895         ip = rec->ip;
1896
1897         if (unlikely(ftrace_disabled))
1898                 return 0;
1899
1900         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1901         if (ret) {
1902                 ftrace_bug(ret, ip);
1903                 return 0;
1904         }
1905         return 1;
1906 }
1907
1908 /*
1909  * archs can override this function if they must do something
1910  * before the modifying code is performed.
1911  */
1912 int __weak ftrace_arch_code_modify_prepare(void)
1913 {
1914         return 0;
1915 }
1916
1917 /*
1918  * archs can override this function if they must do something
1919  * after the modifying code is performed.
1920  */
1921 int __weak ftrace_arch_code_modify_post_process(void)
1922 {
1923         return 0;
1924 }
1925
1926 void ftrace_modify_all_code(int command)
1927 {
1928         if (command & FTRACE_UPDATE_CALLS)
1929                 ftrace_replace_code(1);
1930         else if (command & FTRACE_DISABLE_CALLS)
1931                 ftrace_replace_code(0);
1932
1933         if (command & FTRACE_UPDATE_TRACE_FUNC)
1934                 ftrace_update_ftrace_func(ftrace_trace_function);
1935
1936         if (command & FTRACE_START_FUNC_RET)
1937                 ftrace_enable_ftrace_graph_caller();
1938         else if (command & FTRACE_STOP_FUNC_RET)
1939                 ftrace_disable_ftrace_graph_caller();
1940 }
1941
1942 static int __ftrace_modify_code(void *data)
1943 {
1944         int *command = data;
1945
1946         ftrace_modify_all_code(*command);
1947
1948         return 0;
1949 }
1950
1951 /**
1952  * ftrace_run_stop_machine, go back to the stop machine method
1953  * @command: The command to tell ftrace what to do
1954  *
1955  * If an arch needs to fall back to the stop machine method, the
1956  * it can call this function.
1957  */
1958 void ftrace_run_stop_machine(int command)
1959 {
1960         stop_machine(__ftrace_modify_code, &command, NULL);
1961 }
1962
1963 /**
1964  * arch_ftrace_update_code, modify the code to trace or not trace
1965  * @command: The command that needs to be done
1966  *
1967  * Archs can override this function if it does not need to
1968  * run stop_machine() to modify code.
1969  */
1970 void __weak arch_ftrace_update_code(int command)
1971 {
1972         ftrace_run_stop_machine(command);
1973 }
1974
1975 static void ftrace_run_update_code(int command)
1976 {
1977         int ret;
1978
1979         ret = ftrace_arch_code_modify_prepare();
1980         FTRACE_WARN_ON(ret);
1981         if (ret)
1982                 return;
1983         /*
1984          * Do not call function tracer while we update the code.
1985          * We are in stop machine.
1986          */
1987         function_trace_stop++;
1988
1989         /*
1990          * By default we use stop_machine() to modify the code.
1991          * But archs can do what ever they want as long as it
1992          * is safe. The stop_machine() is the safest, but also
1993          * produces the most overhead.
1994          */
1995         arch_ftrace_update_code(command);
1996
1997         function_trace_stop--;
1998
1999         ret = ftrace_arch_code_modify_post_process();
2000         FTRACE_WARN_ON(ret);
2001 }
2002
2003 static ftrace_func_t saved_ftrace_func;
2004 static int ftrace_start_up;
2005 static int global_start_up;
2006
2007 static void ftrace_startup_enable(int command)
2008 {
2009         if (saved_ftrace_func != ftrace_trace_function) {
2010                 saved_ftrace_func = ftrace_trace_function;
2011                 command |= FTRACE_UPDATE_TRACE_FUNC;
2012         }
2013
2014         if (!command || !ftrace_enabled)
2015                 return;
2016
2017         ftrace_run_update_code(command);
2018 }
2019
2020 static int ftrace_startup(struct ftrace_ops *ops, int command)
2021 {
2022         bool hash_enable = true;
2023
2024         if (unlikely(ftrace_disabled))
2025                 return -ENODEV;
2026
2027         ftrace_start_up++;
2028         command |= FTRACE_UPDATE_CALLS;
2029
2030         /* ops marked global share the filter hashes */
2031         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2032                 ops = &global_ops;
2033                 /* Don't update hash if global is already set */
2034                 if (global_start_up)
2035                         hash_enable = false;
2036                 global_start_up++;
2037         }
2038
2039         ops->flags |= FTRACE_OPS_FL_ENABLED;
2040         if (hash_enable)
2041                 ftrace_hash_rec_enable(ops, 1);
2042
2043         ftrace_startup_enable(command);
2044
2045         return 0;
2046 }
2047
2048 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2049 {
2050         bool hash_disable = true;
2051
2052         if (unlikely(ftrace_disabled))
2053                 return;
2054
2055         ftrace_start_up--;
2056         /*
2057          * Just warn in case of unbalance, no need to kill ftrace, it's not
2058          * critical but the ftrace_call callers may be never nopped again after
2059          * further ftrace uses.
2060          */
2061         WARN_ON_ONCE(ftrace_start_up < 0);
2062
2063         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2064                 ops = &global_ops;
2065                 global_start_up--;
2066                 WARN_ON_ONCE(global_start_up < 0);
2067                 /* Don't update hash if global still has users */
2068                 if (global_start_up) {
2069                         WARN_ON_ONCE(!ftrace_start_up);
2070                         hash_disable = false;
2071                 }
2072         }
2073
2074         if (hash_disable)
2075                 ftrace_hash_rec_disable(ops, 1);
2076
2077         if (ops != &global_ops || !global_start_up)
2078                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2079
2080         command |= FTRACE_UPDATE_CALLS;
2081
2082         if (saved_ftrace_func != ftrace_trace_function) {
2083                 saved_ftrace_func = ftrace_trace_function;
2084                 command |= FTRACE_UPDATE_TRACE_FUNC;
2085         }
2086
2087         if (!command || !ftrace_enabled)
2088                 return;
2089
2090         ftrace_run_update_code(command);
2091 }
2092
2093 static void ftrace_startup_sysctl(void)
2094 {
2095         if (unlikely(ftrace_disabled))
2096                 return;
2097
2098         /* Force update next time */
2099         saved_ftrace_func = NULL;
2100         /* ftrace_start_up is true if we want ftrace running */
2101         if (ftrace_start_up)
2102                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2103 }
2104
2105 static void ftrace_shutdown_sysctl(void)
2106 {
2107         if (unlikely(ftrace_disabled))
2108                 return;
2109
2110         /* ftrace_start_up is true if ftrace is running */
2111         if (ftrace_start_up)
2112                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2113 }
2114
2115 static cycle_t          ftrace_update_time;
2116 static unsigned long    ftrace_update_cnt;
2117 unsigned long           ftrace_update_tot_cnt;
2118
2119 static int ops_traces_mod(struct ftrace_ops *ops)
2120 {
2121         struct ftrace_hash *hash;
2122
2123         hash = ops->filter_hash;
2124         return ftrace_hash_empty(hash);
2125 }
2126
2127 static int ftrace_update_code(struct module *mod)
2128 {
2129         struct ftrace_page *pg;
2130         struct dyn_ftrace *p;
2131         cycle_t start, stop;
2132         unsigned long ref = 0;
2133         int i;
2134
2135         /*
2136          * When adding a module, we need to check if tracers are
2137          * currently enabled and if they are set to trace all functions.
2138          * If they are, we need to enable the module functions as well
2139          * as update the reference counts for those function records.
2140          */
2141         if (mod) {
2142                 struct ftrace_ops *ops;
2143
2144                 for (ops = ftrace_ops_list;
2145                      ops != &ftrace_list_end; ops = ops->next) {
2146                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2147                             ops_traces_mod(ops))
2148                                 ref++;
2149                 }
2150         }
2151
2152         start = ftrace_now(raw_smp_processor_id());
2153         ftrace_update_cnt = 0;
2154
2155         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2156
2157                 for (i = 0; i < pg->index; i++) {
2158                         /* If something went wrong, bail without enabling anything */
2159                         if (unlikely(ftrace_disabled))
2160                                 return -1;
2161
2162                         p = &pg->records[i];
2163                         p->flags = ref;
2164
2165                         /*
2166                          * Do the initial record conversion from mcount jump
2167                          * to the NOP instructions.
2168                          */
2169                         if (!ftrace_code_disable(mod, p))
2170                                 break;
2171
2172                         ftrace_update_cnt++;
2173
2174                         /*
2175                          * If the tracing is enabled, go ahead and enable the record.
2176                          *
2177                          * The reason not to enable the record immediatelly is the
2178                          * inherent check of ftrace_make_nop/ftrace_make_call for
2179                          * correct previous instructions.  Making first the NOP
2180                          * conversion puts the module to the correct state, thus
2181                          * passing the ftrace_make_call check.
2182                          */
2183                         if (ftrace_start_up && ref) {
2184                                 int failed = __ftrace_replace_code(p, 1);
2185                                 if (failed)
2186                                         ftrace_bug(failed, p->ip);
2187                         }
2188                 }
2189         }
2190
2191         ftrace_new_pgs = NULL;
2192
2193         stop = ftrace_now(raw_smp_processor_id());
2194         ftrace_update_time = stop - start;
2195         ftrace_update_tot_cnt += ftrace_update_cnt;
2196
2197         return 0;
2198 }
2199
2200 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2201 {
2202         int order;
2203         int cnt;
2204
2205         if (WARN_ON(!count))
2206                 return -EINVAL;
2207
2208         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2209
2210         /*
2211          * We want to fill as much as possible. No more than a page
2212          * may be empty.
2213          */
2214         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2215                 order--;
2216
2217  again:
2218         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2219
2220         if (!pg->records) {
2221                 /* if we can't allocate this size, try something smaller */
2222                 if (!order)
2223                         return -ENOMEM;
2224                 order >>= 1;
2225                 goto again;
2226         }
2227
2228         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2229         pg->size = cnt;
2230
2231         if (cnt > count)
2232                 cnt = count;
2233
2234         return cnt;
2235 }
2236
2237 static struct ftrace_page *
2238 ftrace_allocate_pages(unsigned long num_to_init)
2239 {
2240         struct ftrace_page *start_pg;
2241         struct ftrace_page *pg;
2242         int order;
2243         int cnt;
2244
2245         if (!num_to_init)
2246                 return 0;
2247
2248         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2249         if (!pg)
2250                 return NULL;
2251
2252         /*
2253          * Try to allocate as much as possible in one continues
2254          * location that fills in all of the space. We want to
2255          * waste as little space as possible.
2256          */
2257         for (;;) {
2258                 cnt = ftrace_allocate_records(pg, num_to_init);
2259                 if (cnt < 0)
2260                         goto free_pages;
2261
2262                 num_to_init -= cnt;
2263                 if (!num_to_init)
2264                         break;
2265
2266                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2267                 if (!pg->next)
2268                         goto free_pages;
2269
2270                 pg = pg->next;
2271         }
2272
2273         return start_pg;
2274
2275  free_pages:
2276         while (start_pg) {
2277                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2278                 free_pages((unsigned long)pg->records, order);
2279                 start_pg = pg->next;
2280                 kfree(pg);
2281                 pg = start_pg;
2282         }
2283         pr_info("ftrace: FAILED to allocate memory for functions\n");
2284         return NULL;
2285 }
2286
2287 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2288 {
2289         int cnt;
2290
2291         if (!num_to_init) {
2292                 pr_info("ftrace: No functions to be traced?\n");
2293                 return -1;
2294         }
2295
2296         cnt = num_to_init / ENTRIES_PER_PAGE;
2297         pr_info("ftrace: allocating %ld entries in %d pages\n",
2298                 num_to_init, cnt + 1);
2299
2300         return 0;
2301 }
2302
2303 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2304
2305 struct ftrace_iterator {
2306         loff_t                          pos;
2307         loff_t                          func_pos;
2308         struct ftrace_page              *pg;
2309         struct dyn_ftrace               *func;
2310         struct ftrace_func_probe        *probe;
2311         struct trace_parser             parser;
2312         struct ftrace_hash              *hash;
2313         struct ftrace_ops               *ops;
2314         int                             hidx;
2315         int                             idx;
2316         unsigned                        flags;
2317 };
2318
2319 static void *
2320 t_hash_next(struct seq_file *m, loff_t *pos)
2321 {
2322         struct ftrace_iterator *iter = m->private;
2323         struct hlist_node *hnd = NULL;
2324         struct hlist_head *hhd;
2325
2326         (*pos)++;
2327         iter->pos = *pos;
2328
2329         if (iter->probe)
2330                 hnd = &iter->probe->node;
2331  retry:
2332         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2333                 return NULL;
2334
2335         hhd = &ftrace_func_hash[iter->hidx];
2336
2337         if (hlist_empty(hhd)) {
2338                 iter->hidx++;
2339                 hnd = NULL;
2340                 goto retry;
2341         }
2342
2343         if (!hnd)
2344                 hnd = hhd->first;
2345         else {
2346                 hnd = hnd->next;
2347                 if (!hnd) {
2348                         iter->hidx++;
2349                         goto retry;
2350                 }
2351         }
2352
2353         if (WARN_ON_ONCE(!hnd))
2354                 return NULL;
2355
2356         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2357
2358         return iter;
2359 }
2360
2361 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2362 {
2363         struct ftrace_iterator *iter = m->private;
2364         void *p = NULL;
2365         loff_t l;
2366
2367         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2368                 return NULL;
2369
2370         if (iter->func_pos > *pos)
2371                 return NULL;
2372
2373         iter->hidx = 0;
2374         for (l = 0; l <= (*pos - iter->func_pos); ) {
2375                 p = t_hash_next(m, &l);
2376                 if (!p)
2377                         break;
2378         }
2379         if (!p)
2380                 return NULL;
2381
2382         /* Only set this if we have an item */
2383         iter->flags |= FTRACE_ITER_HASH;
2384
2385         return iter;
2386 }
2387
2388 static int
2389 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2390 {
2391         struct ftrace_func_probe *rec;
2392
2393         rec = iter->probe;
2394         if (WARN_ON_ONCE(!rec))
2395                 return -EIO;
2396
2397         if (rec->ops->print)
2398                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2399
2400         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2401
2402         if (rec->data)
2403                 seq_printf(m, ":%p", rec->data);
2404         seq_putc(m, '\n');
2405
2406         return 0;
2407 }
2408
2409 static void *
2410 t_next(struct seq_file *m, void *v, loff_t *pos)
2411 {
2412         struct ftrace_iterator *iter = m->private;
2413         struct ftrace_ops *ops = iter->ops;
2414         struct dyn_ftrace *rec = NULL;
2415
2416         if (unlikely(ftrace_disabled))
2417                 return NULL;
2418
2419         if (iter->flags & FTRACE_ITER_HASH)
2420                 return t_hash_next(m, pos);
2421
2422         (*pos)++;
2423         iter->pos = iter->func_pos = *pos;
2424
2425         if (iter->flags & FTRACE_ITER_PRINTALL)
2426                 return t_hash_start(m, pos);
2427
2428  retry:
2429         if (iter->idx >= iter->pg->index) {
2430                 if (iter->pg->next) {
2431                         iter->pg = iter->pg->next;
2432                         iter->idx = 0;
2433                         goto retry;
2434                 }
2435         } else {
2436                 rec = &iter->pg->records[iter->idx++];
2437                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2438                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2439
2440                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2441                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2442
2443                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2444                      !(rec->flags & ~FTRACE_FL_MASK))) {
2445
2446                         rec = NULL;
2447                         goto retry;
2448                 }
2449         }
2450
2451         if (!rec)
2452                 return t_hash_start(m, pos);
2453
2454         iter->func = rec;
2455
2456         return iter;
2457 }
2458
2459 static void reset_iter_read(struct ftrace_iterator *iter)
2460 {
2461         iter->pos = 0;
2462         iter->func_pos = 0;
2463         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2464 }
2465
2466 static void *t_start(struct seq_file *m, loff_t *pos)
2467 {
2468         struct ftrace_iterator *iter = m->private;
2469         struct ftrace_ops *ops = iter->ops;
2470         void *p = NULL;
2471         loff_t l;
2472
2473         mutex_lock(&ftrace_lock);
2474
2475         if (unlikely(ftrace_disabled))
2476                 return NULL;
2477
2478         /*
2479          * If an lseek was done, then reset and start from beginning.
2480          */
2481         if (*pos < iter->pos)
2482                 reset_iter_read(iter);
2483
2484         /*
2485          * For set_ftrace_filter reading, if we have the filter
2486          * off, we can short cut and just print out that all
2487          * functions are enabled.
2488          */
2489         if (iter->flags & FTRACE_ITER_FILTER &&
2490             ftrace_hash_empty(ops->filter_hash)) {
2491                 if (*pos > 0)
2492                         return t_hash_start(m, pos);
2493                 iter->flags |= FTRACE_ITER_PRINTALL;
2494                 /* reset in case of seek/pread */
2495                 iter->flags &= ~FTRACE_ITER_HASH;
2496                 return iter;
2497         }
2498
2499         if (iter->flags & FTRACE_ITER_HASH)
2500                 return t_hash_start(m, pos);
2501
2502         /*
2503          * Unfortunately, we need to restart at ftrace_pages_start
2504          * every time we let go of the ftrace_mutex. This is because
2505          * those pointers can change without the lock.
2506          */
2507         iter->pg = ftrace_pages_start;
2508         iter->idx = 0;
2509         for (l = 0; l <= *pos; ) {
2510                 p = t_next(m, p, &l);
2511                 if (!p)
2512                         break;
2513         }
2514
2515         if (!p)
2516                 return t_hash_start(m, pos);
2517
2518         return iter;
2519 }
2520
2521 static void t_stop(struct seq_file *m, void *p)
2522 {
2523         mutex_unlock(&ftrace_lock);
2524 }
2525
2526 static int t_show(struct seq_file *m, void *v)
2527 {
2528         struct ftrace_iterator *iter = m->private;
2529         struct dyn_ftrace *rec;
2530
2531         if (iter->flags & FTRACE_ITER_HASH)
2532                 return t_hash_show(m, iter);
2533
2534         if (iter->flags & FTRACE_ITER_PRINTALL) {
2535                 seq_printf(m, "#### all functions enabled ####\n");
2536                 return 0;
2537         }
2538
2539         rec = iter->func;
2540
2541         if (!rec)
2542                 return 0;
2543
2544         seq_printf(m, "%ps", (void *)rec->ip);
2545         if (iter->flags & FTRACE_ITER_ENABLED)
2546                 seq_printf(m, " (%ld)%s",
2547                            rec->flags & ~FTRACE_FL_MASK,
2548                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2549         seq_printf(m, "\n");
2550
2551         return 0;
2552 }
2553
2554 static const struct seq_operations show_ftrace_seq_ops = {
2555         .start = t_start,
2556         .next = t_next,
2557         .stop = t_stop,
2558         .show = t_show,
2559 };
2560
2561 static int
2562 ftrace_avail_open(struct inode *inode, struct file *file)
2563 {
2564         struct ftrace_iterator *iter;
2565
2566         if (unlikely(ftrace_disabled))
2567                 return -ENODEV;
2568
2569         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2570         if (iter) {
2571                 iter->pg = ftrace_pages_start;
2572                 iter->ops = &global_ops;
2573         }
2574
2575         return iter ? 0 : -ENOMEM;
2576 }
2577
2578 static int
2579 ftrace_enabled_open(struct inode *inode, struct file *file)
2580 {
2581         struct ftrace_iterator *iter;
2582
2583         if (unlikely(ftrace_disabled))
2584                 return -ENODEV;
2585
2586         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2587         if (iter) {
2588                 iter->pg = ftrace_pages_start;
2589                 iter->flags = FTRACE_ITER_ENABLED;
2590                 iter->ops = &global_ops;
2591         }
2592
2593         return iter ? 0 : -ENOMEM;
2594 }
2595
2596 static void ftrace_filter_reset(struct ftrace_hash *hash)
2597 {
2598         mutex_lock(&ftrace_lock);
2599         ftrace_hash_clear(hash);
2600         mutex_unlock(&ftrace_lock);
2601 }
2602
2603 /**
2604  * ftrace_regex_open - initialize function tracer filter files
2605  * @ops: The ftrace_ops that hold the hash filters
2606  * @flag: The type of filter to process
2607  * @inode: The inode, usually passed in to your open routine
2608  * @file: The file, usually passed in to your open routine
2609  *
2610  * ftrace_regex_open() initializes the filter files for the
2611  * @ops. Depending on @flag it may process the filter hash or
2612  * the notrace hash of @ops. With this called from the open
2613  * routine, you can use ftrace_filter_write() for the write
2614  * routine if @flag has FTRACE_ITER_FILTER set, or
2615  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2616  * ftrace_filter_lseek() should be used as the lseek routine, and
2617  * release must call ftrace_regex_release().
2618  */
2619 int
2620 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2621                   struct inode *inode, struct file *file)
2622 {
2623         struct ftrace_iterator *iter;
2624         struct ftrace_hash *hash;
2625         int ret = 0;
2626
2627         if (unlikely(ftrace_disabled))
2628                 return -ENODEV;
2629
2630         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2631         if (!iter)
2632                 return -ENOMEM;
2633
2634         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2635                 kfree(iter);
2636                 return -ENOMEM;
2637         }
2638
2639         if (flag & FTRACE_ITER_NOTRACE)
2640                 hash = ops->notrace_hash;
2641         else
2642                 hash = ops->filter_hash;
2643
2644         iter->ops = ops;
2645         iter->flags = flag;
2646
2647         if (file->f_mode & FMODE_WRITE) {
2648                 mutex_lock(&ftrace_lock);
2649                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2650                 mutex_unlock(&ftrace_lock);
2651
2652                 if (!iter->hash) {
2653                         trace_parser_put(&iter->parser);
2654                         kfree(iter);
2655                         return -ENOMEM;
2656                 }
2657         }
2658
2659         mutex_lock(&ftrace_regex_lock);
2660
2661         if ((file->f_mode & FMODE_WRITE) &&
2662             (file->f_flags & O_TRUNC))
2663                 ftrace_filter_reset(iter->hash);
2664
2665         if (file->f_mode & FMODE_READ) {
2666                 iter->pg = ftrace_pages_start;
2667
2668                 ret = seq_open(file, &show_ftrace_seq_ops);
2669                 if (!ret) {
2670                         struct seq_file *m = file->private_data;
2671                         m->private = iter;
2672                 } else {
2673                         /* Failed */
2674                         free_ftrace_hash(iter->hash);
2675                         trace_parser_put(&iter->parser);
2676                         kfree(iter);
2677                 }
2678         } else
2679                 file->private_data = iter;
2680         mutex_unlock(&ftrace_regex_lock);
2681
2682         return ret;
2683 }
2684
2685 static int
2686 ftrace_filter_open(struct inode *inode, struct file *file)
2687 {
2688         return ftrace_regex_open(&global_ops,
2689                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2690                         inode, file);
2691 }
2692
2693 static int
2694 ftrace_notrace_open(struct inode *inode, struct file *file)
2695 {
2696         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2697                                  inode, file);
2698 }
2699
2700 static int ftrace_match(char *str, char *regex, int len, int type)
2701 {
2702         int matched = 0;
2703         int slen;
2704
2705         switch (type) {
2706         case MATCH_FULL:
2707                 if (strcmp(str, regex) == 0)
2708                         matched = 1;
2709                 break;
2710         case MATCH_FRONT_ONLY:
2711                 if (strncmp(str, regex, len) == 0)
2712                         matched = 1;
2713                 break;
2714         case MATCH_MIDDLE_ONLY:
2715                 if (strstr(str, regex))
2716                         matched = 1;
2717                 break;
2718         case MATCH_END_ONLY:
2719                 slen = strlen(str);
2720                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2721                         matched = 1;
2722                 break;
2723         }
2724
2725         return matched;
2726 }
2727
2728 static int
2729 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2730 {
2731         struct ftrace_func_entry *entry;
2732         int ret = 0;
2733
2734         entry = ftrace_lookup_ip(hash, rec->ip);
2735         if (not) {
2736                 /* Do nothing if it doesn't exist */
2737                 if (!entry)
2738                         return 0;
2739
2740                 free_hash_entry(hash, entry);
2741         } else {
2742                 /* Do nothing if it exists */
2743                 if (entry)
2744                         return 0;
2745
2746                 ret = add_hash_entry(hash, rec->ip);
2747         }
2748         return ret;
2749 }
2750
2751 static int
2752 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2753                     char *regex, int len, int type)
2754 {
2755         char str[KSYM_SYMBOL_LEN];
2756         char *modname;
2757
2758         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2759
2760         if (mod) {
2761                 /* module lookup requires matching the module */
2762                 if (!modname || strcmp(modname, mod))
2763                         return 0;
2764
2765                 /* blank search means to match all funcs in the mod */
2766                 if (!len)
2767                         return 1;
2768         }
2769
2770         return ftrace_match(str, regex, len, type);
2771 }
2772
2773 static int
2774 match_records(struct ftrace_hash *hash, char *buff,
2775               int len, char *mod, int not)
2776 {
2777         unsigned search_len = 0;
2778         struct ftrace_page *pg;
2779         struct dyn_ftrace *rec;
2780         int type = MATCH_FULL;
2781         char *search = buff;
2782         int found = 0;
2783         int ret;
2784
2785         if (len) {
2786                 type = filter_parse_regex(buff, len, &search, &not);
2787                 search_len = strlen(search);
2788         }
2789
2790         mutex_lock(&ftrace_lock);
2791
2792         if (unlikely(ftrace_disabled))
2793                 goto out_unlock;
2794
2795         do_for_each_ftrace_rec(pg, rec) {
2796                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2797                         ret = enter_record(hash, rec, not);
2798                         if (ret < 0) {
2799                                 found = ret;
2800                                 goto out_unlock;
2801                         }
2802                         found = 1;
2803                 }
2804         } while_for_each_ftrace_rec();
2805  out_unlock:
2806         mutex_unlock(&ftrace_lock);
2807
2808         return found;
2809 }
2810
2811 static int
2812 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2813 {
2814         return match_records(hash, buff, len, NULL, 0);
2815 }
2816
2817 static int
2818 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2819 {
2820         int not = 0;
2821
2822         /* blank or '*' mean the same */
2823         if (strcmp(buff, "*") == 0)
2824                 buff[0] = 0;
2825
2826         /* handle the case of 'dont filter this module' */
2827         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2828                 buff[0] = 0;
2829                 not = 1;
2830         }
2831
2832         return match_records(hash, buff, strlen(buff), mod, not);
2833 }
2834
2835 /*
2836  * We register the module command as a template to show others how
2837  * to register the a command as well.
2838  */
2839
2840 static int
2841 ftrace_mod_callback(struct ftrace_hash *hash,
2842                     char *func, char *cmd, char *param, int enable)
2843 {
2844         char *mod;
2845         int ret = -EINVAL;
2846
2847         /*
2848          * cmd == 'mod' because we only registered this func
2849          * for the 'mod' ftrace_func_command.
2850          * But if you register one func with multiple commands,
2851          * you can tell which command was used by the cmd
2852          * parameter.
2853          */
2854
2855         /* we must have a module name */
2856         if (!param)
2857                 return ret;
2858
2859         mod = strsep(&param, ":");
2860         if (!strlen(mod))
2861                 return ret;
2862
2863         ret = ftrace_match_module_records(hash, func, mod);
2864         if (!ret)
2865                 ret = -EINVAL;
2866         if (ret < 0)
2867                 return ret;
2868
2869         return 0;
2870 }
2871
2872 static struct ftrace_func_command ftrace_mod_cmd = {
2873         .name                   = "mod",
2874         .func                   = ftrace_mod_callback,
2875 };
2876
2877 static int __init ftrace_mod_cmd_init(void)
2878 {
2879         return register_ftrace_command(&ftrace_mod_cmd);
2880 }
2881 core_initcall(ftrace_mod_cmd_init);
2882
2883 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2884                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2885 {
2886         struct ftrace_func_probe *entry;
2887         struct hlist_head *hhd;
2888         unsigned long key;
2889
2890         key = hash_long(ip, FTRACE_HASH_BITS);
2891
2892         hhd = &ftrace_func_hash[key];
2893
2894         if (hlist_empty(hhd))
2895                 return;
2896
2897         /*
2898          * Disable preemption for these calls to prevent a RCU grace
2899          * period. This syncs the hash iteration and freeing of items
2900          * on the hash. rcu_read_lock is too dangerous here.
2901          */
2902         preempt_disable_notrace();
2903         hlist_for_each_entry_rcu(entry, hhd, node) {
2904                 if (entry->ip == ip)
2905                         entry->ops->func(ip, parent_ip, &entry->data);
2906         }
2907         preempt_enable_notrace();
2908 }
2909
2910 static struct ftrace_ops trace_probe_ops __read_mostly =
2911 {
2912         .func           = function_trace_probe_call,
2913 };
2914
2915 static int ftrace_probe_registered;
2916
2917 static void __enable_ftrace_function_probe(void)
2918 {
2919         int ret;
2920         int i;
2921
2922         if (ftrace_probe_registered)
2923                 return;
2924
2925         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2926                 struct hlist_head *hhd = &ftrace_func_hash[i];
2927                 if (hhd->first)
2928                         break;
2929         }
2930         /* Nothing registered? */
2931         if (i == FTRACE_FUNC_HASHSIZE)
2932                 return;
2933
2934         ret = __register_ftrace_function(&trace_probe_ops);
2935         if (!ret)
2936                 ret = ftrace_startup(&trace_probe_ops, 0);
2937
2938         ftrace_probe_registered = 1;
2939 }
2940
2941 static void __disable_ftrace_function_probe(void)
2942 {
2943         int ret;
2944         int i;
2945
2946         if (!ftrace_probe_registered)
2947                 return;
2948
2949         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2950                 struct hlist_head *hhd = &ftrace_func_hash[i];
2951                 if (hhd->first)
2952                         return;
2953         }
2954
2955         /* no more funcs left */
2956         ret = __unregister_ftrace_function(&trace_probe_ops);
2957         if (!ret)
2958                 ftrace_shutdown(&trace_probe_ops, 0);
2959
2960         ftrace_probe_registered = 0;
2961 }
2962
2963
2964 static void ftrace_free_entry(struct ftrace_func_probe *entry)
2965 {
2966         if (entry->ops->free)
2967                 entry->ops->free(entry->ops, entry->ip, &entry->data);
2968         kfree(entry);
2969 }
2970
2971 int
2972 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2973                               void *data)
2974 {
2975         struct ftrace_func_probe *entry;
2976         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
2977         struct ftrace_hash *hash;
2978         struct ftrace_page *pg;
2979         struct dyn_ftrace *rec;
2980         int type, len, not;
2981         unsigned long key;
2982         int count = 0;
2983         char *search;
2984         int ret;
2985
2986         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2987         len = strlen(search);
2988
2989         /* we do not support '!' for function probes */
2990         if (WARN_ON(not))
2991                 return -EINVAL;
2992
2993         mutex_lock(&ftrace_lock);
2994
2995         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2996         if (!hash) {
2997                 count = -ENOMEM;
2998                 goto out_unlock;
2999         }
3000
3001         if (unlikely(ftrace_disabled)) {
3002                 count = -ENODEV;
3003                 goto out_unlock;
3004         }
3005
3006         do_for_each_ftrace_rec(pg, rec) {
3007
3008                 if (!ftrace_match_record(rec, NULL, search, len, type))
3009                         continue;
3010
3011                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3012                 if (!entry) {
3013                         /* If we did not process any, then return error */
3014                         if (!count)
3015                                 count = -ENOMEM;
3016                         goto out_unlock;
3017                 }
3018
3019                 count++;
3020
3021                 entry->data = data;
3022
3023                 /*
3024                  * The caller might want to do something special
3025                  * for each function we find. We call the callback
3026                  * to give the caller an opportunity to do so.
3027                  */
3028                 if (ops->init) {
3029                         if (ops->init(ops, rec->ip, &entry->data) < 0) {
3030                                 /* caller does not like this func */
3031                                 kfree(entry);
3032                                 continue;
3033                         }
3034                 }
3035
3036                 ret = enter_record(hash, rec, 0);
3037                 if (ret < 0) {
3038                         kfree(entry);
3039                         count = ret;
3040                         goto out_unlock;
3041                 }
3042
3043                 entry->ops = ops;
3044                 entry->ip = rec->ip;
3045
3046                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3047                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3048
3049         } while_for_each_ftrace_rec();
3050
3051         ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3052         if (ret < 0)
3053                 count = ret;
3054
3055         __enable_ftrace_function_probe();
3056
3057  out_unlock:
3058         mutex_unlock(&ftrace_lock);
3059         free_ftrace_hash(hash);
3060
3061         return count;
3062 }
3063
3064 enum {
3065         PROBE_TEST_FUNC         = 1,
3066         PROBE_TEST_DATA         = 2
3067 };
3068
3069 static void
3070 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3071                                   void *data, int flags)
3072 {
3073         struct ftrace_func_entry *rec_entry;
3074         struct ftrace_func_probe *entry;
3075         struct ftrace_func_probe *p;
3076         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3077         struct list_head free_list;
3078         struct ftrace_hash *hash;
3079         struct hlist_node *tmp;
3080         char str[KSYM_SYMBOL_LEN];
3081         int type = MATCH_FULL;
3082         int i, len = 0;
3083         char *search;
3084
3085         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3086                 glob = NULL;
3087         else if (glob) {
3088                 int not;
3089
3090                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3091                 len = strlen(search);
3092
3093                 /* we do not support '!' for function probes */
3094                 if (WARN_ON(not))
3095                         return;
3096         }
3097
3098         mutex_lock(&ftrace_lock);
3099
3100         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3101         if (!hash)
3102                 /* Hmm, should report this somehow */
3103                 goto out_unlock;
3104
3105         INIT_LIST_HEAD(&free_list);
3106
3107         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3108                 struct hlist_head *hhd = &ftrace_func_hash[i];
3109
3110                 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3111
3112                         /* break up if statements for readability */
3113                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3114                                 continue;
3115
3116                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3117                                 continue;
3118
3119                         /* do this last, since it is the most expensive */
3120                         if (glob) {
3121                                 kallsyms_lookup(entry->ip, NULL, NULL,
3122                                                 NULL, str);
3123                                 if (!ftrace_match(str, glob, len, type))
3124                                         continue;
3125                         }
3126
3127                         rec_entry = ftrace_lookup_ip(hash, entry->ip);
3128                         /* It is possible more than one entry had this ip */
3129                         if (rec_entry)
3130                                 free_hash_entry(hash, rec_entry);
3131
3132                         hlist_del_rcu(&entry->node);
3133                         list_add(&entry->free_list, &free_list);
3134                 }
3135         }
3136         __disable_ftrace_function_probe();
3137         /*
3138          * Remove after the disable is called. Otherwise, if the last
3139          * probe is removed, a null hash means *all enabled*.
3140          */
3141         ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3142         synchronize_sched();
3143         list_for_each_entry_safe(entry, p, &free_list, free_list) {
3144                 list_del(&entry->free_list);
3145                 ftrace_free_entry(entry);
3146         }
3147                 
3148  out_unlock:
3149         mutex_unlock(&ftrace_lock);
3150         free_ftrace_hash(hash);
3151 }
3152
3153 void
3154 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3155                                 void *data)
3156 {
3157         __unregister_ftrace_function_probe(glob, ops, data,
3158                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3159 }
3160
3161 void
3162 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3163 {
3164         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3165 }
3166
3167 void unregister_ftrace_function_probe_all(char *glob)
3168 {
3169         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3170 }
3171
3172 static LIST_HEAD(ftrace_commands);
3173 static DEFINE_MUTEX(ftrace_cmd_mutex);
3174
3175 int register_ftrace_command(struct ftrace_func_command *cmd)
3176 {
3177         struct ftrace_func_command *p;
3178         int ret = 0;
3179
3180         mutex_lock(&ftrace_cmd_mutex);
3181         list_for_each_entry(p, &ftrace_commands, list) {
3182                 if (strcmp(cmd->name, p->name) == 0) {
3183                         ret = -EBUSY;
3184                         goto out_unlock;
3185                 }
3186         }
3187         list_add(&cmd->list, &ftrace_commands);
3188  out_unlock:
3189         mutex_unlock(&ftrace_cmd_mutex);
3190
3191         return ret;
3192 }
3193
3194 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3195 {
3196         struct ftrace_func_command *p, *n;
3197         int ret = -ENODEV;
3198
3199         mutex_lock(&ftrace_cmd_mutex);
3200         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3201                 if (strcmp(cmd->name, p->name) == 0) {
3202                         ret = 0;
3203                         list_del_init(&p->list);
3204                         goto out_unlock;
3205                 }
3206         }
3207  out_unlock:
3208         mutex_unlock(&ftrace_cmd_mutex);
3209
3210         return ret;
3211 }
3212
3213 static int ftrace_process_regex(struct ftrace_hash *hash,
3214                                 char *buff, int len, int enable)
3215 {
3216         char *func, *command, *next = buff;
3217         struct ftrace_func_command *p;
3218         int ret = -EINVAL;
3219
3220         func = strsep(&next, ":");
3221
3222         if (!next) {
3223                 ret = ftrace_match_records(hash, func, len);
3224                 if (!ret)
3225                         ret = -EINVAL;
3226                 if (ret < 0)
3227                         return ret;
3228                 return 0;
3229         }
3230
3231         /* command found */
3232
3233         command = strsep(&next, ":");
3234
3235         mutex_lock(&ftrace_cmd_mutex);
3236         list_for_each_entry(p, &ftrace_commands, list) {
3237                 if (strcmp(p->name, command) == 0) {
3238                         ret = p->func(hash, func, command, next, enable);
3239                         goto out_unlock;
3240                 }
3241         }
3242  out_unlock:
3243         mutex_unlock(&ftrace_cmd_mutex);
3244
3245         return ret;
3246 }
3247
3248 static ssize_t
3249 ftrace_regex_write(struct file *file, const char __user *ubuf,
3250                    size_t cnt, loff_t *ppos, int enable)
3251 {
3252         struct ftrace_iterator *iter;
3253         struct trace_parser *parser;
3254         ssize_t ret, read;
3255
3256         if (!cnt)
3257                 return 0;
3258
3259         mutex_lock(&ftrace_regex_lock);
3260
3261         ret = -ENODEV;
3262         if (unlikely(ftrace_disabled))
3263                 goto out_unlock;
3264
3265         if (file->f_mode & FMODE_READ) {
3266                 struct seq_file *m = file->private_data;
3267                 iter = m->private;
3268         } else
3269                 iter = file->private_data;
3270
3271         parser = &iter->parser;
3272         read = trace_get_user(parser, ubuf, cnt, ppos);
3273
3274         if (read >= 0 && trace_parser_loaded(parser) &&
3275             !trace_parser_cont(parser)) {
3276                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3277                                            parser->idx, enable);
3278                 trace_parser_clear(parser);
3279                 if (ret)
3280                         goto out_unlock;
3281         }
3282
3283         ret = read;
3284 out_unlock:
3285         mutex_unlock(&ftrace_regex_lock);
3286
3287         return ret;
3288 }
3289
3290 ssize_t
3291 ftrace_filter_write(struct file *file, const char __user *ubuf,
3292                     size_t cnt, loff_t *ppos)
3293 {
3294         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3295 }
3296
3297 ssize_t
3298 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3299                      size_t cnt, loff_t *ppos)
3300 {
3301         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3302 }
3303
3304 static int
3305 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3306 {
3307         struct ftrace_func_entry *entry;
3308
3309         if (!ftrace_location(ip))
3310                 return -EINVAL;
3311
3312         if (remove) {
3313                 entry = ftrace_lookup_ip(hash, ip);
3314                 if (!entry)
3315                         return -ENOENT;
3316                 free_hash_entry(hash, entry);
3317                 return 0;
3318         }
3319
3320         return add_hash_entry(hash, ip);
3321 }
3322
3323 static int
3324 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3325                 unsigned long ip, int remove, int reset, int enable)
3326 {
3327         struct ftrace_hash **orig_hash;
3328         struct ftrace_hash *hash;
3329         int ret;
3330
3331         /* All global ops uses the global ops filters */
3332         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3333                 ops = &global_ops;
3334
3335         if (unlikely(ftrace_disabled))
3336                 return -ENODEV;
3337
3338         if (enable)
3339                 orig_hash = &ops->filter_hash;
3340         else
3341                 orig_hash = &ops->notrace_hash;
3342
3343         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3344         if (!hash)
3345                 return -ENOMEM;
3346
3347         mutex_lock(&ftrace_regex_lock);
3348         if (reset)
3349                 ftrace_filter_reset(hash);
3350         if (buf && !ftrace_match_records(hash, buf, len)) {
3351                 ret = -EINVAL;
3352                 goto out_regex_unlock;
3353         }
3354         if (ip) {
3355                 ret = ftrace_match_addr(hash, ip, remove);
3356                 if (ret < 0)
3357                         goto out_regex_unlock;
3358         }
3359
3360         mutex_lock(&ftrace_lock);
3361         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3362         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3363             && ftrace_enabled)
3364                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3365
3366         mutex_unlock(&ftrace_lock);
3367
3368  out_regex_unlock:
3369         mutex_unlock(&ftrace_regex_lock);
3370
3371         free_ftrace_hash(hash);
3372         return ret;
3373 }
3374
3375 static int
3376 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3377                 int reset, int enable)
3378 {
3379         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3380 }
3381
3382 /**
3383  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3384  * @ops - the ops to set the filter with
3385  * @ip - the address to add to or remove from the filter.
3386  * @remove - non zero to remove the ip from the filter
3387  * @reset - non zero to reset all filters before applying this filter.
3388  *
3389  * Filters denote which functions should be enabled when tracing is enabled
3390  * If @ip is NULL, it failes to update filter.
3391  */
3392 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3393                          int remove, int reset)
3394 {
3395         return ftrace_set_addr(ops, ip, remove, reset, 1);
3396 }
3397 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3398
3399 static int
3400 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3401                  int reset, int enable)
3402 {
3403         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3404 }
3405
3406 /**
3407  * ftrace_set_filter - set a function to filter on in ftrace
3408  * @ops - the ops to set the filter with
3409  * @buf - the string that holds the function filter text.
3410  * @len - the length of the string.
3411  * @reset - non zero to reset all filters before applying this filter.
3412  *
3413  * Filters denote which functions should be enabled when tracing is enabled.
3414  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3415  */
3416 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3417                        int len, int reset)
3418 {
3419         return ftrace_set_regex(ops, buf, len, reset, 1);
3420 }
3421 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3422
3423 /**
3424  * ftrace_set_notrace - set a function to not trace in ftrace
3425  * @ops - the ops to set the notrace filter with
3426  * @buf - the string that holds the function notrace text.
3427  * @len - the length of the string.
3428  * @reset - non zero to reset all filters before applying this filter.
3429  *
3430  * Notrace Filters denote which functions should not be enabled when tracing
3431  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3432  * for tracing.
3433  */
3434 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3435                         int len, int reset)
3436 {
3437         return ftrace_set_regex(ops, buf, len, reset, 0);
3438 }
3439 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3440 /**
3441  * ftrace_set_filter - set a function to filter on in ftrace
3442  * @ops - the ops to set the filter with
3443  * @buf - the string that holds the function filter text.
3444  * @len - the length of the string.
3445  * @reset - non zero to reset all filters before applying this filter.
3446  *
3447  * Filters denote which functions should be enabled when tracing is enabled.
3448  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3449  */
3450 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3451 {
3452         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3453 }
3454 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3455
3456 /**
3457  * ftrace_set_notrace - set a function to not trace in ftrace
3458  * @ops - the ops to set the notrace filter with
3459  * @buf - the string that holds the function notrace text.
3460  * @len - the length of the string.
3461  * @reset - non zero to reset all filters before applying this filter.
3462  *
3463  * Notrace Filters denote which functions should not be enabled when tracing
3464  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3465  * for tracing.
3466  */
3467 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3468 {
3469         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3470 }
3471 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3472
3473 /*
3474  * command line interface to allow users to set filters on boot up.
3475  */
3476 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3477 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3478 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3479
3480 static int __init set_ftrace_notrace(char *str)
3481 {
3482         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3483         return 1;
3484 }
3485 __setup("ftrace_notrace=", set_ftrace_notrace);
3486
3487 static int __init set_ftrace_filter(char *str)
3488 {
3489         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3490         return 1;
3491 }
3492 __setup("ftrace_filter=", set_ftrace_filter);
3493
3494 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3495 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3496 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3497
3498 static int __init set_graph_function(char *str)
3499 {
3500         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3501         return 1;
3502 }
3503 __setup("ftrace_graph_filter=", set_graph_function);
3504
3505 static void __init set_ftrace_early_graph(char *buf)
3506 {
3507         int ret;
3508         char *func;
3509
3510         while (buf) {
3511                 func = strsep(&buf, ",");
3512                 /* we allow only one expression at a time */
3513                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3514                                       func);
3515                 if (ret)
3516                         printk(KERN_DEBUG "ftrace: function %s not "
3517                                           "traceable\n", func);
3518         }
3519 }
3520 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3521
3522 void __init
3523 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3524 {
3525         char *func;
3526
3527         while (buf) {
3528                 func = strsep(&buf, ",");
3529                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3530         }
3531 }
3532
3533 static void __init set_ftrace_early_filters(void)
3534 {
3535         if (ftrace_filter_buf[0])
3536                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3537         if (ftrace_notrace_buf[0])
3538                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3539 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3540         if (ftrace_graph_buf[0])
3541                 set_ftrace_early_graph(ftrace_graph_buf);
3542 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3543 }
3544
3545 int ftrace_regex_release(struct inode *inode, struct file *file)
3546 {
3547         struct seq_file *m = (struct seq_file *)file->private_data;
3548         struct ftrace_iterator *iter;
3549         struct ftrace_hash **orig_hash;
3550         struct trace_parser *parser;
3551         int filter_hash;
3552         int ret;
3553
3554         mutex_lock(&ftrace_regex_lock);
3555         if (file->f_mode & FMODE_READ) {
3556                 iter = m->private;
3557
3558                 seq_release(inode, file);
3559         } else
3560                 iter = file->private_data;
3561
3562         parser = &iter->parser;
3563         if (trace_parser_loaded(parser)) {
3564                 parser->buffer[parser->idx] = 0;
3565                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3566         }
3567
3568         trace_parser_put(parser);
3569
3570         if (file->f_mode & FMODE_WRITE) {
3571                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3572
3573                 if (filter_hash)
3574                         orig_hash = &iter->ops->filter_hash;
3575                 else
3576                         orig_hash = &iter->ops->notrace_hash;
3577
3578                 mutex_lock(&ftrace_lock);
3579                 ret = ftrace_hash_move(iter->ops, filter_hash,
3580                                        orig_hash, iter->hash);
3581                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3582                     && ftrace_enabled)
3583                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3584
3585                 mutex_unlock(&ftrace_lock);
3586         }
3587         free_ftrace_hash(iter->hash);
3588         kfree(iter);
3589
3590         mutex_unlock(&ftrace_regex_lock);
3591         return 0;
3592 }
3593
3594 static const struct file_operations ftrace_avail_fops = {
3595         .open = ftrace_avail_open,
3596         .read = seq_read,
3597         .llseek = seq_lseek,
3598         .release = seq_release_private,
3599 };
3600
3601 static const struct file_operations ftrace_enabled_fops = {
3602         .open = ftrace_enabled_open,
3603         .read = seq_read,
3604         .llseek = seq_lseek,
3605         .release = seq_release_private,
3606 };
3607
3608 static const struct file_operations ftrace_filter_fops = {
3609         .open = ftrace_filter_open,
3610         .read = seq_read,
3611         .write = ftrace_filter_write,
3612         .llseek = ftrace_filter_lseek,
3613         .release = ftrace_regex_release,
3614 };
3615
3616 static const struct file_operations ftrace_notrace_fops = {
3617         .open = ftrace_notrace_open,
3618         .read = seq_read,
3619         .write = ftrace_notrace_write,
3620         .llseek = ftrace_filter_lseek,
3621         .release = ftrace_regex_release,
3622 };
3623
3624 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3625
3626 static DEFINE_MUTEX(graph_lock);
3627
3628 int ftrace_graph_count;
3629 int ftrace_graph_filter_enabled;
3630 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3631
3632 static void *
3633 __g_next(struct seq_file *m, loff_t *pos)
3634 {
3635         if (*pos >= ftrace_graph_count)
3636                 return NULL;
3637         return &ftrace_graph_funcs[*pos];
3638 }
3639
3640 static void *
3641 g_next(struct seq_file *m, void *v, loff_t *pos)
3642 {
3643         (*pos)++;
3644         return __g_next(m, pos);
3645 }
3646
3647 static void *g_start(struct seq_file *m, loff_t *pos)
3648 {
3649         mutex_lock(&graph_lock);
3650
3651         /* Nothing, tell g_show to print all functions are enabled */
3652         if (!ftrace_graph_filter_enabled && !*pos)
3653                 return (void *)1;
3654
3655         return __g_next(m, pos);
3656 }
3657
3658 static void g_stop(struct seq_file *m, void *p)
3659 {
3660         mutex_unlock(&graph_lock);
3661 }
3662
3663 static int g_show(struct seq_file *m, void *v)
3664 {
3665         unsigned long *ptr = v;
3666
3667         if (!ptr)
3668                 return 0;
3669
3670         if (ptr == (unsigned long *)1) {
3671                 seq_printf(m, "#### all functions enabled ####\n");
3672                 return 0;
3673         }
3674
3675         seq_printf(m, "%ps\n", (void *)*ptr);
3676
3677         return 0;
3678 }
3679
3680 static const struct seq_operations ftrace_graph_seq_ops = {
3681         .start = g_start,
3682         .next = g_next,
3683         .stop = g_stop,
3684         .show = g_show,
3685 };
3686
3687 static int
3688 ftrace_graph_open(struct inode *inode, struct file *file)
3689 {
3690         int ret = 0;
3691
3692         if (unlikely(ftrace_disabled))
3693                 return -ENODEV;
3694
3695         mutex_lock(&graph_lock);
3696         if ((file->f_mode & FMODE_WRITE) &&
3697             (file->f_flags & O_TRUNC)) {
3698                 ftrace_graph_filter_enabled = 0;
3699                 ftrace_graph_count = 0;
3700                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3701         }
3702         mutex_unlock(&graph_lock);
3703
3704         if (file->f_mode & FMODE_READ)
3705                 ret = seq_open(file, &ftrace_graph_seq_ops);
3706
3707         return ret;
3708 }
3709
3710 static int
3711 ftrace_graph_release(struct inode *inode, struct file *file)
3712 {
3713         if (file->f_mode & FMODE_READ)
3714                 seq_release(inode, file);
3715         return 0;
3716 }
3717
3718 static int
3719 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3720 {
3721         struct dyn_ftrace *rec;
3722         struct ftrace_page *pg;
3723         int search_len;
3724         int fail = 1;
3725         int type, not;
3726         char *search;
3727         bool exists;
3728         int i;
3729
3730         /* decode regex */
3731         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3732         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3733                 return -EBUSY;
3734
3735         search_len = strlen(search);
3736
3737         mutex_lock(&ftrace_lock);
3738
3739         if (unlikely(ftrace_disabled)) {
3740                 mutex_unlock(&ftrace_lock);
3741                 return -ENODEV;
3742         }
3743
3744         do_for_each_ftrace_rec(pg, rec) {
3745
3746                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3747                         /* if it is in the array */
3748                         exists = false;
3749                         for (i = 0; i < *idx; i++) {
3750                                 if (array[i] == rec->ip) {
3751                                         exists = true;
3752                                         break;
3753                                 }
3754                         }
3755
3756                         if (!not) {
3757                                 fail = 0;
3758                                 if (!exists) {
3759                                         array[(*idx)++] = rec->ip;
3760                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3761                                                 goto out;
3762                                 }
3763                         } else {
3764                                 if (exists) {
3765                                         array[i] = array[--(*idx)];
3766                                         array[*idx] = 0;
3767                                         fail = 0;
3768                                 }
3769                         }
3770                 }
3771         } while_for_each_ftrace_rec();
3772 out:
3773         mutex_unlock(&ftrace_lock);
3774
3775         if (fail)
3776                 return -EINVAL;
3777
3778         ftrace_graph_filter_enabled = !!(*idx);
3779
3780         return 0;
3781 }
3782
3783 static ssize_t
3784 ftrace_graph_write(struct file *file, const char __user *ubuf,
3785                    size_t cnt, loff_t *ppos)
3786 {
3787         struct trace_parser parser;
3788         ssize_t read, ret;
3789
3790         if (!cnt)
3791                 return 0;
3792
3793         mutex_lock(&graph_lock);
3794
3795         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3796                 ret = -ENOMEM;
3797                 goto out_unlock;
3798         }
3799
3800         read = trace_get_user(&parser, ubuf, cnt, ppos);
3801
3802         if (read >= 0 && trace_parser_loaded((&parser))) {
3803                 parser.buffer[parser.idx] = 0;
3804
3805                 /* we allow only one expression at a time */
3806                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3807                                         parser.buffer);
3808                 if (ret)
3809                         goto out_free;
3810         }
3811
3812         ret = read;
3813
3814 out_free:
3815         trace_parser_put(&parser);
3816 out_unlock:
3817         mutex_unlock(&graph_lock);
3818
3819         return ret;
3820 }
3821
3822 static const struct file_operations ftrace_graph_fops = {
3823         .open           = ftrace_graph_open,
3824         .read           = seq_read,
3825         .write          = ftrace_graph_write,
3826         .llseek         = ftrace_filter_lseek,
3827         .release        = ftrace_graph_release,
3828 };
3829 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3830
3831 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3832 {
3833
3834         trace_create_file("available_filter_functions", 0444,
3835                         d_tracer, NULL, &ftrace_avail_fops);
3836
3837         trace_create_file("enabled_functions", 0444,
3838                         d_tracer, NULL, &ftrace_enabled_fops);
3839
3840         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3841                         NULL, &ftrace_filter_fops);
3842
3843         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3844                                     NULL, &ftrace_notrace_fops);
3845
3846 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3847         trace_create_file("set_graph_function", 0444, d_tracer,
3848                                     NULL,
3849                                     &ftrace_graph_fops);
3850 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3851
3852         return 0;
3853 }
3854
3855 static int ftrace_cmp_ips(const void *a, const void *b)
3856 {
3857         const unsigned long *ipa = a;
3858         const unsigned long *ipb = b;
3859
3860         if (*ipa > *ipb)
3861                 return 1;
3862         if (*ipa < *ipb)
3863                 return -1;
3864         return 0;
3865 }
3866
3867 static void ftrace_swap_ips(void *a, void *b, int size)
3868 {
3869         unsigned long *ipa = a;
3870         unsigned long *ipb = b;
3871         unsigned long t;
3872
3873         t = *ipa;
3874         *ipa = *ipb;
3875         *ipb = t;
3876 }
3877
3878 static int ftrace_process_locs(struct module *mod,
3879                                unsigned long *start,
3880                                unsigned long *end)
3881 {
3882         struct ftrace_page *start_pg;
3883         struct ftrace_page *pg;
3884         struct dyn_ftrace *rec;
3885         unsigned long count;
3886         unsigned long *p;
3887         unsigned long addr;
3888         unsigned long flags = 0; /* Shut up gcc */
3889         int ret = -ENOMEM;
3890
3891         count = end - start;
3892
3893         if (!count)
3894                 return 0;
3895
3896         sort(start, count, sizeof(*start),
3897              ftrace_cmp_ips, ftrace_swap_ips);
3898
3899         start_pg = ftrace_allocate_pages(count);
3900         if (!start_pg)
3901                 return -ENOMEM;
3902
3903         mutex_lock(&ftrace_lock);
3904
3905         /*
3906          * Core and each module needs their own pages, as
3907          * modules will free them when they are removed.
3908          * Force a new page to be allocated for modules.
3909          */
3910         if (!mod) {
3911                 WARN_ON(ftrace_pages || ftrace_pages_start);
3912                 /* First initialization */
3913                 ftrace_pages = ftrace_pages_start = start_pg;
3914         } else {
3915                 if (!ftrace_pages)
3916                         goto out;
3917
3918                 if (WARN_ON(ftrace_pages->next)) {
3919                         /* Hmm, we have free pages? */
3920                         while (ftrace_pages->next)
3921                                 ftrace_pages = ftrace_pages->next;
3922                 }
3923
3924                 ftrace_pages->next = start_pg;
3925         }
3926
3927         p = start;
3928         pg = start_pg;
3929         while (p < end) {
3930                 addr = ftrace_call_adjust(*p++);
3931                 /*
3932                  * Some architecture linkers will pad between
3933                  * the different mcount_loc sections of different
3934                  * object files to satisfy alignments.
3935                  * Skip any NULL pointers.
3936                  */
3937                 if (!addr)
3938                         continue;
3939
3940                 if (pg->index == pg->size) {
3941                         /* We should have allocated enough */
3942                         if (WARN_ON(!pg->next))
3943                                 break;
3944                         pg = pg->next;
3945                 }
3946
3947                 rec = &pg->records[pg->index++];
3948                 rec->ip = addr;
3949         }
3950
3951         /* We should have used all pages */
3952         WARN_ON(pg->next);
3953
3954         /* Assign the last page to ftrace_pages */
3955         ftrace_pages = pg;
3956
3957         /* These new locations need to be initialized */
3958         ftrace_new_pgs = start_pg;
3959
3960         /*
3961          * We only need to disable interrupts on start up
3962          * because we are modifying code that an interrupt
3963          * may execute, and the modification is not atomic.
3964          * But for modules, nothing runs the code we modify
3965          * until we are finished with it, and there's no
3966          * reason to cause large interrupt latencies while we do it.
3967          */
3968         if (!mod)
3969                 local_irq_save(flags);
3970         ftrace_update_code(mod);
3971         if (!mod)
3972                 local_irq_restore(flags);
3973         ret = 0;
3974  out:
3975         mutex_unlock(&ftrace_lock);
3976
3977         return ret;
3978 }
3979
3980 #ifdef CONFIG_MODULES
3981
3982 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3983
3984 void ftrace_release_mod(struct module *mod)
3985 {
3986         struct dyn_ftrace *rec;
3987         struct ftrace_page **last_pg;
3988         struct ftrace_page *pg;
3989         int order;
3990
3991         mutex_lock(&ftrace_lock);
3992
3993         if (ftrace_disabled)
3994                 goto out_unlock;
3995
3996         /*
3997          * Each module has its own ftrace_pages, remove
3998          * them from the list.
3999          */
4000         last_pg = &ftrace_pages_start;
4001         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4002                 rec = &pg->records[0];
4003                 if (within_module_core(rec->ip, mod)) {
4004                         /*
4005                          * As core pages are first, the first
4006                          * page should never be a module page.
4007                          */
4008                         if (WARN_ON(pg == ftrace_pages_start))
4009                                 goto out_unlock;
4010
4011                         /* Check if we are deleting the last page */
4012                         if (pg == ftrace_pages)
4013                                 ftrace_pages = next_to_ftrace_page(last_pg);
4014
4015                         *last_pg = pg->next;
4016                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4017                         free_pages((unsigned long)pg->records, order);
4018                         kfree(pg);
4019                 } else
4020                         last_pg = &pg->next;
4021         }
4022  out_unlock:
4023         mutex_unlock(&ftrace_lock);
4024 }
4025
4026 static void ftrace_init_module(struct module *mod,
4027                                unsigned long *start, unsigned long *end)
4028 {
4029         if (ftrace_disabled || start == end)
4030                 return;
4031         ftrace_process_locs(mod, start, end);
4032 }
4033
4034 static int ftrace_module_notify_enter(struct notifier_block *self,
4035                                       unsigned long val, void *data)
4036 {
4037         struct module *mod = data;
4038
4039         if (val == MODULE_STATE_COMING)
4040                 ftrace_init_module(mod, mod->ftrace_callsites,
4041                                    mod->ftrace_callsites +
4042                                    mod->num_ftrace_callsites);
4043         return 0;
4044 }
4045
4046 static int ftrace_module_notify_exit(struct notifier_block *self,
4047                                      unsigned long val, void *data)
4048 {
4049         struct module *mod = data;
4050
4051         if (val == MODULE_STATE_GOING)
4052                 ftrace_release_mod(mod);
4053
4054         return 0;
4055 }
4056 #else
4057 static int ftrace_module_notify_enter(struct notifier_block *self,
4058                                       unsigned long val, void *data)
4059 {
4060         return 0;
4061 }
4062 static int ftrace_module_notify_exit(struct notifier_block *self,
4063                                      unsigned long val, void *data)
4064 {
4065         return 0;
4066 }
4067 #endif /* CONFIG_MODULES */
4068
4069 struct notifier_block ftrace_module_enter_nb = {
4070         .notifier_call = ftrace_module_notify_enter,
4071         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4072 };
4073
4074 struct notifier_block ftrace_module_exit_nb = {
4075         .notifier_call = ftrace_module_notify_exit,
4076         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4077 };
4078
4079 extern unsigned long __start_mcount_loc[];
4080 extern unsigned long __stop_mcount_loc[];
4081
4082 void __init ftrace_init(void)
4083 {
4084         unsigned long count, addr, flags;
4085         int ret;
4086
4087         /* Keep the ftrace pointer to the stub */
4088         addr = (unsigned long)ftrace_stub;
4089
4090         local_irq_save(flags);
4091         ftrace_dyn_arch_init(&addr);
4092         local_irq_restore(flags);
4093
4094         /* ftrace_dyn_arch_init places the return code in addr */
4095         if (addr)
4096                 goto failed;
4097
4098         count = __stop_mcount_loc - __start_mcount_loc;
4099
4100         ret = ftrace_dyn_table_alloc(count);
4101         if (ret)
4102                 goto failed;
4103
4104         last_ftrace_enabled = ftrace_enabled = 1;
4105
4106         ret = ftrace_process_locs(NULL,
4107                                   __start_mcount_loc,
4108                                   __stop_mcount_loc);
4109
4110         ret = register_module_notifier(&ftrace_module_enter_nb);
4111         if (ret)
4112                 pr_warning("Failed to register trace ftrace module enter notifier\n");
4113
4114         ret = register_module_notifier(&ftrace_module_exit_nb);
4115         if (ret)
4116                 pr_warning("Failed to register trace ftrace module exit notifier\n");
4117
4118         set_ftrace_early_filters();
4119
4120         return;
4121  failed:
4122         ftrace_disabled = 1;
4123 }
4124
4125 #else
4126
4127 static struct ftrace_ops global_ops = {
4128         .func                   = ftrace_stub,
4129         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
4130 };
4131
4132 static int __init ftrace_nodyn_init(void)
4133 {
4134         ftrace_enabled = 1;
4135         return 0;
4136 }
4137 core_initcall(ftrace_nodyn_init);
4138
4139 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4140 static inline void ftrace_startup_enable(int command) { }
4141 /* Keep as macros so we do not need to define the commands */
4142 # define ftrace_startup(ops, command)                   \
4143         ({                                              \
4144                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4145                 0;                                      \
4146         })
4147 # define ftrace_shutdown(ops, command)  do { } while (0)
4148 # define ftrace_startup_sysctl()        do { } while (0)
4149 # define ftrace_shutdown_sysctl()       do { } while (0)
4150
4151 static inline int
4152 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
4153 {
4154         return 1;
4155 }
4156
4157 #endif /* CONFIG_DYNAMIC_FTRACE */
4158
4159 static void
4160 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4161                         struct ftrace_ops *op, struct pt_regs *regs)
4162 {
4163         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4164                 return;
4165
4166         /*
4167          * Some of the ops may be dynamically allocated,
4168          * they must be freed after a synchronize_sched().
4169          */
4170         preempt_disable_notrace();
4171         trace_recursion_set(TRACE_CONTROL_BIT);
4172         do_for_each_ftrace_op(op, ftrace_control_list) {
4173                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4174                     !ftrace_function_local_disabled(op) &&
4175                     ftrace_ops_test(op, ip))
4176                         op->func(ip, parent_ip, op, regs);
4177         } while_for_each_ftrace_op(op);
4178         trace_recursion_clear(TRACE_CONTROL_BIT);
4179         preempt_enable_notrace();
4180 }
4181
4182 static struct ftrace_ops control_ops = {
4183         .func = ftrace_ops_control_func,
4184         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
4185 };
4186
4187 static inline void
4188 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4189                        struct ftrace_ops *ignored, struct pt_regs *regs)
4190 {
4191         struct ftrace_ops *op;
4192         int bit;
4193
4194         if (function_trace_stop)
4195                 return;
4196
4197         bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4198         if (bit < 0)
4199                 return;
4200
4201         /*
4202          * Some of the ops may be dynamically allocated,
4203          * they must be freed after a synchronize_sched().
4204          */
4205         preempt_disable_notrace();
4206         do_for_each_ftrace_op(op, ftrace_ops_list) {
4207                 if (ftrace_ops_test(op, ip))
4208                         op->func(ip, parent_ip, op, regs);
4209         } while_for_each_ftrace_op(op);
4210         preempt_enable_notrace();
4211         trace_clear_recursion(bit);
4212 }
4213
4214 /*
4215  * Some archs only support passing ip and parent_ip. Even though
4216  * the list function ignores the op parameter, we do not want any
4217  * C side effects, where a function is called without the caller
4218  * sending a third parameter.
4219  * Archs are to support both the regs and ftrace_ops at the same time.
4220  * If they support ftrace_ops, it is assumed they support regs.
4221  * If call backs want to use regs, they must either check for regs
4222  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4223  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4224  * An architecture can pass partial regs with ftrace_ops and still
4225  * set the ARCH_SUPPORT_FTARCE_OPS.
4226  */
4227 #if ARCH_SUPPORTS_FTRACE_OPS
4228 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4229                                  struct ftrace_ops *op, struct pt_regs *regs)
4230 {
4231         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4232 }
4233 #else
4234 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4235 {
4236         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4237 }
4238 #endif
4239
4240 static void clear_ftrace_swapper(void)
4241 {
4242         struct task_struct *p;
4243         int cpu;
4244
4245         get_online_cpus();
4246         for_each_online_cpu(cpu) {
4247                 p = idle_task(cpu);
4248                 clear_tsk_trace_trace(p);
4249         }
4250         put_online_cpus();
4251 }
4252
4253 static void set_ftrace_swapper(void)
4254 {
4255         struct task_struct *p;
4256         int cpu;
4257
4258         get_online_cpus();
4259         for_each_online_cpu(cpu) {
4260                 p = idle_task(cpu);
4261                 set_tsk_trace_trace(p);
4262         }
4263         put_online_cpus();
4264 }
4265
4266 static void clear_ftrace_pid(struct pid *pid)
4267 {
4268         struct task_struct *p;
4269
4270         rcu_read_lock();
4271         do_each_pid_task(pid, PIDTYPE_PID, p) {
4272                 clear_tsk_trace_trace(p);
4273         } while_each_pid_task(pid, PIDTYPE_PID, p);
4274         rcu_read_unlock();
4275
4276         put_pid(pid);
4277 }
4278
4279 static void set_ftrace_pid(struct pid *pid)
4280 {
4281         struct task_struct *p;
4282
4283         rcu_read_lock();
4284         do_each_pid_task(pid, PIDTYPE_PID, p) {
4285                 set_tsk_trace_trace(p);
4286         } while_each_pid_task(pid, PIDTYPE_PID, p);
4287         rcu_read_unlock();
4288 }
4289
4290 static void clear_ftrace_pid_task(struct pid *pid)
4291 {
4292         if (pid == ftrace_swapper_pid)
4293                 clear_ftrace_swapper();
4294         else
4295                 clear_ftrace_pid(pid);
4296 }
4297
4298 static void set_ftrace_pid_task(struct pid *pid)
4299 {
4300         if (pid == ftrace_swapper_pid)
4301                 set_ftrace_swapper();
4302         else
4303                 set_ftrace_pid(pid);
4304 }
4305
4306 static int ftrace_pid_add(int p)
4307 {
4308         struct pid *pid;
4309         struct ftrace_pid *fpid;
4310         int ret = -EINVAL;
4311
4312         mutex_lock(&ftrace_lock);
4313
4314         if (!p)
4315                 pid = ftrace_swapper_pid;
4316         else
4317                 pid = find_get_pid(p);
4318
4319         if (!pid)
4320                 goto out;
4321
4322         ret = 0;
4323
4324         list_for_each_entry(fpid, &ftrace_pids, list)
4325                 if (fpid->pid == pid)
4326                         goto out_put;
4327
4328         ret = -ENOMEM;
4329
4330         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4331         if (!fpid)
4332                 goto out_put;
4333
4334         list_add(&fpid->list, &ftrace_pids);
4335         fpid->pid = pid;
4336
4337         set_ftrace_pid_task(pid);
4338
4339         ftrace_update_pid_func();
4340         ftrace_startup_enable(0);
4341
4342         mutex_unlock(&ftrace_lock);
4343         return 0;
4344
4345 out_put:
4346         if (pid != ftrace_swapper_pid)
4347                 put_pid(pid);
4348
4349 out:
4350         mutex_unlock(&ftrace_lock);
4351         return ret;
4352 }
4353
4354 static void ftrace_pid_reset(void)
4355 {
4356         struct ftrace_pid *fpid, *safe;
4357
4358         mutex_lock(&ftrace_lock);
4359         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4360                 struct pid *pid = fpid->pid;
4361
4362                 clear_ftrace_pid_task(pid);
4363
4364                 list_del(&fpid->list);
4365                 kfree(fpid);
4366         }
4367
4368         ftrace_update_pid_func();
4369         ftrace_startup_enable(0);
4370
4371         mutex_unlock(&ftrace_lock);
4372 }
4373
4374 static void *fpid_start(struct seq_file *m, loff_t *pos)
4375 {
4376         mutex_lock(&ftrace_lock);
4377
4378         if (list_empty(&ftrace_pids) && (!*pos))
4379                 return (void *) 1;
4380
4381         return seq_list_start(&ftrace_pids, *pos);
4382 }
4383
4384 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4385 {
4386         if (v == (void *)1)
4387                 return NULL;
4388
4389         return seq_list_next(v, &ftrace_pids, pos);
4390 }
4391
4392 static void fpid_stop(struct seq_file *m, void *p)
4393 {
4394         mutex_unlock(&ftrace_lock);
4395 }
4396
4397 static int fpid_show(struct seq_file *m, void *v)
4398 {
4399         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4400
4401         if (v == (void *)1) {
4402                 seq_printf(m, "no pid\n");
4403                 return 0;
4404         }
4405
4406         if (fpid->pid == ftrace_swapper_pid)
4407                 seq_printf(m, "swapper tasks\n");
4408         else
4409                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4410
4411         return 0;
4412 }
4413
4414 static const struct seq_operations ftrace_pid_sops = {
4415         .start = fpid_start,
4416         .next = fpid_next,
4417         .stop = fpid_stop,
4418         .show = fpid_show,
4419 };
4420
4421 static int
4422 ftrace_pid_open(struct inode *inode, struct file *file)
4423 {
4424         int ret = 0;
4425
4426         if ((file->f_mode & FMODE_WRITE) &&
4427             (file->f_flags & O_TRUNC))
4428                 ftrace_pid_reset();
4429
4430         if (file->f_mode & FMODE_READ)
4431                 ret = seq_open(file, &ftrace_pid_sops);
4432
4433         return ret;
4434 }
4435
4436 static ssize_t
4437 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4438                    size_t cnt, loff_t *ppos)
4439 {
4440         char buf[64], *tmp;
4441         long val;
4442         int ret;
4443
4444         if (cnt >= sizeof(buf))
4445                 return -EINVAL;
4446
4447         if (copy_from_user(&buf, ubuf, cnt))
4448                 return -EFAULT;
4449
4450         buf[cnt] = 0;
4451
4452         /*
4453          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4454          * to clean the filter quietly.
4455          */
4456         tmp = strstrip(buf);
4457         if (strlen(tmp) == 0)
4458                 return 1;
4459
4460         ret = kstrtol(tmp, 10, &val);
4461         if (ret < 0)
4462                 return ret;
4463
4464         ret = ftrace_pid_add(val);
4465
4466         return ret ? ret : cnt;
4467 }
4468
4469 static int
4470 ftrace_pid_release(struct inode *inode, struct file *file)
4471 {
4472         if (file->f_mode & FMODE_READ)
4473                 seq_release(inode, file);
4474
4475         return 0;
4476 }
4477
4478 static const struct file_operations ftrace_pid_fops = {
4479         .open           = ftrace_pid_open,
4480         .write          = ftrace_pid_write,
4481         .read           = seq_read,
4482         .llseek         = ftrace_filter_lseek,
4483         .release        = ftrace_pid_release,
4484 };
4485
4486 static __init int ftrace_init_debugfs(void)
4487 {
4488         struct dentry *d_tracer;
4489
4490         d_tracer = tracing_init_dentry();
4491         if (!d_tracer)
4492                 return 0;
4493
4494         ftrace_init_dyn_debugfs(d_tracer);
4495
4496         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4497                             NULL, &ftrace_pid_fops);
4498
4499         ftrace_profile_debugfs(d_tracer);
4500
4501         return 0;
4502 }
4503 fs_initcall(ftrace_init_debugfs);
4504
4505 /**
4506  * ftrace_kill - kill ftrace
4507  *
4508  * This function should be used by panic code. It stops ftrace
4509  * but in a not so nice way. If you need to simply kill ftrace
4510  * from a non-atomic section, use ftrace_kill.
4511  */
4512 void ftrace_kill(void)
4513 {
4514         ftrace_disabled = 1;
4515         ftrace_enabled = 0;
4516         clear_ftrace_function();
4517 }
4518
4519 /**
4520  * Test if ftrace is dead or not.
4521  */
4522 int ftrace_is_dead(void)
4523 {
4524         return ftrace_disabled;
4525 }
4526
4527 /**
4528  * register_ftrace_function - register a function for profiling
4529  * @ops - ops structure that holds the function for profiling.
4530  *
4531  * Register a function to be called by all functions in the
4532  * kernel.
4533  *
4534  * Note: @ops->func and all the functions it calls must be labeled
4535  *       with "notrace", otherwise it will go into a
4536  *       recursive loop.
4537  */
4538 int register_ftrace_function(struct ftrace_ops *ops)
4539 {
4540         int ret = -1;
4541
4542         mutex_lock(&ftrace_lock);
4543
4544         ret = __register_ftrace_function(ops);
4545         if (!ret)
4546                 ret = ftrace_startup(ops, 0);
4547
4548         mutex_unlock(&ftrace_lock);
4549
4550         return ret;
4551 }
4552 EXPORT_SYMBOL_GPL(register_ftrace_function);
4553
4554 /**
4555  * unregister_ftrace_function - unregister a function for profiling.
4556  * @ops - ops structure that holds the function to unregister
4557  *
4558  * Unregister a function that was added to be called by ftrace profiling.
4559  */
4560 int unregister_ftrace_function(struct ftrace_ops *ops)
4561 {
4562         int ret;
4563
4564         mutex_lock(&ftrace_lock);
4565         ret = __unregister_ftrace_function(ops);
4566         if (!ret)
4567                 ftrace_shutdown(ops, 0);
4568         mutex_unlock(&ftrace_lock);
4569
4570         return ret;
4571 }
4572 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4573
4574 int
4575 ftrace_enable_sysctl(struct ctl_table *table, int write,
4576                      void __user *buffer, size_t *lenp,
4577                      loff_t *ppos)
4578 {
4579         int ret = -ENODEV;
4580
4581         mutex_lock(&ftrace_lock);
4582
4583         if (unlikely(ftrace_disabled))
4584                 goto out;
4585
4586         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4587
4588         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4589                 goto out;
4590
4591         last_ftrace_enabled = !!ftrace_enabled;
4592
4593         if (ftrace_enabled) {
4594
4595                 ftrace_startup_sysctl();
4596
4597                 /* we are starting ftrace again */
4598                 if (ftrace_ops_list != &ftrace_list_end)
4599                         update_ftrace_function();
4600
4601         } else {
4602                 /* stopping ftrace calls (just send to ftrace_stub) */
4603                 ftrace_trace_function = ftrace_stub;
4604
4605                 ftrace_shutdown_sysctl();
4606         }
4607
4608  out:
4609         mutex_unlock(&ftrace_lock);
4610         return ret;
4611 }
4612
4613 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4614
4615 static int ftrace_graph_active;
4616 static struct notifier_block ftrace_suspend_notifier;
4617
4618 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4619 {
4620         return 0;
4621 }
4622
4623 /* The callbacks that hook a function */
4624 trace_func_graph_ret_t ftrace_graph_return =
4625                         (trace_func_graph_ret_t)ftrace_stub;
4626 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4627
4628 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4629 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4630 {
4631         int i;
4632         int ret = 0;
4633         unsigned long flags;
4634         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4635         struct task_struct *g, *t;
4636
4637         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4638                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4639                                         * sizeof(struct ftrace_ret_stack),
4640                                         GFP_KERNEL);
4641                 if (!ret_stack_list[i]) {
4642                         start = 0;
4643                         end = i;
4644                         ret = -ENOMEM;
4645                         goto free;
4646                 }
4647         }
4648
4649         read_lock_irqsave(&tasklist_lock, flags);
4650         do_each_thread(g, t) {
4651                 if (start == end) {
4652                         ret = -EAGAIN;
4653                         goto unlock;
4654                 }
4655
4656                 if (t->ret_stack == NULL) {
4657                         atomic_set(&t->tracing_graph_pause, 0);
4658                         atomic_set(&t->trace_overrun, 0);
4659                         t->curr_ret_stack = -1;
4660                         /* Make sure the tasks see the -1 first: */
4661                         smp_wmb();
4662                         t->ret_stack = ret_stack_list[start++];
4663                 }
4664         } while_each_thread(g, t);
4665
4666 unlock:
4667         read_unlock_irqrestore(&tasklist_lock, flags);
4668 free:
4669         for (i = start; i < end; i++)
4670                 kfree(ret_stack_list[i]);
4671         return ret;
4672 }
4673
4674 static void
4675 ftrace_graph_probe_sched_switch(void *ignore,
4676                         struct task_struct *prev, struct task_struct *next)
4677 {
4678         unsigned long long timestamp;
4679         int index;
4680
4681         /*
4682          * Does the user want to count the time a function was asleep.
4683          * If so, do not update the time stamps.
4684          */
4685         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4686                 return;
4687
4688         timestamp = trace_clock_local();
4689
4690         prev->ftrace_timestamp = timestamp;
4691
4692         /* only process tasks that we timestamped */
4693         if (!next->ftrace_timestamp)
4694                 return;
4695
4696         /*
4697          * Update all the counters in next to make up for the
4698          * time next was sleeping.
4699          */
4700         timestamp -= next->ftrace_timestamp;
4701
4702         for (index = next->curr_ret_stack; index >= 0; index--)
4703                 next->ret_stack[index].calltime += timestamp;
4704 }
4705
4706 /* Allocate a return stack for each task */
4707 static int start_graph_tracing(void)
4708 {
4709         struct ftrace_ret_stack **ret_stack_list;
4710         int ret, cpu;
4711
4712         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4713                                 sizeof(struct ftrace_ret_stack *),
4714                                 GFP_KERNEL);
4715
4716         if (!ret_stack_list)
4717                 return -ENOMEM;
4718
4719         /* The cpu_boot init_task->ret_stack will never be freed */
4720         for_each_online_cpu(cpu) {
4721                 if (!idle_task(cpu)->ret_stack)
4722                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4723         }
4724
4725         do {
4726                 ret = alloc_retstack_tasklist(ret_stack_list);
4727         } while (ret == -EAGAIN);
4728
4729         if (!ret) {
4730                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4731                 if (ret)
4732                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4733                                 " probe to kernel_sched_switch\n");
4734         }
4735
4736         kfree(ret_stack_list);
4737         return ret;
4738 }
4739
4740 /*
4741  * Hibernation protection.
4742  * The state of the current task is too much unstable during
4743  * suspend/restore to disk. We want to protect against that.
4744  */
4745 static int
4746 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4747                                                         void *unused)
4748 {
4749         switch (state) {
4750         case PM_HIBERNATION_PREPARE:
4751                 pause_graph_tracing();
4752                 break;
4753
4754         case PM_POST_HIBERNATION:
4755                 unpause_graph_tracing();
4756                 break;
4757         }
4758         return NOTIFY_DONE;
4759 }
4760
4761 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4762                         trace_func_graph_ent_t entryfunc)
4763 {
4764         int ret = 0;
4765
4766         mutex_lock(&ftrace_lock);
4767
4768         /* we currently allow only one tracer registered at a time */
4769         if (ftrace_graph_active) {
4770                 ret = -EBUSY;
4771                 goto out;
4772         }
4773
4774         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4775         register_pm_notifier(&ftrace_suspend_notifier);
4776
4777         ftrace_graph_active++;
4778         ret = start_graph_tracing();
4779         if (ret) {
4780                 ftrace_graph_active--;
4781                 goto out;
4782         }
4783
4784         ftrace_graph_return = retfunc;
4785         ftrace_graph_entry = entryfunc;
4786
4787         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4788
4789 out:
4790         mutex_unlock(&ftrace_lock);
4791         return ret;
4792 }
4793
4794 void unregister_ftrace_graph(void)
4795 {
4796         mutex_lock(&ftrace_lock);
4797
4798         if (unlikely(!ftrace_graph_active))
4799                 goto out;
4800
4801         ftrace_graph_active--;
4802         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4803         ftrace_graph_entry = ftrace_graph_entry_stub;
4804         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4805         unregister_pm_notifier(&ftrace_suspend_notifier);
4806         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4807
4808  out:
4809         mutex_unlock(&ftrace_lock);
4810 }
4811
4812 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4813
4814 static void
4815 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4816 {
4817         atomic_set(&t->tracing_graph_pause, 0);
4818         atomic_set(&t->trace_overrun, 0);
4819         t->ftrace_timestamp = 0;
4820         /* make curr_ret_stack visible before we add the ret_stack */
4821         smp_wmb();
4822         t->ret_stack = ret_stack;
4823 }
4824
4825 /*
4826  * Allocate a return stack for the idle task. May be the first
4827  * time through, or it may be done by CPU hotplug online.
4828  */
4829 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4830 {
4831         t->curr_ret_stack = -1;
4832         /*
4833          * The idle task has no parent, it either has its own
4834          * stack or no stack at all.
4835          */
4836         if (t->ret_stack)
4837                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4838
4839         if (ftrace_graph_active) {
4840                 struct ftrace_ret_stack *ret_stack;
4841
4842                 ret_stack = per_cpu(idle_ret_stack, cpu);
4843                 if (!ret_stack) {
4844                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4845                                             * sizeof(struct ftrace_ret_stack),
4846                                             GFP_KERNEL);
4847                         if (!ret_stack)
4848                                 return;
4849                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4850                 }
4851                 graph_init_task(t, ret_stack);
4852         }
4853 }
4854
4855 /* Allocate a return stack for newly created task */
4856 void ftrace_graph_init_task(struct task_struct *t)
4857 {
4858         /* Make sure we do not use the parent ret_stack */
4859         t->ret_stack = NULL;
4860         t->curr_ret_stack = -1;
4861
4862         if (ftrace_graph_active) {
4863                 struct ftrace_ret_stack *ret_stack;
4864
4865                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4866                                 * sizeof(struct ftrace_ret_stack),
4867                                 GFP_KERNEL);
4868                 if (!ret_stack)
4869                         return;
4870                 graph_init_task(t, ret_stack);
4871         }
4872 }
4873
4874 void ftrace_graph_exit_task(struct task_struct *t)
4875 {
4876         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4877
4878         t->ret_stack = NULL;
4879         /* NULL must become visible to IRQs before we free it: */
4880         barrier();
4881
4882         kfree(ret_stack);
4883 }
4884
4885 void ftrace_graph_stop(void)
4886 {
4887         ftrace_stop();
4888 }
4889 #endif