/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define pr_fmt(fmt)     "trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
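/*
 * Upper bound for a user-specified maxactive (the "r<MAXACTIVE>:" form);
 * kretprobe instances are kept on a list, so this must stay reasonable.
 */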
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
        struct list_head        list;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
        unsigned long __percpu *nhit;
        const char              *symbol;        /* symbol name */
        struct trace_probe      tp;
};

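/*
 * Allocation size for a trace_kprobe with @n probe arguments; tp.args
 * is a flexible array at the end of struct trace_probe.
 */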
#define SIZEOF_TRACE_KPROBE(n)                          \
        (offsetof(struct trace_kprobe, tp.args) +       \
        (sizeof(struct probe_arg) * (n)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
        return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
        return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
        return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
        return !!(kprobe_gone(&tk->rp.kp));
}

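/*
 * Probes on module symbols are specified as "MODNAME:SYMBOL"; match on
 * the module-name prefix followed by ':'.
 */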
static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
                                                 struct module *mod)
{
        int len = strlen(mod->name);
        const char *name = trace_kprobe_symbol(tk);
        return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
        return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
        unsigned long nhit = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                nhit += *per_cpu_ptr(tk->nhit, cpu);

        return nhit;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
        char            *symbol;
        long            offset;
        unsigned long   addr;
};

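/*
 * Re-resolve the symbol address via kallsyms and apply the stored offset;
 * returns 0 if the symbol is not (or is no longer) present.
 */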
unsigned long update_symbol_cache(struct symbol_cache *sc)
{
        sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

        if (sc->addr)
                sc->addr += sc->offset;

        return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
        kfree(sc->symbol);
        kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
        struct symbol_cache *sc;

        if (!sym || strlen(sym) == 0)
                return NULL;

        sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->symbol = kstrdup(sym, GFP_KERNEL);
        if (!sc->symbol) {
                kfree(sc);
                return NULL;
        }
        sc->offset = offset;
        update_symbol_cache(sc);

        return sc;
}

/*
 * Kprobes-specific fetch functions
 */
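/*
 * FETCH_FUNC_NAME(method, type) expands to fetch_<method>_<type>(); e.g.
 * DEFINE_FETCH_stack(u32) defines fetch_stack_u32(), which reads the Nth
 * kernel stack entry into *dest.
 */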
#define DEFINE_FETCH_stack(type)                                        \
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,          \
                                          void *offset, void *dest)     \
{                                                                       \
        *(type *)dest = (type)regs_get_kernel_stack_nth(regs,           \
                                (unsigned int)((unsigned long)offset)); \
}                                                                       \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string      NULL
#define fetch_stack_string_size NULL

#define DEFINE_FETCH_memory(type)                                       \
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,         \
                                          void *addr, void *dest)       \
{                                                                       \
        type retval;                                                    \
        if (probe_kernel_address(addr, retval))                         \
                *(type *)dest = 0;                                      \
        else                                                            \
                *(type *)dest = retval;                                 \
}                                                                       \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
                                            void *addr, void *dest)
{
        int maxlen = get_rloc_len(*(u32 *)dest);
        u8 *dst = get_rloc_data(dest);
        long ret;

        if (!maxlen)
                return;

        /*
         * Try to fetch the string again, since it can change while we
         * are probing.
         */
        ret = strncpy_from_unsafe(dst, addr, maxlen);

        if (ret < 0) {  /* Failed to fetch string */
                dst[0] = '\0';
                *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
        } else {
                *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
        }
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of a string, including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
                                                 void *addr, void *dest)
{
        mm_segment_t old_fs;
        int ret, len = 0;
        u8 c;

        old_fs = get_fs();
        set_fs(KERNEL_DS);
        pagefault_disable();

        do {
                ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);

        pagefault_enable();
        set_fs(old_fs);

        if (ret < 0)    /* Failed to check the length */
                *(u32 *)dest = 0;
        else
                *(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)                                       \
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{                                                                       \
        struct symbol_cache *sc = data;                                 \
        if (sc->addr)                                                   \
                fetch_memory_##type(regs, (void *)sc->addr, dest);      \
        else                                                            \
                *(type *)dest = 0;                                      \
}                                                                       \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8            NULL
#define fetch_file_offset_u16           NULL
#define fetch_file_offset_u32           NULL
#define fetch_file_offset_u64           NULL
#define fetch_file_offset_string        NULL
#define fetch_file_offset_string_size   NULL

/* Fetch type information table */
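/*
 * Maps the TYPE part of a FETCHARG:TYPE spec (e.g. "u32", "s64", "x16",
 * "string") to its fetch functions and print format.
 */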
static const struct fetch_type kprobes_fetch_type_table[] = {
        /* Special types */
        [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
                                        sizeof(u32), 1, "__data_loc char[]"),
        [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
                                        string_size, sizeof(u32), 0, "u32"),
        /* Basic types */
        ASSIGN_FETCH_TYPE(u8,  u8,  0),
        ASSIGN_FETCH_TYPE(u16, u16, 0),
        ASSIGN_FETCH_TYPE(u32, u32, 0),
        ASSIGN_FETCH_TYPE(u64, u64, 0),
        ASSIGN_FETCH_TYPE(s8,  u8,  1),
        ASSIGN_FETCH_TYPE(s16, u16, 1),
        ASSIGN_FETCH_TYPE(s32, u32, 1),
        ASSIGN_FETCH_TYPE(s64, u64, 1),
        ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
        ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
        ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
        ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

        ASSIGN_FETCH_TYPE_END
};

/*
 * Allocate a new trace_kprobe and initialize it (including the kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
                                             const char *event,
                                             void *addr,
                                             const char *symbol,
                                             unsigned long offs,
                                             int maxactive,
                                             int nargs, bool is_return)
{
        struct trace_kprobe *tk;
        int ret = -ENOMEM;

        tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
        if (!tk)
                return ERR_PTR(ret);

        tk->nhit = alloc_percpu(unsigned long);
        if (!tk->nhit)
                goto error;

        if (symbol) {
                tk->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tk->symbol)
                        goto error;
                tk->rp.kp.symbol_name = tk->symbol;
                tk->rp.kp.offset = offs;
        } else
                tk->rp.kp.addr = addr;

        if (is_return)
                tk->rp.handler = kretprobe_dispatcher;
        else
                tk->rp.kp.pre_handler = kprobe_dispatcher;

        tk->rp.maxactive = maxactive;

        if (!event || !is_good_name(event)) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.call.class = &tk->tp.class;
        tk->tp.call.name = kstrdup(event, GFP_KERNEL);
        if (!tk->tp.call.name)
                goto error;

        if (!group || !is_good_name(group)) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.class.system = kstrdup(group, GFP_KERNEL);
        if (!tk->tp.class.system)
                goto error;

        INIT_LIST_HEAD(&tk->list);
        INIT_LIST_HEAD(&tk->tp.files);
        return tk;
error:
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
        return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
        int i;

        for (i = 0; i < tk->tp.nr_args; i++)
                traceprobe_free_probe_arg(&tk->tp.args[i]);

        kfree(tk->tp.call.class->system);
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
                                              const char *group)
{
        struct trace_kprobe *tk;

        list_for_each_entry(tk, &probe_list, list)
                if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
                    strcmp(tk->tp.call.class->system, group) == 0)
                        return tk;
        return NULL;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        int ret = 0;

        if (file) {
                struct event_file_link *link;

                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        ret = -ENOMEM;
                        goto out;
                }

                link->file = file;
                list_add_tail_rcu(&link->list, &tk->tp.files);

                tk->tp.flags |= TP_FLAG_TRACE;
        } else
                tk->tp.flags |= TP_FLAG_PROFILE;

        if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
                if (trace_kprobe_is_return(tk))
                        ret = enable_kretprobe(&tk->rp);
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }
 out:
        return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        struct event_file_link *link = NULL;
        int wait = 0;
        int ret = 0;

        if (file) {
                link = find_event_file_link(&tk->tp, file);
                if (!link) {
                        ret = -EINVAL;
                        goto out;
                }

                list_del_rcu(&link->list);
                wait = 1;
                if (!list_empty(&tk->tp.files))
                        goto out;

                tk->tp.flags &= ~TP_FLAG_TRACE;
        } else
                tk->tp.flags &= ~TP_FLAG_PROFILE;

        if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        disable_kretprobe(&tk->rp);
                else
                        disable_kprobe(&tk->rp.kp);
                wait = 1;
        }
 out:
        if (wait) {
                /*
                 * Synchronize with kprobe_trace_func/kretprobe_trace_func
                 * to ensure the probe is really disabled (all running
                 * handlers have finished). This is needed not only for
                 * the kfree() below, but also for the caller:
                 * trace_remove_event_call() relies on it when releasing
                 * event_call related objects, which are accessed in
                 * kprobe_trace_func/kretprobe_trace_func.
                 */
                synchronize_sched();
                kfree(link);    /* Ignored if link == NULL */
        }

        return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
        int i, ret;

        if (trace_probe_is_registered(&tk->tp))
                return -EINVAL;

        for (i = 0; i < tk->tp.nr_args; i++)
                traceprobe_update_arg(&tk->tp.args[i]);

        /* Set/clear the disabled flag according to tp->flags */
        if (trace_probe_is_enabled(&tk->tp))
                tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
        else
                tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

        if (trace_kprobe_is_return(tk))
                ret = register_kretprobe(&tk->rp);
        else
                ret = register_kprobe(&tk->rp.kp);

        if (ret == 0)
                tk->tp.flags |= TP_FLAG_REGISTERED;
        else {
                pr_warn("Could not insert probe at %s+%lu: %d\n",
                        trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
                if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
                        pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
                        ret = 0;
                } else if (ret == -EILSEQ) {
                        pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
                                tk->rp.kp.addr);
                        ret = -EINVAL;
                }
        }

        return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
        if (trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        unregister_kretprobe(&tk->rp);
                else
                        unregister_kprobe(&tk->rp.kp);
                tk->tp.flags &= ~TP_FLAG_REGISTERED;
                /* Cleanup kprobe for reuse */
                if (tk->rp.kp.symbol_name)
                        tk->rp.kp.addr = NULL;
        }
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
        /* An enabled event cannot be unregistered */
        if (trace_probe_is_enabled(&tk->tp))
                return -EBUSY;

        /* Will fail if probe is being used by ftrace or perf */
        if (unregister_kprobe_event(tk))
                return -EBUSY;

        __unregister_trace_kprobe(tk);
        list_del(&tk->list);

        return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
        struct trace_kprobe *old_tk;
        int ret;

        mutex_lock(&probe_lock);

        /* Delete the old event if one with the same name exists */
        old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
                        tk->tp.call.class->system);
        if (old_tk) {
                ret = unregister_trace_kprobe(old_tk);
                if (ret < 0)
                        goto end;
                free_trace_kprobe(old_tk);
        }

        /* Register new event */
        ret = register_kprobe_event(tk);
        if (ret) {
                pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        /* Register k*probe */
        ret = __register_trace_kprobe(tk);
        if (ret < 0)
                unregister_kprobe_event(tk);
        else
                list_add_tail(&tk->list, &probe_list);

end:
        mutex_unlock(&probe_lock);
        return ret;
}

/* Module notifier callback: re-register probes on the coming module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
                                       unsigned long val, void *data)
{
        struct module *mod = data;
        struct trace_kprobe *tk;
        int ret;

        if (val != MODULE_STATE_COMING)
                return NOTIFY_DONE;

        /* Update probes on the coming module */
        mutex_lock(&probe_lock);
        list_for_each_entry(tk, &probe_list, list) {
                if (trace_kprobe_within_module(tk, mod)) {
                        /* No need to check busy - this probe should have gone. */
                        __unregister_trace_kprobe(tk);
                        ret = __register_trace_kprobe(tk);
                        if (ret)
                                pr_warn("Failed to re-register probe %s on %s: %d\n",
                                        trace_event_name(&tk->tp.call),
                                        mod->name, ret);
                }
        }
        mutex_unlock(&probe_lock);

        return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
        .notifier_call = trace_kprobe_module_callback,
        .priority = 1   /* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
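/*
 * e.g. the name derived for a probe on "mod:func.isra.1" at offset 0 is
 * "p_mod:func.isra.1_0", which becomes "p_mod_func_isra_1_0".
 */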
static inline void sanitize_event_name(char *name)
{
        while (*name++ != '\0')
                if (*name == ':' || *name == '.')
                        *name = '_';
}

static int create_trace_kprobe(int argc, char **argv)
{
        /*
         * Argument syntax:
         *  - Add kprobe:
         *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe:
         *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
         * Fetch args:
         *  $retval     : fetch return value
         *  $stack      : fetch stack address
         *  $stackN     : fetch Nth entry of stack (N:0-)
         *  $comm       : fetch current task comm
         *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         *  %REG        : fetch register REG
         * Dereferencing memory fetch:
         *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
         * Type of args:
         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
         */
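        /*
         * Example usage via the tracefs "kprobe_events" file (register
         * names are arch-specific; see Documentation/trace/kprobetrace.txt):
         *
         *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' > kprobe_events
         *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
         */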
        struct trace_kprobe *tk;
        int i, ret = 0;
        bool is_return = false, is_delete = false;
        char *symbol = NULL, *event = NULL, *group = NULL;
        int maxactive = 0;
        char *arg;
        unsigned long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];

        /* argc must be >= 1 */
        if (argv[0][0] == 'p')
                is_return = false;
        else if (argv[0][0] == 'r')
                is_return = true;
        else if (argv[0][0] == '-')
                is_delete = true;
        else {
                pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
                return -EINVAL;
        }

        event = strchr(&argv[0][1], ':');
        if (event) {
                event[0] = '\0';
                event++;
        }
        if (is_return && isdigit(argv[0][1])) {
                ret = kstrtouint(&argv[0][1], 0, &maxactive);
                if (ret) {
                        pr_info("Failed to parse maxactive.\n");
                        return ret;
                }
                /*
                 * kretprobe instances are iterated over via a list. The
                 * maximum should stay reasonable.
                 */
                if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
                        pr_info("Maxactive is too big (%d > %d).\n",
                                maxactive, KRETPROBE_MAXACTIVE_MAX);
                        return -E2BIG;
                }
        }

        if (event) {
                if (strchr(event, '/')) {
                        group = event;
                        event = strchr(group, '/') + 1;
                        event[-1] = '\0';
                        if (strlen(group) == 0) {
                                pr_info("Group name is not specified\n");
                                return -EINVAL;
                        }
                }
                if (strlen(event) == 0) {
                        pr_info("Event name is not specified\n");
                        return -EINVAL;
                }
        }
        if (!group)
                group = KPROBE_EVENT_SYSTEM;

        if (is_delete) {
                if (!event) {
                        pr_info("Delete command needs an event name.\n");
                        return -EINVAL;
                }
                mutex_lock(&probe_lock);
                tk = find_trace_kprobe(event, group);
                if (!tk) {
                        mutex_unlock(&probe_lock);
                        pr_info("Event %s/%s doesn't exist.\n", group, event);
                        return -ENOENT;
                }
                /* delete an event */
                ret = unregister_trace_kprobe(tk);
                if (ret == 0)
                        free_trace_kprobe(tk);
                mutex_unlock(&probe_lock);
                return ret;
        }

        if (argc < 2) {
                pr_info("Probe point is not specified.\n");
                return -EINVAL;
        }
        if (isdigit(argv[1][0])) {
                /* an address specified */
                ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
                if (ret) {
                        pr_info("Failed to parse address.\n");
                        return ret;
                }
        } else {
                /* a symbol specified */
                symbol = argv[1];
                /* TODO: support .init module functions */
                ret = traceprobe_split_symbol_offset(symbol, &offset);
                if (ret) {
                        pr_info("Failed to parse symbol.\n");
                        return ret;
                }
                if (offset && is_return &&
                    !function_offset_within_entry(NULL, symbol, offset)) {
                        pr_info("Given offset is not valid for return probe.\n");
                        return -EINVAL;
                }
        }
        argc -= 2; argv += 2;

        /* setup a probe */
        if (!event) {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
                                 is_return ? 'r' : 'p', symbol, offset);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
                                 is_return ? 'r' : 'p', addr);
                sanitize_event_name(buf);
                event = buf;
        }
        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
                               argc, is_return);
        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe.(%d)\n",
                        (int)PTR_ERR(tk));
                return PTR_ERR(tk);
        }

        /* parse arguments */
        ret = 0;
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                struct probe_arg *parg = &tk->tp.args[i];

                /* Increment count for freeing args in error case */
                tk->tp.nr_args++;

                /* Parse argument name */
                arg = strchr(argv[i], '=');
                if (arg) {
                        *arg++ = '\0';
                        parg->name = kstrdup(argv[i], GFP_KERNEL);
                } else {
                        arg = argv[i];
                        /* If argument name is omitted, set "argN" */
                        snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
                        parg->name = kstrdup(buf, GFP_KERNEL);
                }

                if (!parg->name) {
                        pr_info("Failed to allocate argument[%d] name.\n", i);
                        ret = -ENOMEM;
                        goto error;
                }

                if (!is_good_name(parg->name)) {
                        pr_info("Invalid argument[%d] name: %s\n",
                                i, parg->name);
                        ret = -EINVAL;
                        goto error;
                }

                if (traceprobe_conflict_field_name(parg->name,
                                                        tk->tp.args, i)) {
                        pr_info("Argument[%d] name '%s' conflicts with "
                                "another field.\n", i, argv[i]);
                        ret = -EINVAL;
                        goto error;
                }

                /* Parse fetch argument */
                ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
                                                is_return, true,
                                                kprobes_fetch_type_table);
                if (ret) {
                        pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
                        goto error;
                }
        }

        ret = register_trace_kprobe(tk);
        if (ret)
                goto error;
        return 0;

error:
        free_trace_kprobe(tk);
        return ret;
}

static int release_all_trace_kprobes(void)
{
        struct trace_kprobe *tk;
        int ret = 0;

        mutex_lock(&probe_lock);
        /* Ensure no probe is in use. */
        list_for_each_entry(tk, &probe_list, list)
                if (trace_probe_is_enabled(&tk->tp)) {
                        ret = -EBUSY;
                        goto end;
                }
        /* TODO: Use batch unregistration */
        while (!list_empty(&probe_list)) {
                tk = list_entry(probe_list.next, struct trace_kprobe, list);
                ret = unregister_trace_kprobe(tk);
                if (ret)
                        goto end;
                free_trace_kprobe(tk);
        }

end:
        mutex_unlock(&probe_lock);

        return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&probe_lock);
        return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
        mutex_unlock(&probe_lock);
}

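/*
 * Print one probe per line, in the same form accepted by probes_write(),
 * e.g.: "p:kprobes/myprobe do_sys_open dfd=%ax filename=%dx".
 */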
static int probes_seq_show(struct seq_file *m, void *v)
{
        struct trace_kprobe *tk = v;
        int i;

        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
        seq_printf(m, ":%s/%s", tk->tp.call.class->system,
                        trace_event_name(&tk->tp.call));

        if (!tk->symbol)
                seq_printf(m, " 0x%p", tk->rp.kp.addr);
        else if (tk->rp.kp.offset)
                seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
                           tk->rp.kp.offset);
        else
                seq_printf(m, " %s", trace_kprobe_symbol(tk));

        for (i = 0; i < tk->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
        seq_putc(m, '\n');

        return 0;
}

static const struct seq_operations probes_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_seq_show
};

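/*
 * Opening kprobe_events for writing with O_TRUNC (i.e. "> kprobe_events"
 * rather than ">> kprobe_events") first removes all existing probes.
 */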
static int probes_open(struct inode *inode, struct file *file)
{
        int ret;

        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = release_all_trace_kprobes();
                if (ret < 0)
                        return ret;
        }

        return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return traceprobe_probes_write(file, buffer, count, ppos,
                        create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
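/* Each kprobe_profile line is: EVENT-NAME HIT-COUNT MISS-COUNT. */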
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct trace_kprobe *tk = v;

        seq_printf(m, "  %-44s %15lu %15lu\n",
                   trace_event_name(&tk->tp.call),
                   trace_kprobe_nhit(tk),
                   tk->rp.kp.nmissed);

        return 0;
}

static const struct seq_operations profile_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
                    struct trace_event_file *trace_file)
{
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, dsize, pc;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->ip = (unsigned long)tk->rp.kp.addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                       struct pt_regs *regs,
                       struct trace_event_file *trace_file)
{
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, pc, dsize;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                     struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct kprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;
        u8 *data;
        int i;

        field = (struct kprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        data = (u8 *)&field[1];
        for (i = 0; i < tp->nr_args; i++)
                if (!tp->args[i].type->print(s, tp->args[i].name,
                                             data + tp->args[i].offset, field))
                        goto out;

        trace_seq_putc(s, '\n');
 out:
        return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
{
        struct kretprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;
        u8 *data;
        int i;

        field = (struct kretprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_puts(s, " <- ");

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        data = (u8 *)&field[1];
        for (i = 0; i < tp->nr_args; i++)
                if (!tp->args[i].type->print(s, tp->args[i].name,
                                             data + tp->args[i].offset, field))
                        goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}

static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret, i;
        struct kprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
        /* Set argument names as fields */
        for (i = 0; i < tk->tp.nr_args; i++) {
                struct probe_arg *parg = &tk->tp.args[i];

                ret = trace_define_field(event_call, parg->type->fmttype,
                                         parg->name,
                                         sizeof(field) + parg->offset,
                                         parg->type->size,
                                         parg->type->is_signed,
                                         FILTER_OTHER);
                if (ret)
                        return ret;
        }
        return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret, i;
        struct kretprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
        /* Set argument names as fields */
        for (i = 0; i < tk->tp.nr_args; i++) {
                struct probe_arg *parg = &tk->tp.args[i];

                ret = trace_define_field(event_call, parg->type->fmttype,
                                         parg->name,
                                         sizeof(field) + parg->offset,
                                         parg->type->size,
                                         parg->type->is_signed,
                                         FILTER_OTHER);
                if (ret)
                        return ret;
        }
        return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct bpf_prog *prog = call->prog;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (prog && !trace_call_bpf(prog, regs))
                return;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
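        /*
         * Perf prepends a u32 size field to each raw record; align the
         * combined size to u64 and drop the u32 again (the usual sizing
         * pattern around perf_trace_buf_alloc()).
         */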
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                    struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct bpf_prog *prog = call->prog;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (prog && !trace_call_bpf(prog, regs))
                return;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif  /* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return enable_trace_kprobe(tk, file);
        case TRACE_REG_UNREGISTER:
                return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return enable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_UNREGISTER:
                return disable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                kprobe_perf_func(tk, regs);
#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                kretprobe_perf_func(tk, ri, regs);
#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
        .trace          = print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
        struct trace_event_call *call = &tk->tp.call;
        int ret;

        /* Initialize trace_event_call */
        INIT_LIST_HEAD(&call->class->fields);
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
                call->class->define_fields = kretprobe_event_define_fields;
        } else {
                call->event.funcs = &kprobe_funcs;
                call->class->define_fields = kprobe_event_define_fields;
        }
        if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
                return -ENOMEM;
        ret = register_trace_event(&call->event);
        if (!ret) {
                kfree(call->print_fmt);
                return -ENODEV;
        }
        call->flags = TRACE_EVENT_FL_KPROBE;
        call->class->reg = kprobe_register;
        call->data = tk;
        ret = trace_add_event_call(call);
        if (ret) {
                pr_info("Failed to register kprobe event: %s\n",
                        trace_event_name(call));
                kfree(call->print_fmt);
                unregister_trace_event(&call->event);
        }
        return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
        int ret;

        /* tp->event is unregistered in trace_remove_event_call() */
        ret = trace_remove_event_call(&tk->tp.call);
        if (!ret)
                kfree(tk->tp.call.print_fmt);
        return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        if (register_module_notifier(&trace_kprobe_module_nb))
                return -EINVAL;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        /* Event list interface */
        entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
                                    NULL, &kprobe_events_ops);
        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_events' entry\n");

        /* Profile interface */
        entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
                                    NULL, &kprobe_profile_ops);
        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
        return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below.
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
        return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
        struct trace_event_file *file;

        list_for_each_entry(file, &tr->events, list)
                if (file->event_call == &tk->tp.call)
                        return file;

        return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_kprobe *tk;
        struct trace_event_file *file;

        if (tracing_is_disabled())
                return -ENODEV;

        target = kprobe_trace_selftest_target;

        pr_info("Testing kprobe tracing: ");

        ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
                                  "$stack $stack0 +0($stack)",
                                  create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
                                  "$retval", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting 2nd new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        if (warn)
                goto end;

        ret = target(1, 2, 3, 4, 5, 6);

        /*
         * Not expecting an error here: the check only prevents the
         * optimizer from removing the call to target(), which otherwise
         * has no side effects and would never be performed.
         */
        if (ret != 21)
                warn++;

        /* Disable trace points before removing them */
        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting 2nd test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe2 hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        ret = traceprobe_command("-:testprobe", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

        ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

end:
        release_all_trace_kprobes();
        /*
         * Wait for the optimizer work to finish. Otherwise it might fiddle
         * with probes in already freed __init text.
         */
        wait_for_kprobe_optimizer();
        if (warn)
                pr_cont("NG: Some tests failed. Please check them.\n");
        else
                pr_cont("OK\n");
        return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif