 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"
DEFINE_MUTEX(event_mutex);
LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
	struct ftrace_event_field *field;
	field = kzalloc(sizeof(*field), GFP_KERNEL);
	field->name = kstrdup(name, GFP_KERNEL);
	field->type = kstrdup(type, GFP_KERNEL);
	field->offset = offset;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);
EXPORT_SYMBOL_GPL(trace_define_field);
static void trace_destroy_fields(struct ftrace_event_call *call)
	struct ftrace_event_field *field, *next;
	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
#endif /* CONFIG_MODULES */
static void ftrace_clear_events(void)
	struct ftrace_event_call *call;
	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
			tracing_stop_cmdline_record();
	mutex_unlock(&event_mutex);
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
			tracing_stop_cmdline_record();
		if (!call->enabled) {
			tracing_start_cmdline_record();
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
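 *
 * Illustrative calls (a sketch only; "sched" and "sched_switch" are example
 * names and may not exist in every configuration):
 *
 *	__ftrace_set_clr_event("sched", NULL, NULL, 1);
 *		enables anything whose event or system name is "sched"
 *	__ftrace_set_clr_event(NULL, "sched", NULL, 1);
 *		enables every event in the "sched" subsystem
 *	__ftrace_set_clr_event(NULL, NULL, "sched_switch", 0);
 *		disables "sched_switch" in whichever subsystem defines it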
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
	struct ftrace_event_call *call;
	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
		if (sub && strcmp(sub, call->system) != 0)
		if (event && strcmp(event, call->name) != 0)
		ftrace_event_enable_disable(call, set);
	mutex_unlock(&event_mutex);
static int ftrace_set_clr_event(char *buf, int set)
	char *event = NULL, *sub = NULL, *match;
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
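	 *
	 * A usage sketch (illustrative; the "sched" names are only examples):
	 *
	 *	echo sched:sched_switch > set_event
	 *	echo 'sched:*' > set_event
	 *	echo '*:sched_switch' > set_event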
	match = strsep(&buf, ":");
		if (!strlen(sub) || strcmp(sub, "*") == 0)
		if (!strlen(event) || strcmp(event, "*") == 0)
	return __ftrace_set_clr_event(match, sub, event, set);
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 * This is a way for other parts of the kernel to enable or disable
 * Returns 0 on success, -EINVAL if the parameters do not match any
int trace_set_clr_event(const char *system, const char *event, int set)
	return __ftrace_set_clr_event(NULL, system, event, set);
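/*
 * Minimal in-kernel usage sketch (the names below are illustrative
 * assumptions, not guaranteed to exist in every config):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);	enable one event
 *	trace_set_clr_event("sched", NULL, 0);			disable all of "sched"
 */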
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127
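/*
 * The parsing buffer below is allocated with one extra byte,
 * kmalloc(EVENT_BUF_SIZE + 1), so there is always room for a terminating
 * NUL after at most EVENT_BUF_SIZE characters are copied in.
 */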
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
	ret = tracing_update_buffers();
	ret = get_user(ch, ubuf++);
	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
	/* Only white space found? */
	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;
	while (cnt && !isspace(ch)) {
		ret = get_user(ch, ubuf++);
	ret = ftrace_set_clr_event(buf, set);
t_next(struct seq_file *m, void *v, loff_t *pos)
	struct list_head *list = m->private;
	struct ftrace_event_call *call;
		if (list == &ftrace_events)
		call = list_entry(list, struct ftrace_event_call, list);
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
	m->private = list->next;
static void *t_start(struct seq_file *m, loff_t *pos)
	mutex_lock(&event_mutex);
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
s_next(struct seq_file *m, void *v, loff_t *pos)
	struct list_head *list = m->private;
	struct ftrace_event_call *call;
	if (list == &ftrace_events)
	call = list_entry(list, struct ftrace_event_call, list);
	if (!call->enabled) {
	m->private = list->next;
static void *s_start(struct seq_file *m, loff_t *pos)
	mutex_lock(&event_mutex);
		m->private = ftrace_events.next;
	return s_next(m, NULL, pos);
static int t_show(struct seq_file *m, void *v)
	struct ftrace_event_call *call = v;
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);
static void t_stop(struct seq_file *m, void *p)
	mutex_unlock(&event_mutex);
ftrace_event_seq_open(struct inode *inode, struct file *file)
	const struct seq_operations *seq_ops;
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();
	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	if (cnt >= sizeof(buf))
	if (copy_from_user(&buf, ubuf, cnt))
	ret = strict_strtoul(buf, 10, &val);
	ret = tracing_update_buffers();
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
		if (system && strcmp(call->system, system) != 0)
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		set |= (1 << !!call->enabled);
		 * If we have a mixture, no need to look further.
	mutex_unlock(&event_mutex);
	buf[0] = set_to_char[set];
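	/*
	 * Worked example of the encoding: seeing a disabled event sets bit 0
	 * of "set", seeing an enabled one sets bit 1.  All-disabled gives
	 * set == 1 ('0'), all-enabled gives set == 2 ('1'), a mixture gives
	 * set == 3 ('X'), and no matching events leaves set == 0 ('?').
	 */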
	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	const char *system = filp->private_data;
	if (cnt >= sizeof(buf))
	if (copy_from_user(&buf, ubuf, cnt))
	ret = strict_strtoul(buf, 10, &val);
	ret = tracing_update_buffers();
	if (val != 0 && val != 1)
	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
extern char *__bad_type_size(void);
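/*
 * __bad_type_size() is deliberately declared but never defined: if a
 * FIELD() entry below names a type whose size differs from the matching
 * trace_entry member, the call survives constant folding and the build
 * fails at link time.  When the sizes do match, FIELD(unsigned short, type)
 * evaluates to the four printf arguments "unsigned short", "common_type",
 * offsetof(typeof(field), type) and sizeof(field.type).
 */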
#define FIELD(type, name)					\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
static int trace_write_header(struct trace_seq *s)
	struct trace_entry field;
	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	/* If any of the first writes fail, so will the show_format. */
	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);
	r = call->show_format(s);
		 * ug! The format output is bigger than a PAGE!!
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
	r = simple_read_from_buffer(ubuf, cnt, ppos,
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
	struct ftrace_event_call *call = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	trace_seq_printf(s, "%d\n", call->id);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct ftrace_event_call *call = filp->private_data;
	if (cnt >= PAGE_SIZE)
	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct event_subsystem *system = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct event_subsystem *system = filp->private_data;
	if (cnt >= PAGE_SIZE)
	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
	int (*func)(struct trace_seq *s) = filp->private_data;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
static const struct seq_operations show_event_seq_ops = {
static const struct seq_operations show_set_event_seq_ops = {
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.release = seq_release,
static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.write = ftrace_event_write,
	.release = seq_release,
static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
static struct dentry *event_trace_events_dir(void)
	static struct dentry *d_tracer;
	static struct dentry *d_events;
	d_tracer = tracing_init_dentry();
	d_events = debugfs_create_dir("events", d_tracer);
		pr_warning("Could not create debugfs "
			   "'events' directory\n");
static LIST_HEAD(event_subsystems);
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
	struct event_subsystem *system;
	struct dentry *entry;
	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
		pr_warning("No memory to create event subsystem %s\n",
	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
	system->name = kstrdup(name, GFP_KERNEL);
		debugfs_remove(system->entry);
	list_add(&system->list, &event_subsystems);
	system->filter = NULL;
	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
		return system->entry;
	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);
	return system->entry;
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
	struct dentry *entry;
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
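	 *
	 * For example (illustrative), a header that does
	 * "#define TRACE_SYSTEM sched" before defining its trace points gets
	 * a "sched" directory under events/, while events left with the
	 * default system name stay directly in the events/ directory.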
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);
	if (call->raw_init) {
		ret = call->raw_init();
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
	call->dir = debugfs_create_dir(call->name, d_events);
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
	entry = trace_create_file("enable", 0644, call->dir, call,
	entry = trace_create_file("id", 0444, call->dir, call,
	if (call->define_fields) {
		ret = call->define_fields();
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
		entry = trace_create_file("filter", 0644, call->dir, call,
	/* A trace may not want to export its format */
	if (!call->show_format)
	entry = trace_create_file("format", 0444, call->dir, call,
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
#ifdef CONFIG_MODULES
static LIST_HEAD(ftrace_module_file_list);
 * Modules must own their file_operations to keep up with
 * reference counting.
struct ftrace_module_file_ops {
	struct list_head list;
	struct file_operations id;
	struct file_operations enable;
	struct file_operations format;
	struct file_operations filter;
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
	struct ftrace_module_file_ops *file_ops;
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
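	 *
	 * Roughly, each fops member below is a copy of the corresponding
	 * static fops in this file with .owner set to the module, so an open
	 * debugfs file pins the module and it cannot be unloaded underneath
	 * a reader or writer.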
	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;
	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;
	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;
	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;
	list_add(&file_ops->list, &ftrace_module_file_list);
static void trace_module_add_events(struct module *mod)
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;
	d_events = event_trace_events_dir();
	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		 * This module has events, create file ops for this module
		 * if not already done.
			file_ops = trace_create_file_ops(mod);
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
static void trace_module_remove_events(struct module *mod)
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->enabled) {
				tracing_stop_cmdline_record();
			unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
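	 * Otherwise the buffer could still hold events whose print formats
	 * and callbacks live in module text that is about to disappear.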
		tracing_reset_current_online_cpus();
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
	struct module *mod = data;
	mutex_lock(&event_mutex);
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
	mutex_unlock(&event_mutex);
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
#endif /* CONFIG_MODULES */
struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
static __init int event_trace_init(void)
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	d_tracer = tracing_init_dentry();
	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");
	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");
	d_events = event_trace_events_dir();
	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);
	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);
	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);
	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	ret = register_module_notifier(&trace_module_nb);
		pr_warning("Failed to register trace events module notifier\n");
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);
	mutex_lock(&test_mutex);
	mutex_unlock(&test_mutex);
static __init int event_test_thread(void *unused)
	test_malloc = kmalloc(1234, GFP_KERNEL);
		pr_info("failed to kmalloc\n");
	schedule_on_each_cpu(test_work);
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
 * Do various things that may trigger events.
static __init void event_test_stuff(void)
	struct task_struct *test_thread;
	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	kthread_stop(test_thread);
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
static __init void event_trace_self_tests(void)
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	pr_info("Running tests on trace events:\n");
	list_for_each_entry(call, &ftrace_events, list) {
		/* Only test those that have a regfunc */
		pr_info("Testing event %s: ", call->name);
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
		tracing_start_cmdline_record();
		tracing_stop_cmdline_record();
	/* Now test at the sub system level */
	pr_info("Running tests on trace event systems:\n");
	list_for_each_entry(system, &event_subsystems, list) {
		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
		pr_info("Testing event system %s: ", system->name);
		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
	/* Test with all events enabled */
	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
#ifdef CONFIG_FUNCTION_TRACER
static DEFINE_PER_CPU(atomic_t, test_event_disable);
function_test_events_call(unsigned long ip, unsigned long parent_ip)
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
	local_save_flags(flags);
	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
	entry = ring_buffer_event_data(event);
	entry->parent_ip = parent_ip;
	trace_nowake_buffer_unlock_commit(event, flags, pc);
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
static struct ftrace_ops trace_ops __initdata =
	.func = function_test_events_call,
static __init void event_trace_self_test_with_function(void)
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
static __init void event_trace_self_test_with_function(void)
static __init int event_trace_self_tests_init(void)
	event_trace_self_tests();
	event_trace_self_test_with_function();
late_initcall(event_trace_self_tests_init);