2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/trace.h>
44 #include <linux/sched/rt.h>
47 #include "trace_output.h"
50 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
53 bool ring_buffer_expanded;
56 * We need to change this state when a selftest is running.
57 * A selftest will look into the ring-buffer to count the
58 * entries inserted during the selftest, although some concurrent
59 * insertions into the ring-buffer, such as trace_printk, could occur
60 * at the same time, giving false positive or negative results.
62 static bool __read_mostly tracing_selftest_running;
65 * If a tracer is running, we do not want to run SELFTEST.
67 bool __read_mostly tracing_selftest_disabled;
69 /* Pipe tracepoints to printk */
70 struct trace_iterator *tracepoint_print_iter;
71 int tracepoint_printk;
72 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
74 /* For tracers that don't implement custom flags */
75 static struct tracer_opt dummy_tracer_opt[] = {
80 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
86 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event occurred.
90 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
93 * Kill all tracing for good (never come back).
94 * It is initialized to 1 but will turn to zero if the initialization
95 * of the tracer is successful. But that is the only place that sets this back to zero.
98 static int tracing_disabled = 1;
100 cpumask_var_t __read_mostly tracing_buffer_mask;
103 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
105 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
106 * is set, then ftrace_dump is called. This will output the contents
107 * of the ftrace buffers to the console. This is very useful for
108 * capturing traces that lead to crashes and outputting them to a serial console.
111 * It is off by default, but you can enable it either by specifying
112 * "ftrace_dump_on_oops" on the kernel command line, or by setting
113 * /proc/sys/kernel/ftrace_dump_on_oops
114 * Set 1 if you want to dump buffers of all CPUs
115 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
118 enum ftrace_dump_mode ftrace_dump_on_oops;
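/*
 * Illustrative example (not from this file): the dump mode above can be
 * chosen either at boot or at run time, e.g.
 *
 *	ftrace_dump_on_oops=orig_cpu                     (kernel command line)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops    (run time)
 */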
120 /* When set, tracing will stop when a WARN*() is hit */
121 int __disable_trace_on_warning;
123 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
124 /* Map of enums to their values, for "eval_map" file */
125 struct trace_eval_map_head {
127 unsigned long length;
130 union trace_eval_map_item;
132 struct trace_eval_map_tail {
134 * "end" is first and points to NULL as it must be different
135 * than "mod" or "eval_string"
137 union trace_eval_map_item *next;
138 const char *end; /* points to NULL */
141 static DEFINE_MUTEX(trace_eval_mutex);
144 * The trace_eval_maps are saved in an array with two extra elements,
145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
148 * pointer to the next array of saved eval_map items.
150 union trace_eval_map_item {
151 struct trace_eval_map map;
152 struct trace_eval_map_head head;
153 struct trace_eval_map_tail tail;
156 static union trace_eval_map_item *trace_eval_maps;
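/*
 * Layout sketch (illustrative), assuming a single saved array of N maps:
 *
 *	trace_eval_maps[0]	.head	(head.length = N, head.mod)
 *	trace_eval_maps[1..N]	.map	(the saved trace_eval_map entries)
 *	trace_eval_maps[N+1]	.tail	(tail.next -> next saved array, or NULL)
 */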
157 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
159 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
161 #define MAX_TRACER_SIZE 100
162 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
163 static char *default_bootup_tracer;
165 static bool allocate_snapshot;
167 static int __init set_cmdline_ftrace(char *str)
169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
170 default_bootup_tracer = bootup_tracer_buf;
171 /* We are using ftrace early, expand it */
172 ring_buffer_expanded = true;
175 __setup("ftrace=", set_cmdline_ftrace);
177 static int __init set_ftrace_dump_on_oops(char *str)
179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
191 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
193 static int __init stop_trace_on_warning(char *str)
195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
199 __setup("traceoff_on_warning", stop_trace_on_warning);
201 static int __init boot_alloc_snapshot(char *str)
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
208 __setup("alloc_snapshot", boot_alloc_snapshot);
211 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
213 static int __init set_trace_boot_options(char *str)
215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
218 __setup("trace_options=", set_trace_boot_options);
220 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221 static char *trace_boot_clock __initdata;
223 static int __init set_trace_boot_clock(char *str)
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
229 __setup("trace_clock=", set_trace_boot_clock);
231 static int __init set_tracepoint_printk(char *str)
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
237 __setup("tp_printk", set_tracepoint_printk);
239 unsigned long long ns2usecs(u64 nsec)
246 /* trace_flags holds trace_options default values */
247 #define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
254 /* trace_options that are only supported by global_trace */
255 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
258 /* trace_flags that are default zero for instances */
259 #define ZEROED_TRACE_FLAGS \
260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
266 static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
270 LIST_HEAD(ftrace_trace_arrays);
272 int trace_array_get(struct trace_array *this_tr)
274 struct trace_array *tr;
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
285 mutex_unlock(&trace_types_lock);
290 static void __trace_array_put(struct trace_array *this_tr)
292 WARN_ON(!this_tr->ref);
296 void trace_array_put(struct trace_array *this_tr)
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
303 int call_filter_check_discard(struct trace_event_call *call, void *rec,
304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
309 __trace_event_discard_commit(buffer, event);
316 void trace_free_pid_list(struct trace_pid_list *pid_list)
318 vfree(pid_list->pids);
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
327 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
330 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
336 if (search_pid >= filtered_pids->pid_max)
339 return test_bit(search_pid, filtered_pids->pids);
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
352 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
376 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
383 /* For forks, we only add if the forking task is listed */
385 if (!trace_find_filtered_pid(pid_list, self->pid))
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
393 /* "self" is set for forks, and NULL for exits */
395 set_bit(task->pid, pid_list->pids);
397 clear_bit(task->pid, pid_list->pids);
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
412 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
414 unsigned long pid = (unsigned long)v;
418 /* pid already is +1 of the actual previous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
433 * This is used by seq_file "start" operation to start the iteration
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
439 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
460 * Can be directly used by seq_file operations to display the current
463 int trace_pid_show(struct seq_file *m, void *v)
465 unsigned long pid = (unsigned long)v - 1;
467 seq_printf(m, "%lu\n", pid);
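/*
 * Illustrative sketch (hypothetical file, names made up): the pid helpers
 * above are meant to back a seq_file interface, roughly wired like this.
 */
#if 0
static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_start,	/* wraps trace_pid_start()	*/
	.next	= example_pid_next,	/* wraps trace_pid_next()	*/
	.stop	= example_pid_stop,
	.show	= trace_pid_show,
};
#endif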
471 /* 128 should be much more than enough */
472 #define PID_BUF_SIZE 127
474 int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
491 * Always recreate a new array. The write is an all or nothing
492 * operation. Always create a new array when adding new pids by
493 * the user. If the operation fails, then the current list is left untouched.
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
500 pid_list->pid_max = READ_ONCE(pid_max);
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
513 /* copy the current bits to the new max */
514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
516 set_bit(pid, pid_list->pids);
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
533 parser.buffer[parser.idx] = 0;
536 if (kstrtoul(parser.buffer, 0, &val))
538 if (val >= pid_list->pid_max)
543 set_bit(pid, pid_list->pids);
546 trace_parser_clear(&parser);
549 trace_parser_put(&parser);
552 trace_free_pid_list(pid_list);
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
563 *new_pid_list = pid_list;
568 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
572 /* Early boot up does not have a buffer yet */
574 return trace_clock_local();
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
582 u64 ftrace_now(int cpu)
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
588 * tracing_is_enabled - Show if global_trace has been disabled
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
596 int tracing_is_enabled(void)
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
604 return !global_trace.buffer_disabled;
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded up to page size.
612 * This number is purposely set to a low number of 16384.
613 * If the dump on oops happens, it will be much appreciated
614 * to not have to wait for all that output. Anyway this can be
615 * configured at boot time and at run time.
617 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
619 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
621 /* trace_types holds a link list of available tracers. */
622 static struct tracer *trace_types __read_mostly;
625 * trace_types_lock is used to protect the trace_types list.
627 DEFINE_MUTEX(trace_types_lock);
630 * serialize the access of the ring buffer
632 * The ring buffer serializes readers, but it is low level protection.
633 * The validity of the events (which are returned by ring_buffer_peek() etc.)
634 * is not protected by the ring buffer.
636 * The content of events may become garbage if we allow other processes to consume
637 * these events concurrently:
638 * A) the page of the consumed events may become a normal page
639 * (not a reader page) in the ring buffer, and this page will be rewritten
640 * by the event producer.
641 * B) The page of the consumed events may become a page for splice_read,
642 * and this page will be returned to the system.
644 * These primitives allow multiple processes to access different cpu ring buffers simultaneously.
647 * These primitives don't distinguish read-only and read-consume access.
648 * Multiple read-only accesses are also serialized.
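/*
 * Illustrative usage sketch (hypothetical reader, not part of this file):
 * a per-cpu reader locks only its cpu, while a reader of every buffer
 * passes RING_BUFFER_ALL_CPUS.
 */
#if 0
	trace_access_lock(iter->cpu_file);
	/* ... consume events from the selected ring buffer(s) ... */
	trace_access_unlock(iter->cpu_file);
#endif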
652 static DECLARE_RWSEM(all_cpu_access_lock);
653 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
655 static inline void trace_access_lock(int cpu)
657 if (cpu == RING_BUFFER_ALL_CPUS) {
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
661 /* gain it for accessing a cpu ring buffer. */
663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
664 down_read(&all_cpu_access_lock);
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
671 static inline void trace_access_unlock(int cpu)
673 if (cpu == RING_BUFFER_ALL_CPUS) {
674 up_write(&all_cpu_access_lock);
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
681 static inline void trace_access_lock_init(void)
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
691 static DEFINE_MUTEX(access_lock);
693 static inline void trace_access_lock(int cpu)
696 mutex_lock(&access_lock);
699 static inline void trace_access_unlock(int cpu)
702 mutex_unlock(&access_lock);
705 static inline void trace_access_lock_init(void)
711 #ifdef CONFIG_STACKTRACE
712 static void __ftrace_trace_stack(struct ring_buffer *buffer,
714 int skip, int pc, struct pt_regs *regs);
715 static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
718 int skip, int pc, struct pt_regs *regs);
721 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
723 int skip, int pc, struct pt_regs *regs)
726 static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
729 int skip, int pc, struct pt_regs *regs)
735 static __always_inline void
736 trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
739 struct trace_entry *ent = ring_buffer_event_data(event);
741 tracing_generic_entry_update(ent, flags, pc);
745 static __always_inline struct ring_buffer_event *
746 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
749 unsigned long flags, int pc)
751 struct ring_buffer_event *event;
753 event = ring_buffer_lock_reserve(buffer, len);
755 trace_event_setup(event, type, flags, pc);
760 void tracer_tracing_on(struct trace_array *tr)
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
778 * tracing_on - enable tracing buffers
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
783 void tracing_on(void)
785 tracer_tracing_on(&global_trace);
787 EXPORT_SYMBOL_GPL(tracing_on);
790 static __always_inline void
791 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
793 __this_cpu_write(trace_taskinfo_save, true);
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
802 ring_buffer_unlock_commit(buffer, event);
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
811 int __trace_puts(unsigned long ip, const char *str, int size)
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
823 pc = preempt_count();
825 if (unlikely(tracing_selftest_running || tracing_disabled))
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
837 entry = ring_buffer_event_data(event);
840 memcpy(&entry->buf, str, size);
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
847 entry->buf[size] = '\0';
849 __buffer_unlock_commit(buffer, event);
850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
854 EXPORT_SYMBOL_GPL(__trace_puts);
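/*
 * Illustrative usage (assumption: callers normally reach this through the
 * trace_puts() macro declared in the tracing headers rather than calling
 * __trace_puts() directly), e.g.
 *
 *	trace_puts("reached the slow path\n");
 */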
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
861 int __trace_bputs(unsigned long ip, const char *str)
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
873 pc = preempt_count();
875 if (unlikely(tracing_selftest_running || tracing_disabled))
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
885 entry = ring_buffer_event_data(event);
889 __buffer_unlock_commit(buffer, event);
890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
894 EXPORT_SYMBOL_GPL(__trace_bputs);
896 #ifdef CONFIG_TRACER_SNAPSHOT
897 static void tracing_snapshot_instance(struct trace_array *tr)
899 struct tracer *tracer = tr->current_trace;
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
908 if (!tr->allocated_snapshot) {
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id());
924 local_irq_restore(flags);
928 * tracing_snapshot - take a snapshot of the current buffer.
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
941 void tracing_snapshot(void)
943 struct trace_array *tr = &global_trace;
945 tracing_snapshot_instance(tr);
947 EXPORT_SYMBOL_GPL(tracing_snapshot);
949 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950 struct trace_buffer *size_buf, int cpu_id);
951 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
953 static int alloc_snapshot(struct trace_array *tr)
957 if (!tr->allocated_snapshot) {
959 /* allocate spare buffer */
960 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
965 tr->allocated_snapshot = true;
971 static void free_snapshot(struct trace_array *tr)
974 * We don't free the ring buffer; instead, we resize it because
975 * the max_tr ring buffer has some state (e.g. ring->clock) and
976 * we want to preserve it.
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979 set_buffer_entries(&tr->max_buffer, 1);
980 tracing_reset_online_cpus(&tr->max_buffer);
981 tr->allocated_snapshot = false;
985 * tracing_alloc_snapshot - allocate snapshot buffer.
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
994 int tracing_alloc_snapshot(void)
996 struct trace_array *tr = &global_trace;
999 ret = alloc_snapshot(tr);
1004 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1007 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1009 * This is similar to tracing_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1017 void tracing_snapshot_alloc(void)
1021 ret = tracing_alloc_snapshot();
1027 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
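/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * allocate the snapshot buffer from a context that may sleep, then take
 * snapshots when the interesting condition fires while the live buffer
 * keeps tracing.
 */
#if 0
	if (tracing_alloc_snapshot() == 0 && condition_hit)
		tracing_snapshot();
#endif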
1029 void tracing_snapshot(void)
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1033 EXPORT_SYMBOL_GPL(tracing_snapshot);
1034 int tracing_alloc_snapshot(void)
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1039 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1040 void tracing_snapshot_alloc(void)
1045 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1046 #endif /* CONFIG_TRACER_SNAPSHOT */
1048 void tracer_tracing_off(struct trace_array *tr)
1050 if (tr->trace_buffer.buffer)
1051 ring_buffer_record_off(tr->trace_buffer.buffer);
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races where it gets disabled but we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1060 tr->buffer_disabled = 1;
1061 /* Make the flag seen by readers */
1066 * tracing_off - turn off tracing buffers
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1073 void tracing_off(void)
1075 tracer_tracing_off(&global_trace);
1077 EXPORT_SYMBOL_GPL(tracing_off);
1079 void disable_trace_on_warning(void)
1081 if (__disable_trace_on_warning)
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr: the trace array to know if ring buffer is enabled
1089 * Shows the real state of the ring buffer, whether it is enabled or not.
1091 int tracer_tracing_is_on(struct trace_array *tr)
1093 if (tr->trace_buffer.buffer)
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095 return !tr->buffer_disabled;
1099 * tracing_is_on - show state of ring buffers enabled
1101 int tracing_is_on(void)
1103 return tracer_tracing_is_on(&global_trace);
1105 EXPORT_SYMBOL_GPL(tracing_is_on);
1107 static int __init set_buf_size(char *str)
1109 unsigned long buf_size;
1113 buf_size = memparse(str, &str);
1114 /* nr_entries can not be zero */
1117 trace_buf_size = buf_size;
1120 __setup("trace_buf_size=", set_buf_size);
1122 static int __init set_tracing_thresh(char *str)
1124 unsigned long threshold;
1129 ret = kstrtoul(str, 0, &threshold);
1132 tracing_thresh = threshold * 1000;
1135 __setup("tracing_thresh=", set_tracing_thresh);
1137 unsigned long nsecs_to_usecs(unsigned long nsecs)
1139 return nsecs / 1000;
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1146 * of strings in the order that the evals (enum) were defined.
1151 /* These must match the bit positions in trace_iterator_flags */
1152 static const char *trace_options[] = {
1160 int in_ns; /* is this clock in nanoseconds? */
1161 } trace_clocks[] = {
1162 { trace_clock_local, "local", 1 },
1163 { trace_clock_global, "global", 1 },
1164 { trace_clock_counter, "counter", 0 },
1165 { trace_clock_jiffies, "uptime", 0 },
1166 { trace_clock, "perf", 1 },
1167 { ktime_get_mono_fast_ns, "mono", 1 },
1168 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1169 { ktime_get_boot_fast_ns, "boot", 1 },
1174 * trace_parser_get_init - gets the buffer for trace parser
1176 int trace_parser_get_init(struct trace_parser *parser, int size)
1178 memset(parser, 0, sizeof(*parser));
1180 parser->buffer = kmalloc(size, GFP_KERNEL);
1181 if (!parser->buffer)
1184 parser->size = size;
1189 * trace_parser_put - frees the buffer for trace parser
1191 void trace_parser_put(struct trace_parser *parser)
1193 kfree(parser->buffer);
1194 parser->buffer = NULL;
1198 * trace_get_user - reads the user input string separated by space
1199 * (matched by isspace(ch))
1201 * For each string found the 'struct trace_parser' is updated,
1202 * and the function returns.
1204 * Returns number of bytes read.
1206 * See kernel/trace/trace.h for 'struct trace_parser' details.
1208 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1209 size_t cnt, loff_t *ppos)
1216 trace_parser_clear(parser);
1218 ret = get_user(ch, ubuf++);
1226 * The parser is not finished with the last write,
1227 * continue reading the user input without skipping spaces.
1229 if (!parser->cont) {
1230 /* skip white space */
1231 while (cnt && isspace(ch)) {
1232 ret = get_user(ch, ubuf++);
1239 /* only spaces were written */
1249 /* read the non-space input */
1250 while (cnt && !isspace(ch)) {
1251 if (parser->idx < parser->size - 1)
1252 parser->buffer[parser->idx++] = ch;
1257 ret = get_user(ch, ubuf++);
1264 /* We either got finished input or we have to wait for another call. */
1266 parser->buffer[parser->idx] = 0;
1267 parser->cont = false;
1268 } else if (parser->idx < parser->size - 1) {
1269 parser->cont = true;
1270 parser->buffer[parser->idx++] = ch;
1283 /* TODO add a seq_buf_to_buffer() */
1284 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1288 if (trace_seq_used(s) <= s->seq.readpos)
1291 len = trace_seq_used(s) - s->seq.readpos;
1294 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1296 s->seq.readpos += cnt;
1300 unsigned long __read_mostly tracing_thresh;
1302 #ifdef CONFIG_TRACER_MAX_TRACE
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1309 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1311 struct trace_buffer *trace_buf = &tr->trace_buffer;
1312 struct trace_buffer *max_buf = &tr->max_buffer;
1313 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1314 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1317 max_buf->time_start = data->preempt_timestamp;
1319 max_data->saved_latency = tr->max_latency;
1320 max_data->critical_start = data->critical_start;
1321 max_data->critical_end = data->critical_end;
1323 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1324 max_data->pid = tsk->pid;
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1330 max_data->uid = current_uid();
1332 max_data->uid = task_uid(tsk);
1334 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1335 max_data->policy = tsk->policy;
1336 max_data->rt_priority = tsk->rt_priority;
1338 /* record this task's comm */
1339 tracing_record_cmdline(tsk);
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1352 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1354 struct ring_buffer *buf;
1359 WARN_ON_ONCE(!irqs_disabled());
1361 if (!tr->allocated_snapshot) {
1362 /* Only the nop tracer should hit this when disabling */
1363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1367 arch_spin_lock(&tr->max_lock);
1369 buf = tr->trace_buffer.buffer;
1370 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1371 tr->max_buffer.buffer = buf;
1373 __update_max_tr(tr, tsk, cpu);
1374 arch_spin_unlock(&tr->max_lock);
1378 * update_max_tr_single - only copy one trace over, and reset the rest
1380 * @tsk: task with the latency
1381 * @cpu: the cpu of the buffer to copy.
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1386 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1393 WARN_ON_ONCE(!irqs_disabled());
1394 if (!tr->allocated_snapshot) {
1395 /* Only the nop tracer should hit this when disabling */
1396 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1400 arch_spin_lock(&tr->max_lock);
1402 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1404 if (ret == -EBUSY) {
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1411 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1412 "Failed to swap buffers due to commit in progress\n");
1415 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1417 __update_max_tr(tr, tsk, cpu);
1418 arch_spin_unlock(&tr->max_lock);
1420 #endif /* CONFIG_TRACER_MAX_TRACE */
1422 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter, iter->cpu_file))
1428 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1432 #ifdef CONFIG_FTRACE_STARTUP_TEST
1433 static bool selftests_can_run;
1435 struct trace_selftests {
1436 struct list_head list;
1437 struct tracer *type;
1440 static LIST_HEAD(postponed_selftests);
1442 static int save_selftest(struct tracer *type)
1444 struct trace_selftests *selftest;
1446 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1450 selftest->type = type;
1451 list_add(&selftest->list, &postponed_selftests);
1455 static int run_tracer_selftest(struct tracer *type)
1457 struct trace_array *tr = &global_trace;
1458 struct tracer *saved_tracer = tr->current_trace;
1461 if (!type->selftest || tracing_selftest_disabled)
1465 * If a tracer registers early in boot up (before scheduling is
1466 * initialized and such), then do not run its selftests yet.
1467 * Instead, run it a little later in the boot process.
1469 if (!selftests_can_run)
1470 return save_selftest(type);
1473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
1479 tracing_reset_online_cpus(&tr->trace_buffer);
1481 tr->current_trace = type;
1483 #ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type->use_max_tr) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded)
1487 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1488 RING_BUFFER_ALL_CPUS);
1489 tr->allocated_snapshot = true;
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type->name);
1495 ret = type->selftest(type, tr);
1496 /* the test is responsible for resetting too */
1497 tr->current_trace = saved_tracer;
1499 printk(KERN_CONT "FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr->trace_buffer);
1507 #ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type->use_max_tr) {
1509 tr->allocated_snapshot = false;
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded)
1513 ring_buffer_resize(tr->max_buffer.buffer, 1,
1514 RING_BUFFER_ALL_CPUS);
1518 printk(KERN_CONT "PASSED\n");
1522 static __init int init_trace_selftests(void)
1524 struct trace_selftests *p, *n;
1525 struct tracer *t, **last;
1528 selftests_can_run = true;
1530 mutex_lock(&trace_types_lock);
1532 if (list_empty(&postponed_selftests))
1535 pr_info("Running postponed tracer tests:\n");
1537 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1538 ret = run_tracer_selftest(p->type);
1539 /* If the test fails, then warn and remove from available_tracers */
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1543 last = &trace_types;
1544 for (t = trace_types; t; t = t->next) {
1557 mutex_unlock(&trace_types_lock);
1561 core_initcall(init_trace_selftests);
1563 static inline int run_tracer_selftest(struct tracer *type)
1567 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1569 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1571 static void __init apply_trace_boot_options(void);
1574 * register_tracer - register a tracer with the ftrace system.
1575 * @type: the plugin for the tracer
1577 * Register a new plugin tracer.
1579 int __init register_tracer(struct tracer *type)
1585 pr_info("Tracer must have a name\n");
1589 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1594 mutex_lock(&trace_types_lock);
1596 tracing_selftest_running = true;
1598 for (t = trace_types; t; t = t->next) {
1599 if (strcmp(type->name, t->name) == 0) {
1601 pr_info("Tracer %s already registered\n",
1608 if (!type->set_flag)
1609 type->set_flag = &dummy_set_flag;
1611 /* allocate a dummy tracer_flags */
1612 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1617 type->flags->val = 0;
1618 type->flags->opts = dummy_tracer_opt;
1620 if (!type->flags->opts)
1621 type->flags->opts = dummy_tracer_opt;
1623 /* store the tracer for __set_tracer_option */
1624 type->flags->trace = type;
1626 ret = run_tracer_selftest(type);
1630 type->next = trace_types;
1632 add_tracer_options(&global_trace, type);
1635 tracing_selftest_running = false;
1636 mutex_unlock(&trace_types_lock);
1638 if (ret || !default_bootup_tracer)
1641 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1644 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1645 /* Do we want this tracer to start on bootup? */
1646 tracing_set_tracer(&global_trace, type->name);
1647 default_bootup_tracer = NULL;
1649 apply_trace_boot_options();
1651 /* disable other selftests, since this will break it. */
1652 tracing_selftest_disabled = true;
1653 #ifdef CONFIG_FTRACE_STARTUP_TEST
1654 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
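#if 0
/*
 * Illustrative sketch (hypothetical tracer, not part of this file):
 * a minimal plugin tracer fills in a struct tracer and hands it to
 * register_tracer() from its __init code.
 */
static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static int __init example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
#endif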
1662 void tracing_reset(struct trace_buffer *buf, int cpu)
1664 struct ring_buffer *buffer = buf->buffer;
1669 ring_buffer_record_disable(buffer);
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
1673 ring_buffer_reset_cpu(buffer, cpu);
1675 ring_buffer_record_enable(buffer);
1678 void tracing_reset_online_cpus(struct trace_buffer *buf)
1680 struct ring_buffer *buffer = buf->buffer;
1686 ring_buffer_record_disable(buffer);
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1691 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1693 for_each_online_cpu(cpu)
1694 ring_buffer_reset_cpu(buffer, cpu);
1696 ring_buffer_record_enable(buffer);
1699 /* Must have trace_types_lock held */
1700 void tracing_reset_all_online_cpus(void)
1702 struct trace_array *tr;
1704 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1705 tracing_reset_online_cpus(&tr->trace_buffer);
1706 #ifdef CONFIG_TRACER_MAX_TRACE
1707 tracing_reset_online_cpus(&tr->max_buffer);
1712 static int *tgid_map;
1714 #define SAVED_CMDLINES_DEFAULT 128
1715 #define NO_CMDLINE_MAP UINT_MAX
1716 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1717 struct saved_cmdlines_buffer {
1718 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1719 unsigned *map_cmdline_to_pid;
1720 unsigned cmdline_num;
1722 char *saved_cmdlines;
1724 static struct saved_cmdlines_buffer *savedcmd;
1726 /* temporarily disable recording */
1727 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1729 static inline char *get_saved_cmdlines(int idx)
1731 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1734 static inline void set_cmdline(int idx, const char *cmdline)
1736 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1739 static int allocate_cmdlines_buffer(unsigned int val,
1740 struct saved_cmdlines_buffer *s)
1742 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1744 if (!s->map_cmdline_to_pid)
1747 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1748 if (!s->saved_cmdlines) {
1749 kfree(s->map_cmdline_to_pid);
1754 s->cmdline_num = val;
1755 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1756 sizeof(s->map_pid_to_cmdline));
1757 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1758 val * sizeof(*s->map_cmdline_to_pid));
1763 static int trace_create_savedcmd(void)
1767 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1771 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1781 int is_tracing_stopped(void)
1783 return global_trace.stop_count;
1787 * tracing_start - quick start of the tracer
1789 * If tracing is enabled but was stopped by tracing_stop,
1790 * this will start the tracer back up.
1792 void tracing_start(void)
1794 struct ring_buffer *buffer;
1795 unsigned long flags;
1797 if (tracing_disabled)
1800 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1801 if (--global_trace.stop_count) {
1802 if (global_trace.stop_count < 0) {
1803 /* Someone screwed up their debugging */
1805 global_trace.stop_count = 0;
1810 /* Prevent the buffers from switching */
1811 arch_spin_lock(&global_trace.max_lock);
1813 buffer = global_trace.trace_buffer.buffer;
1815 ring_buffer_record_enable(buffer);
1817 #ifdef CONFIG_TRACER_MAX_TRACE
1818 buffer = global_trace.max_buffer.buffer;
1820 ring_buffer_record_enable(buffer);
1823 arch_spin_unlock(&global_trace.max_lock);
1826 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1829 static void tracing_start_tr(struct trace_array *tr)
1831 struct ring_buffer *buffer;
1832 unsigned long flags;
1834 if (tracing_disabled)
1837 /* If global, we need to also start the max tracer */
1838 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1839 return tracing_start();
1841 raw_spin_lock_irqsave(&tr->start_lock, flags);
1843 if (--tr->stop_count) {
1844 if (tr->stop_count < 0) {
1845 /* Someone screwed up their debugging */
1852 buffer = tr->trace_buffer.buffer;
1854 ring_buffer_record_enable(buffer);
1857 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1861 * tracing_stop - quick stop of the tracer
1863 * Lightweight way to stop tracing. Use in conjunction with tracing_start().
1866 void tracing_stop(void)
1868 struct ring_buffer *buffer;
1869 unsigned long flags;
1871 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1872 if (global_trace.stop_count++)
1875 /* Prevent the buffers from switching */
1876 arch_spin_lock(&global_trace.max_lock);
1878 buffer = global_trace.trace_buffer.buffer;
1880 ring_buffer_record_disable(buffer);
1882 #ifdef CONFIG_TRACER_MAX_TRACE
1883 buffer = global_trace.max_buffer.buffer;
1885 ring_buffer_record_disable(buffer);
1888 arch_spin_unlock(&global_trace.max_lock);
1891 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1894 static void tracing_stop_tr(struct trace_array *tr)
1896 struct ring_buffer *buffer;
1897 unsigned long flags;
1899 /* If global, we need to also stop the max tracer */
1900 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1901 return tracing_stop();
1903 raw_spin_lock_irqsave(&tr->start_lock, flags);
1904 if (tr->stop_count++)
1907 buffer = tr->trace_buffer.buffer;
1909 ring_buffer_record_disable(buffer);
1912 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1915 static int trace_save_cmdline(struct task_struct *tsk)
1919 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1923 * It's not the end of the world if we don't get
1924 * the lock, but we also don't want to spin
1925 * nor do we want to disable interrupts,
1926 * so if we miss here, then better luck next time.
1928 if (!arch_spin_trylock(&trace_cmdline_lock))
1931 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1932 if (idx == NO_CMDLINE_MAP) {
1933 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1936 * Check whether the cmdline buffer at idx has a pid
1937 * mapped. We are going to overwrite that entry so we
1938 * need to clear the map_pid_to_cmdline. Otherwise we
1939 * would read the new comm for the old pid.
1941 pid = savedcmd->map_cmdline_to_pid[idx];
1942 if (pid != NO_CMDLINE_MAP)
1943 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1945 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1946 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1948 savedcmd->cmdline_idx = idx;
1951 set_cmdline(idx, tsk->comm);
1953 arch_spin_unlock(&trace_cmdline_lock);
1958 static void __trace_find_cmdline(int pid, char comm[])
1963 strcpy(comm, "<idle>");
1967 if (WARN_ON_ONCE(pid < 0)) {
1968 strcpy(comm, "<XXX>");
1972 if (pid > PID_MAX_DEFAULT) {
1973 strcpy(comm, "<...>");
1977 map = savedcmd->map_pid_to_cmdline[pid];
1978 if (map != NO_CMDLINE_MAP)
1979 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
1981 strcpy(comm, "<...>");
1984 void trace_find_cmdline(int pid, char comm[])
1987 arch_spin_lock(&trace_cmdline_lock);
1989 __trace_find_cmdline(pid, comm);
1991 arch_spin_unlock(&trace_cmdline_lock);
1995 int trace_find_tgid(int pid)
1997 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2000 return tgid_map[pid];
2003 static int trace_save_tgid(struct task_struct *tsk)
2005 if (unlikely(!tgid_map || !tsk->pid || tsk->pid > PID_MAX_DEFAULT))
2008 tgid_map[tsk->pid] = tsk->tgid;
2012 static bool tracing_record_taskinfo_skip(int flags)
2014 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2016 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2018 if (!__this_cpu_read(trace_taskinfo_save))
2024 * tracing_record_taskinfo - record the task info of a task
2026 * @task: task to record
2027 * @flags: TRACE_RECORD_CMDLINE for recording comm
2028 * TRACE_RECORD_TGID for recording tgid
2030 void tracing_record_taskinfo(struct task_struct *task, int flags)
2032 if (tracing_record_taskinfo_skip(flags))
2034 if ((flags & TRACE_RECORD_CMDLINE) && !trace_save_cmdline(task))
2036 if ((flags & TRACE_RECORD_TGID) && !trace_save_tgid(task))
2039 __this_cpu_write(trace_taskinfo_save, false);
2043 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2045 * @prev: previous task during sched_switch
2046 * @next: next task during sched_switch
2047 * @flags: TRACE_RECORD_CMDLINE for recording comm
2048 * TRACE_RECORD_TGID for recording tgid
2050 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2051 struct task_struct *next, int flags)
2053 if (tracing_record_taskinfo_skip(flags))
2056 if ((flags & TRACE_RECORD_CMDLINE) &&
2057 (!trace_save_cmdline(prev) || !trace_save_cmdline(next)))
2060 if ((flags & TRACE_RECORD_TGID) &&
2061 (!trace_save_tgid(prev) || !trace_save_tgid(next)))
2064 __this_cpu_write(trace_taskinfo_save, false);
2067 /* Helpers to record a specific task information */
2068 void tracing_record_cmdline(struct task_struct *task)
2070 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2073 void tracing_record_tgid(struct task_struct *task)
2075 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2079 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2080 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2081 * simplifies those functions and keeps them in sync.
2083 enum print_line_t trace_handle_return(struct trace_seq *s)
2085 return trace_seq_has_overflowed(s) ?
2086 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2088 EXPORT_SYMBOL_GPL(trace_handle_return);
2091 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2094 struct task_struct *tsk = current;
2096 entry->preempt_count = pc & 0xff;
2097 entry->pid = (tsk) ? tsk->pid : 0;
2099 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2100 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2102 TRACE_FLAG_IRQS_NOSUPPORT |
2104 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2105 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2106 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2107 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2108 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2110 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2112 struct ring_buffer_event *
2113 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2116 unsigned long flags, int pc)
2118 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2121 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2122 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2123 static int trace_buffered_event_ref;
2126 * trace_buffered_event_enable - enable buffering events
2128 * When events are being filtered, it is quicker to use a temporary
2129 * buffer to write the event data into if there's a likely chance
2130 * that it will not be committed. The discard of the ring buffer
2131 * is not as fast as committing, and is much slower than copying to the temp buffer.
2134 * When an event is to be filtered, allocate per cpu buffers to
2135 * write the event data into, and if the event is filtered and discarded
2136 * it is simply dropped, otherwise, the entire data is to be committed to the buffer.
2139 void trace_buffered_event_enable(void)
2141 struct ring_buffer_event *event;
2145 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2147 if (trace_buffered_event_ref++)
2150 for_each_tracing_cpu(cpu) {
2151 page = alloc_pages_node(cpu_to_node(cpu),
2152 GFP_KERNEL | __GFP_NORETRY, 0);
2156 event = page_address(page);
2157 memset(event, 0, sizeof(*event));
2159 per_cpu(trace_buffered_event, cpu) = event;
2162 if (cpu == smp_processor_id() &&
2163 this_cpu_read(trace_buffered_event) !=
2164 per_cpu(trace_buffered_event, cpu))
2171 trace_buffered_event_disable();
2174 static void enable_trace_buffered_event(void *data)
2176 /* Probably not needed, but do it anyway */
2178 this_cpu_dec(trace_buffered_event_cnt);
2181 static void disable_trace_buffered_event(void *data)
2183 this_cpu_inc(trace_buffered_event_cnt);
2187 * trace_buffered_event_disable - disable buffering events
2189 * When a filter is removed, it is faster to not use the buffered
2190 * events, and to commit directly into the ring buffer. Free up
2191 * the temp buffers when there are no more users. This requires
2192 * special synchronization with current events.
2194 void trace_buffered_event_disable(void)
2198 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2200 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2203 if (--trace_buffered_event_ref)
2207 /* For each CPU, set the buffer as used. */
2208 smp_call_function_many(tracing_buffer_mask,
2209 disable_trace_buffered_event, NULL, 1);
2212 /* Wait for all current users to finish */
2213 synchronize_sched();
2215 for_each_tracing_cpu(cpu) {
2216 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2217 per_cpu(trace_buffered_event, cpu) = NULL;
2220 * Make sure trace_buffered_event is NULL before clearing
2221 * trace_buffered_event_cnt.
2226 /* Do the work on each cpu */
2227 smp_call_function_many(tracing_buffer_mask,
2228 enable_trace_buffered_event, NULL, 1);
2232 static struct ring_buffer *temp_buffer;
2234 struct ring_buffer_event *
2235 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2236 struct trace_event_file *trace_file,
2237 int type, unsigned long len,
2238 unsigned long flags, int pc)
2240 struct ring_buffer_event *entry;
2243 *current_rb = trace_file->tr->trace_buffer.buffer;
2245 if ((trace_file->flags &
2246 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2247 (entry = this_cpu_read(trace_buffered_event))) {
2248 /* Try to use the per cpu buffer first */
2249 val = this_cpu_inc_return(trace_buffered_event_cnt);
2251 trace_event_setup(entry, type, flags, pc);
2252 entry->array[0] = len;
2255 this_cpu_dec(trace_buffered_event_cnt);
2258 entry = __trace_buffer_lock_reserve(*current_rb,
2259 type, len, flags, pc);
2261 * If tracing is off, but we have triggers enabled
2262 * we still need to look at the event data. Use the temp_buffer
2263 * to store the trace event for the trigger to use. It's recursion
2264 * safe and will not be recorded anywhere.
2266 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2267 *current_rb = temp_buffer;
2268 entry = __trace_buffer_lock_reserve(*current_rb,
2269 type, len, flags, pc);
2273 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2275 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2276 static DEFINE_MUTEX(tracepoint_printk_mutex);
2278 static void output_printk(struct trace_event_buffer *fbuffer)
2280 struct trace_event_call *event_call;
2281 struct trace_event *event;
2282 unsigned long flags;
2283 struct trace_iterator *iter = tracepoint_print_iter;
2285 /* We should never get here if iter is NULL */
2286 if (WARN_ON_ONCE(!iter))
2289 event_call = fbuffer->trace_file->event_call;
2290 if (!event_call || !event_call->event.funcs ||
2291 !event_call->event.funcs->trace)
2294 event = &fbuffer->trace_file->event_call->event;
2296 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2297 trace_seq_init(&iter->seq);
2298 iter->ent = fbuffer->entry;
2299 event_call->event.funcs->trace(iter, 0, event);
2300 trace_seq_putc(&iter->seq, 0);
2301 printk("%s", iter->seq.buffer);
2303 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2306 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2307 void __user *buffer, size_t *lenp,
2310 int save_tracepoint_printk;
2313 mutex_lock(&tracepoint_printk_mutex);
2314 save_tracepoint_printk = tracepoint_printk;
2316 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2319 * This will force exiting early, as tracepoint_printk
2320 * is always zero when tracepoint_print_iter is not allocated
2322 if (!tracepoint_print_iter)
2323 tracepoint_printk = 0;
2325 if (save_tracepoint_printk == tracepoint_printk)
2328 if (tracepoint_printk)
2329 static_key_enable(&tracepoint_printk_key.key);
2331 static_key_disable(&tracepoint_printk_key.key);
2334 mutex_unlock(&tracepoint_printk_mutex);
2339 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2341 if (static_key_false(&tracepoint_printk_key.key))
2342 output_printk(fbuffer);
2344 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2345 fbuffer->event, fbuffer->entry,
2346 fbuffer->flags, fbuffer->pc);
2348 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2350 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2351 struct ring_buffer *buffer,
2352 struct ring_buffer_event *event,
2353 unsigned long flags, int pc,
2354 struct pt_regs *regs)
2356 __buffer_unlock_commit(buffer, event);
2359 * If regs is not set, then skip the following callers:
2360 * trace_buffer_unlock_commit_regs
2361 * event_trigger_unlock_commit
2362 * trace_event_buffer_commit
2363 * trace_event_raw_event_sched_switch
2364 * Note, we can still get here via blktrace, wakeup tracer
2365 * and mmiotrace, but that's ok if they lose a function or
2366 * two. They are not that meaningful.
2368 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
2369 ftrace_trace_userstack(buffer, flags, pc);
2373 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2376 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2377 struct ring_buffer_event *event)
2379 __buffer_unlock_commit(buffer, event);
2383 trace_process_export(struct trace_export *export,
2384 struct ring_buffer_event *event)
2386 struct trace_entry *entry;
2387 unsigned int size = 0;
2389 entry = ring_buffer_event_data(event);
2390 size = ring_buffer_event_length(event);
2391 export->write(entry, size);
2394 static DEFINE_MUTEX(ftrace_export_lock);
2396 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2398 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2400 static inline void ftrace_exports_enable(void)
2402 static_branch_enable(&ftrace_exports_enabled);
2405 static inline void ftrace_exports_disable(void)
2407 static_branch_disable(&ftrace_exports_enabled);
2410 void ftrace_exports(struct ring_buffer_event *event)
2412 struct trace_export *export;
2414 preempt_disable_notrace();
2416 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2418 trace_process_export(export, event);
2419 export = rcu_dereference_raw_notrace(export->next);
2422 preempt_enable_notrace();
2426 add_trace_export(struct trace_export **list, struct trace_export *export)
2428 rcu_assign_pointer(export->next, *list);
2430 	 * We are inserting the export into the list, but another
2431 	 * CPU might be walking that list. We need to make sure
2432 	 * the export->next pointer is valid before another CPU sees
2433 	 * the export pointer on the list.
2435 rcu_assign_pointer(*list, export);
2439 rm_trace_export(struct trace_export **list, struct trace_export *export)
2441 struct trace_export **p;
2443 for (p = list; *p != NULL; p = &(*p)->next)
2450 rcu_assign_pointer(*p, (*p)->next);
2456 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2459 ftrace_exports_enable();
2461 add_trace_export(list, export);
2465 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2469 ret = rm_trace_export(list, export);
2471 ftrace_exports_disable();
2476 int register_ftrace_export(struct trace_export *export)
2478 if (WARN_ON_ONCE(!export->write))
2481 mutex_lock(&ftrace_export_lock);
2483 add_ftrace_export(&ftrace_exports_list, export);
2485 mutex_unlock(&ftrace_export_lock);
2489 EXPORT_SYMBOL_GPL(register_ftrace_export);
2491 int unregister_ftrace_export(struct trace_export *export)
2495 mutex_lock(&ftrace_export_lock);
2497 ret = rm_ftrace_export(&ftrace_exports_list, export);
2499 mutex_unlock(&ftrace_export_lock);
2503 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
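/*
 * Usage sketch (illustrative only, not part of this file): a module can
 * receive a copy of every exported ring-buffer event by registering a
 * trace_export whose ->write() callback gets the raw trace_entry data and
 * its length, as trace_process_export() above shows. The walk in
 * ftrace_exports() runs with preemption disabled, so the callback must not
 * sleep. The callback name and my_sink_push() below are hypothetical; the
 * write(buf, len) signature is assumed to match struct trace_export in
 * <linux/trace.h>:
 *
 *	static void notrace my_export_write(const void *buf, unsigned int len)
 *	{
 *		my_sink_push(buf, len);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */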
2506 trace_function(struct trace_array *tr,
2507 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2510 struct trace_event_call *call = &event_function;
2511 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2512 struct ring_buffer_event *event;
2513 struct ftrace_entry *entry;
2515 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2519 entry = ring_buffer_event_data(event);
2521 entry->parent_ip = parent_ip;
2523 if (!call_filter_check_discard(call, entry, buffer, event)) {
2524 if (static_branch_unlikely(&ftrace_exports_enabled))
2525 ftrace_exports(event);
2526 __buffer_unlock_commit(buffer, event);
2530 #ifdef CONFIG_STACKTRACE
2532 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2533 struct ftrace_stack {
2534 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2537 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2538 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2540 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2541 unsigned long flags,
2542 int skip, int pc, struct pt_regs *regs)
2544 struct trace_event_call *call = &event_kernel_stack;
2545 struct ring_buffer_event *event;
2546 struct stack_entry *entry;
2547 struct stack_trace trace;
2549 int size = FTRACE_STACK_ENTRIES;
2551 trace.nr_entries = 0;
2555 	 * Add two, for this function and the call to save_stack_trace().
2556 * If regs is set, then these functions will not be in the way.
2562 * Since events can happen in NMIs there's no safe way to
2563 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2564 * or NMI comes in, it will just have to use the default
2565 	 * FTRACE_STACK_ENTRIES.
2567 preempt_disable_notrace();
2569 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2571 * We don't need any atomic variables, just a barrier.
2572 * If an interrupt comes in, we don't care, because it would
2573 * have exited and put the counter back to what we want.
2574 * We just need a barrier to keep gcc from moving things
2578 if (use_stack == 1) {
2579 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2580 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2583 save_stack_trace_regs(regs, &trace);
2585 save_stack_trace(&trace);
2587 if (trace.nr_entries > size)
2588 size = trace.nr_entries;
2590 /* From now on, use_stack is a boolean */
2593 size *= sizeof(unsigned long);
2595 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2596 sizeof(*entry) + size, flags, pc);
2599 entry = ring_buffer_event_data(event);
2601 memset(&entry->caller, 0, size);
2604 memcpy(&entry->caller, trace.entries,
2605 trace.nr_entries * sizeof(unsigned long));
2607 trace.max_entries = FTRACE_STACK_ENTRIES;
2608 trace.entries = entry->caller;
2610 save_stack_trace_regs(regs, &trace);
2612 save_stack_trace(&trace);
2615 entry->size = trace.nr_entries;
2617 if (!call_filter_check_discard(call, entry, buffer, event))
2618 __buffer_unlock_commit(buffer, event);
2621 /* Again, don't let gcc optimize things here */
2623 __this_cpu_dec(ftrace_stack_reserve);
2624 preempt_enable_notrace();
2628 static inline void ftrace_trace_stack(struct trace_array *tr,
2629 struct ring_buffer *buffer,
2630 unsigned long flags,
2631 int skip, int pc, struct pt_regs *regs)
2633 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2636 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2639 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2642 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2644 if (rcu_is_watching()) {
2645 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2650 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2651 * but if the above rcu_is_watching() failed, then the NMI
2652 * triggered someplace critical, and rcu_irq_enter() should
2653 * not be called from NMI.
2655 if (unlikely(in_nmi()))
2659 * It is possible that a function is being traced in a
2660 * location that RCU is not watching. A call to
2661 	 * rcu_irq_enter() will make sure that it is, but there are
2662 	 * a few internal rcu functions that could be traced
2663 	 * where that won't work either. In those cases, we just
2666 if (unlikely(rcu_irq_enter_disabled()))
2669 rcu_irq_enter_irqson();
2670 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2671 rcu_irq_exit_irqson();
2675 * trace_dump_stack - record a stack back trace in the trace buffer
2676 * @skip: Number of functions to skip (helper handlers)
2678 void trace_dump_stack(int skip)
2680 unsigned long flags;
2682 if (tracing_disabled || tracing_selftest_running)
2685 local_save_flags(flags);
2688 * Skip 3 more, seems to get us at the caller of
2692 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2693 flags, skip, preempt_count(), NULL);
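/*
 * Usage sketch (illustrative only, the condition is hypothetical):
 * debugging code can record the current kernel stack into the trace
 * buffer instead of printing it to the console:
 *
 *	if (unlikely(bad_state))
 *		trace_dump_stack(0);
 *
 * A positive @skip trims that many additional callers from the top of
 * the recorded trace, which helps when the call sits inside a helper.
 */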
2696 static DEFINE_PER_CPU(int, user_stack_count);
2699 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2701 struct trace_event_call *call = &event_user_stack;
2702 struct ring_buffer_event *event;
2703 struct userstack_entry *entry;
2704 struct stack_trace trace;
2706 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2710 	 * NMIs cannot handle page faults, even with fixups.
2711 	 * Saving the user stack can (and often does) fault.
2713 if (unlikely(in_nmi()))
2717 * prevent recursion, since the user stack tracing may
2718 * trigger other kernel events.
2721 if (__this_cpu_read(user_stack_count))
2724 __this_cpu_inc(user_stack_count);
2726 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2727 sizeof(*entry), flags, pc);
2729 goto out_drop_count;
2730 entry = ring_buffer_event_data(event);
2732 entry->tgid = current->tgid;
2733 memset(&entry->caller, 0, sizeof(entry->caller));
2735 trace.nr_entries = 0;
2736 trace.max_entries = FTRACE_STACK_ENTRIES;
2738 trace.entries = entry->caller;
2740 save_stack_trace_user(&trace);
2741 if (!call_filter_check_discard(call, entry, buffer, event))
2742 __buffer_unlock_commit(buffer, event);
2745 __this_cpu_dec(user_stack_count);
2751 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2753 ftrace_trace_userstack(tr, flags, preempt_count());
2757 #endif /* CONFIG_STACKTRACE */
2759 /* created for use with alloc_percpu */
2760 struct trace_buffer_struct {
2762 char buffer[4][TRACE_BUF_SIZE];
2765 static struct trace_buffer_struct *trace_percpu_buffer;
2768 	 * This allows for lockless recording. If we're nested too deeply, then
2769 * this returns NULL.
2771 static char *get_trace_buf(void)
2773 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2775 if (!buffer || buffer->nesting >= 4)
2778 return &buffer->buffer[buffer->nesting++][0];
2781 static void put_trace_buf(void)
2783 this_cpu_dec(trace_percpu_buffer->nesting);
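/*
 * Usage sketch (illustrative only): get_trace_buf() and put_trace_buf()
 * must be paired in the same context with preemption disabled, the way
 * trace_vbprintk() and __trace_array_vprintk() below use them:
 *
 *	char *tbuffer;
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuffer;
 *	... format the message into tbuffer (at most TRACE_BUF_SIZE bytes) ...
 *	put_trace_buf();
 * out_nobuffer:
 *	preempt_enable_notrace();
 *
 * The four per-cpu slots are enough to cover nesting between normal,
 * softirq, hardirq and NMI context; anything nested deeper just gets
 * NULL back and the message is dropped.
 */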
2786 static int alloc_percpu_trace_buffer(void)
2788 struct trace_buffer_struct *buffers;
2790 buffers = alloc_percpu(struct trace_buffer_struct);
2791 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2794 trace_percpu_buffer = buffers;
2798 static int buffers_allocated;
2800 void trace_printk_init_buffers(void)
2802 if (buffers_allocated)
2805 if (alloc_percpu_trace_buffer())
2808 /* trace_printk() is for debug use only. Don't use it in production. */
2811 pr_warn("**********************************************************\n");
2812 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2814 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2816 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2817 pr_warn("** unsafe for production use. **\n");
2819 pr_warn("** If you see this message and you are not debugging **\n");
2820 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2822 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2823 pr_warn("**********************************************************\n");
2825 /* Expand the buffers to set size */
2826 tracing_update_buffers();
2828 buffers_allocated = 1;
2831 * trace_printk_init_buffers() can be called by modules.
2832 * If that happens, then we need to start cmdline recording
2833 * directly here. If the global_trace.buffer is already
2834 * allocated here, then this was called by module code.
2836 if (global_trace.trace_buffer.buffer)
2837 tracing_start_cmdline_record();
2840 void trace_printk_start_comm(void)
2842 /* Start tracing comms if trace printk is set */
2843 if (!buffers_allocated)
2845 tracing_start_cmdline_record();
2848 static void trace_printk_start_stop_comm(int enabled)
2850 if (!buffers_allocated)
2854 tracing_start_cmdline_record();
2856 tracing_stop_cmdline_record();
2860 * trace_vbprintk - write binary msg to tracing buffer
2863 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2865 struct trace_event_call *call = &event_bprint;
2866 struct ring_buffer_event *event;
2867 struct ring_buffer *buffer;
2868 struct trace_array *tr = &global_trace;
2869 struct bprint_entry *entry;
2870 unsigned long flags;
2872 int len = 0, size, pc;
2874 if (unlikely(tracing_selftest_running || tracing_disabled))
2877 /* Don't pollute graph traces with trace_vprintk internals */
2878 pause_graph_tracing();
2880 pc = preempt_count();
2881 preempt_disable_notrace();
2883 tbuffer = get_trace_buf();
2889 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2891 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2894 local_save_flags(flags);
2895 size = sizeof(*entry) + sizeof(u32) * len;
2896 buffer = tr->trace_buffer.buffer;
2897 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2901 entry = ring_buffer_event_data(event);
2905 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2906 if (!call_filter_check_discard(call, entry, buffer, event)) {
2907 __buffer_unlock_commit(buffer, event);
2908 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2915 preempt_enable_notrace();
2916 unpause_graph_tracing();
2920 EXPORT_SYMBOL_GPL(trace_vbprintk);
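/*
 * Usage sketch (illustrative only, req and len are placeholders): a
 * trace_printk() call with arguments and a constant format string is
 * funneled into trace_vbprintk() above:
 *
 *	trace_printk("processing req=%p len=%d\n", req, len);
 *
 * The arguments are stored in the ring buffer in binary form and are only
 * formatted when the trace is read, which keeps the tracing path cheap.
 */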
2923 __trace_array_vprintk(struct ring_buffer *buffer,
2924 unsigned long ip, const char *fmt, va_list args)
2926 struct trace_event_call *call = &event_print;
2927 struct ring_buffer_event *event;
2928 int len = 0, size, pc;
2929 struct print_entry *entry;
2930 unsigned long flags;
2933 if (tracing_disabled || tracing_selftest_running)
2936 /* Don't pollute graph traces with trace_vprintk internals */
2937 pause_graph_tracing();
2939 pc = preempt_count();
2940 preempt_disable_notrace();
2943 tbuffer = get_trace_buf();
2949 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2951 local_save_flags(flags);
2952 size = sizeof(*entry) + len + 1;
2953 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2957 entry = ring_buffer_event_data(event);
2960 memcpy(&entry->buf, tbuffer, len + 1);
2961 if (!call_filter_check_discard(call, entry, buffer, event)) {
2962 __buffer_unlock_commit(buffer, event);
2963 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2970 preempt_enable_notrace();
2971 unpause_graph_tracing();
2976 int trace_array_vprintk(struct trace_array *tr,
2977 unsigned long ip, const char *fmt, va_list args)
2979 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2982 int trace_array_printk(struct trace_array *tr,
2983 unsigned long ip, const char *fmt, ...)
2988 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2992 ret = trace_array_vprintk(tr, ip, fmt, ap);
2997 int trace_array_printk_buf(struct ring_buffer *buffer,
2998 unsigned long ip, const char *fmt, ...)
3003 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3007 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3012 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3014 return trace_array_vprintk(&global_trace, ip, fmt, args);
3016 EXPORT_SYMBOL_GPL(trace_vprintk);
3018 static void trace_iterator_increment(struct trace_iterator *iter)
3020 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3024 ring_buffer_read(buf_iter, NULL);
3027 static struct trace_entry *
3028 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3029 unsigned long *lost_events)
3031 struct ring_buffer_event *event;
3032 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3035 event = ring_buffer_iter_peek(buf_iter, ts);
3037 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3041 iter->ent_size = ring_buffer_event_length(event);
3042 return ring_buffer_event_data(event);
3048 static struct trace_entry *
3049 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3050 unsigned long *missing_events, u64 *ent_ts)
3052 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3053 struct trace_entry *ent, *next = NULL;
3054 unsigned long lost_events = 0, next_lost = 0;
3055 int cpu_file = iter->cpu_file;
3056 u64 next_ts = 0, ts;
3062 	 * If we are in a per_cpu trace file, don't bother iterating over
3063 	 * all the cpus; just peek at that one directly.
3065 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3066 if (ring_buffer_empty_cpu(buffer, cpu_file))
3068 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3070 *ent_cpu = cpu_file;
3075 for_each_tracing_cpu(cpu) {
3077 if (ring_buffer_empty_cpu(buffer, cpu))
3080 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3083 * Pick the entry with the smallest timestamp:
3085 if (ent && (!next || ts < next_ts)) {
3089 next_lost = lost_events;
3090 next_size = iter->ent_size;
3094 iter->ent_size = next_size;
3097 *ent_cpu = next_cpu;
3103 *missing_events = next_lost;
3108 /* Find the next real entry, without updating the iterator itself */
3109 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3110 int *ent_cpu, u64 *ent_ts)
3112 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3115 /* Find the next real entry, and increment the iterator to the next entry */
3116 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3118 iter->ent = __find_next_entry(iter, &iter->cpu,
3119 &iter->lost_events, &iter->ts);
3122 trace_iterator_increment(iter);
3124 return iter->ent ? iter : NULL;
3127 static void trace_consume(struct trace_iterator *iter)
3129 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3130 &iter->lost_events);
3133 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3135 struct trace_iterator *iter = m->private;
3139 WARN_ON_ONCE(iter->leftover);
3143 /* can't go backwards */
3148 ent = trace_find_next_entry_inc(iter);
3152 while (ent && iter->idx < i)
3153 ent = trace_find_next_entry_inc(iter);
3160 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3162 struct ring_buffer_event *event;
3163 struct ring_buffer_iter *buf_iter;
3164 unsigned long entries = 0;
3167 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3169 buf_iter = trace_buffer_iter(iter, cpu);
3173 ring_buffer_iter_reset(buf_iter);
3176 * We could have the case with the max latency tracers
3177 * that a reset never took place on a cpu. This is evident
3178 * by the timestamp being before the start of the buffer.
3180 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3181 if (ts >= iter->trace_buffer->time_start)
3184 ring_buffer_read(buf_iter, NULL);
3187 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3191 * The current tracer is copied to avoid a global locking
3194 static void *s_start(struct seq_file *m, loff_t *pos)
3196 struct trace_iterator *iter = m->private;
3197 struct trace_array *tr = iter->tr;
3198 int cpu_file = iter->cpu_file;
3204 * copy the tracer to avoid using a global lock all around.
3205 * iter->trace is a copy of current_trace, the pointer to the
3206 * name may be used instead of a strcmp(), as iter->trace->name
3207 * will point to the same string as current_trace->name.
3209 mutex_lock(&trace_types_lock);
3210 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3211 *iter->trace = *tr->current_trace;
3212 mutex_unlock(&trace_types_lock);
3214 #ifdef CONFIG_TRACER_MAX_TRACE
3215 if (iter->snapshot && iter->trace->use_max_tr)
3216 return ERR_PTR(-EBUSY);
3219 if (!iter->snapshot)
3220 atomic_inc(&trace_record_taskinfo_disabled);
3222 if (*pos != iter->pos) {
3227 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3228 for_each_tracing_cpu(cpu)
3229 tracing_iter_reset(iter, cpu);
3231 tracing_iter_reset(iter, cpu_file);
3234 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3239 * If we overflowed the seq_file before, then we want
3240 * to just reuse the trace_seq buffer again.
3246 p = s_next(m, p, &l);
3250 trace_event_read_lock();
3251 trace_access_lock(cpu_file);
3255 static void s_stop(struct seq_file *m, void *p)
3257 struct trace_iterator *iter = m->private;
3259 #ifdef CONFIG_TRACER_MAX_TRACE
3260 if (iter->snapshot && iter->trace->use_max_tr)
3264 if (!iter->snapshot)
3265 atomic_dec(&trace_record_taskinfo_disabled);
3267 trace_access_unlock(iter->cpu_file);
3268 trace_event_read_unlock();
3272 get_total_entries(struct trace_buffer *buf,
3273 unsigned long *total, unsigned long *entries)
3275 unsigned long count;
3281 for_each_tracing_cpu(cpu) {
3282 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3284 * If this buffer has skipped entries, then we hold all
3285 * entries for the trace and we need to ignore the
3286 * ones before the time stamp.
3288 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3289 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3290 /* total is the same as the entries */
3294 ring_buffer_overrun_cpu(buf->buffer, cpu);
3299 static void print_lat_help_header(struct seq_file *m)
3301 seq_puts(m, "# _------=> CPU# \n"
3302 "# / _-----=> irqs-off \n"
3303 "# | / _----=> need-resched \n"
3304 "# || / _---=> hardirq/softirq \n"
3305 "# ||| / _--=> preempt-depth \n"
3307 "# cmd pid ||||| time | caller \n"
3308 "# \\ / ||||| \\ | / \n");
3311 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3313 unsigned long total;
3314 unsigned long entries;
3316 get_total_entries(buf, &total, &entries);
3317 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3318 entries, total, num_online_cpus());
3322 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3325 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3327 print_event_info(buf, m);
3329 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3330 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
3333 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3336 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3338 seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? " " : "");
3339 seq_printf(m, "# %s / _----=> need-resched\n", tgid ? " " : "");
3340 seq_printf(m, "# %s| / _---=> hardirq/softirq\n", tgid ? " " : "");
3341 seq_printf(m, "# %s|| / _--=> preempt-depth\n", tgid ? " " : "");
3342 seq_printf(m, "# %s||| / delay\n", tgid ? " " : "");
3343 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
3344 seq_printf(m, "# | | | %s|||| | |\n", tgid ? " | " : "");
3348 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3350 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3351 struct trace_buffer *buf = iter->trace_buffer;
3352 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3353 struct tracer *type = iter->trace;
3354 unsigned long entries;
3355 unsigned long total;
3356 const char *name = "preemption";
3360 get_total_entries(buf, &total, &entries);
3362 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3364 seq_puts(m, "# -----------------------------------"
3365 "---------------------------------\n");
3366 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3367 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3368 nsecs_to_usecs(data->saved_latency),
3372 #if defined(CONFIG_PREEMPT_NONE)
3374 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3376 #elif defined(CONFIG_PREEMPT)
3381 /* These are reserved for later use */
3384 seq_printf(m, " #P:%d)\n", num_online_cpus());
3388 seq_puts(m, "# -----------------\n");
3389 seq_printf(m, "# | task: %.16s-%d "
3390 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3391 data->comm, data->pid,
3392 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3393 data->policy, data->rt_priority);
3394 seq_puts(m, "# -----------------\n");
3396 if (data->critical_start) {
3397 seq_puts(m, "# => started at: ");
3398 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3399 trace_print_seq(m, &iter->seq);
3400 seq_puts(m, "\n# => ended at: ");
3401 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3402 trace_print_seq(m, &iter->seq);
3403 seq_puts(m, "\n#\n");
3409 static void test_cpu_buff_start(struct trace_iterator *iter)
3411 struct trace_seq *s = &iter->seq;
3412 struct trace_array *tr = iter->tr;
3414 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3417 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3420 if (cpumask_available(iter->started) &&
3421 cpumask_test_cpu(iter->cpu, iter->started))
3424 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3427 if (cpumask_available(iter->started))
3428 cpumask_set_cpu(iter->cpu, iter->started);
3430 /* Don't print started cpu buffer for the first entry of the trace */
3432 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3436 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3438 struct trace_array *tr = iter->tr;
3439 struct trace_seq *s = &iter->seq;
3440 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3441 struct trace_entry *entry;
3442 struct trace_event *event;
3446 test_cpu_buff_start(iter);
3448 event = ftrace_find_event(entry->type);
3450 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3451 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3452 trace_print_lat_context(iter);
3454 trace_print_context(iter);
3457 if (trace_seq_has_overflowed(s))
3458 return TRACE_TYPE_PARTIAL_LINE;
3461 return event->funcs->trace(iter, sym_flags, event);
3463 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3465 return trace_handle_return(s);
3468 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3470 struct trace_array *tr = iter->tr;
3471 struct trace_seq *s = &iter->seq;
3472 struct trace_entry *entry;
3473 struct trace_event *event;
3477 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3478 trace_seq_printf(s, "%d %d %llu ",
3479 entry->pid, iter->cpu, iter->ts);
3481 if (trace_seq_has_overflowed(s))
3482 return TRACE_TYPE_PARTIAL_LINE;
3484 event = ftrace_find_event(entry->type);
3486 return event->funcs->raw(iter, 0, event);
3488 trace_seq_printf(s, "%d ?\n", entry->type);
3490 return trace_handle_return(s);
3493 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3495 struct trace_array *tr = iter->tr;
3496 struct trace_seq *s = &iter->seq;
3497 unsigned char newline = '\n';
3498 struct trace_entry *entry;
3499 struct trace_event *event;
3503 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3504 SEQ_PUT_HEX_FIELD(s, entry->pid);
3505 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3506 SEQ_PUT_HEX_FIELD(s, iter->ts);
3507 if (trace_seq_has_overflowed(s))
3508 return TRACE_TYPE_PARTIAL_LINE;
3511 event = ftrace_find_event(entry->type);
3513 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3514 if (ret != TRACE_TYPE_HANDLED)
3518 SEQ_PUT_FIELD(s, newline);
3520 return trace_handle_return(s);
3523 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3525 struct trace_array *tr = iter->tr;
3526 struct trace_seq *s = &iter->seq;
3527 struct trace_entry *entry;
3528 struct trace_event *event;
3532 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3533 SEQ_PUT_FIELD(s, entry->pid);
3534 SEQ_PUT_FIELD(s, iter->cpu);
3535 SEQ_PUT_FIELD(s, iter->ts);
3536 if (trace_seq_has_overflowed(s))
3537 return TRACE_TYPE_PARTIAL_LINE;
3540 event = ftrace_find_event(entry->type);
3541 return event ? event->funcs->binary(iter, 0, event) :
3545 int trace_empty(struct trace_iterator *iter)
3547 struct ring_buffer_iter *buf_iter;
3550 /* If we are looking at one CPU buffer, only check that one */
3551 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3552 cpu = iter->cpu_file;
3553 buf_iter = trace_buffer_iter(iter, cpu);
3555 if (!ring_buffer_iter_empty(buf_iter))
3558 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3564 for_each_tracing_cpu(cpu) {
3565 buf_iter = trace_buffer_iter(iter, cpu);
3567 if (!ring_buffer_iter_empty(buf_iter))
3570 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3578 /* Called with trace_event_read_lock() held. */
3579 enum print_line_t print_trace_line(struct trace_iterator *iter)
3581 struct trace_array *tr = iter->tr;
3582 unsigned long trace_flags = tr->trace_flags;
3583 enum print_line_t ret;
3585 if (iter->lost_events) {
3586 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3587 iter->cpu, iter->lost_events);
3588 if (trace_seq_has_overflowed(&iter->seq))
3589 return TRACE_TYPE_PARTIAL_LINE;
3592 if (iter->trace && iter->trace->print_line) {
3593 ret = iter->trace->print_line(iter);
3594 if (ret != TRACE_TYPE_UNHANDLED)
3598 if (iter->ent->type == TRACE_BPUTS &&
3599 trace_flags & TRACE_ITER_PRINTK &&
3600 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3601 return trace_print_bputs_msg_only(iter);
3603 if (iter->ent->type == TRACE_BPRINT &&
3604 trace_flags & TRACE_ITER_PRINTK &&
3605 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3606 return trace_print_bprintk_msg_only(iter);
3608 if (iter->ent->type == TRACE_PRINT &&
3609 trace_flags & TRACE_ITER_PRINTK &&
3610 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3611 return trace_print_printk_msg_only(iter);
3613 if (trace_flags & TRACE_ITER_BIN)
3614 return print_bin_fmt(iter);
3616 if (trace_flags & TRACE_ITER_HEX)
3617 return print_hex_fmt(iter);
3619 if (trace_flags & TRACE_ITER_RAW)
3620 return print_raw_fmt(iter);
3622 return print_trace_fmt(iter);
3625 void trace_latency_header(struct seq_file *m)
3627 struct trace_iterator *iter = m->private;
3628 struct trace_array *tr = iter->tr;
3630 /* print nothing if the buffers are empty */
3631 if (trace_empty(iter))
3634 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3635 print_trace_header(m, iter);
3637 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3638 print_lat_help_header(m);
3641 void trace_default_header(struct seq_file *m)
3643 struct trace_iterator *iter = m->private;
3644 struct trace_array *tr = iter->tr;
3645 unsigned long trace_flags = tr->trace_flags;
3647 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3650 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3651 /* print nothing if the buffers are empty */
3652 if (trace_empty(iter))
3654 print_trace_header(m, iter);
3655 if (!(trace_flags & TRACE_ITER_VERBOSE))
3656 print_lat_help_header(m);
3658 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3659 if (trace_flags & TRACE_ITER_IRQ_INFO)
3660 print_func_help_header_irq(iter->trace_buffer,
3663 print_func_help_header(iter->trace_buffer, m,
3669 static void test_ftrace_alive(struct seq_file *m)
3671 if (!ftrace_is_dead())
3673 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3674 "# MAY BE MISSING FUNCTION EVENTS\n");
3677 #ifdef CONFIG_TRACER_MAX_TRACE
3678 static void show_snapshot_main_help(struct seq_file *m)
3680 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3681 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3682 "# Takes a snapshot of the main buffer.\n"
3683 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3684 	"# (Doesn't have to be '2', works with any number that\n"
3685 "# is not a '0' or '1')\n");
3688 static void show_snapshot_percpu_help(struct seq_file *m)
3690 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3691 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3692 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3693 "# Takes a snapshot of the main buffer for this cpu.\n");
3695 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3696 "# Must use main snapshot file to allocate.\n");
3698 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3699 	"# (Doesn't have to be '2', works with any number that\n"
3700 "# is not a '0' or '1')\n");
3703 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3705 if (iter->tr->allocated_snapshot)
3706 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3708 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3710 seq_puts(m, "# Snapshot commands:\n");
3711 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3712 show_snapshot_main_help(m);
3714 show_snapshot_percpu_help(m);
3717 /* Should never be called */
3718 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3721 static int s_show(struct seq_file *m, void *v)
3723 struct trace_iterator *iter = v;
3726 if (iter->ent == NULL) {
3728 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3730 test_ftrace_alive(m);
3732 if (iter->snapshot && trace_empty(iter))
3733 print_snapshot_help(m, iter);
3734 else if (iter->trace && iter->trace->print_header)
3735 iter->trace->print_header(m);
3737 trace_default_header(m);
3739 } else if (iter->leftover) {
3741 * If we filled the seq_file buffer earlier, we
3742 * want to just show it now.
3744 ret = trace_print_seq(m, &iter->seq);
3746 /* ret should this time be zero, but you never know */
3747 iter->leftover = ret;
3750 print_trace_line(iter);
3751 ret = trace_print_seq(m, &iter->seq);
3753 * If we overflow the seq_file buffer, then it will
3754 * ask us for this data again at start up.
3756 * ret is 0 if seq_file write succeeded.
3759 iter->leftover = ret;
3766 * Should be used after trace_array_get(), trace_types_lock
3767 * ensures that i_cdev was already initialized.
3769 static inline int tracing_get_cpu(struct inode *inode)
3771 if (inode->i_cdev) /* See trace_create_cpu_file() */
3772 return (long)inode->i_cdev - 1;
3773 return RING_BUFFER_ALL_CPUS;
3776 static const struct seq_operations tracer_seq_ops = {
3783 static struct trace_iterator *
3784 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3786 struct trace_array *tr = inode->i_private;
3787 struct trace_iterator *iter;
3790 if (tracing_disabled)
3791 return ERR_PTR(-ENODEV);
3793 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3795 return ERR_PTR(-ENOMEM);
3797 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3799 if (!iter->buffer_iter)
3803 * We make a copy of the current tracer to avoid concurrent
3804 * changes on it while we are reading.
3806 mutex_lock(&trace_types_lock);
3807 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3811 *iter->trace = *tr->current_trace;
3813 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3818 #ifdef CONFIG_TRACER_MAX_TRACE
3819 /* Currently only the top directory has a snapshot */
3820 if (tr->current_trace->print_max || snapshot)
3821 iter->trace_buffer = &tr->max_buffer;
3824 iter->trace_buffer = &tr->trace_buffer;
3825 iter->snapshot = snapshot;
3827 iter->cpu_file = tracing_get_cpu(inode);
3828 mutex_init(&iter->mutex);
3830 /* Notify the tracer early; before we stop tracing. */
3831 if (iter->trace && iter->trace->open)
3832 iter->trace->open(iter);
3834 /* Annotate start of buffers if we had overruns */
3835 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3836 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3838 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3839 if (trace_clocks[tr->clock_id].in_ns)
3840 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3842 /* stop the trace while dumping if we are not opening "snapshot" */
3843 if (!iter->snapshot)
3844 tracing_stop_tr(tr);
3846 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3847 for_each_tracing_cpu(cpu) {
3848 iter->buffer_iter[cpu] =
3849 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3851 ring_buffer_read_prepare_sync();
3852 for_each_tracing_cpu(cpu) {
3853 ring_buffer_read_start(iter->buffer_iter[cpu]);
3854 tracing_iter_reset(iter, cpu);
3857 cpu = iter->cpu_file;
3858 iter->buffer_iter[cpu] =
3859 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3860 ring_buffer_read_prepare_sync();
3861 ring_buffer_read_start(iter->buffer_iter[cpu]);
3862 tracing_iter_reset(iter, cpu);
3865 mutex_unlock(&trace_types_lock);
3870 mutex_unlock(&trace_types_lock);
3872 kfree(iter->buffer_iter);
3874 seq_release_private(inode, file);
3875 return ERR_PTR(-ENOMEM);
3878 int tracing_open_generic(struct inode *inode, struct file *filp)
3880 if (tracing_disabled)
3883 filp->private_data = inode->i_private;
3887 bool tracing_is_disabled(void)
3889 	return (tracing_disabled) ? true : false;
3893 * Open and update trace_array ref count.
3894 * Must have the current trace_array passed to it.
3896 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3898 struct trace_array *tr = inode->i_private;
3900 if (tracing_disabled)
3903 if (trace_array_get(tr) < 0)
3906 filp->private_data = inode->i_private;
3911 static int tracing_release(struct inode *inode, struct file *file)
3913 struct trace_array *tr = inode->i_private;
3914 struct seq_file *m = file->private_data;
3915 struct trace_iterator *iter;
3918 if (!(file->f_mode & FMODE_READ)) {
3919 trace_array_put(tr);
3923 /* Writes do not use seq_file */
3925 mutex_lock(&trace_types_lock);
3927 for_each_tracing_cpu(cpu) {
3928 if (iter->buffer_iter[cpu])
3929 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3932 if (iter->trace && iter->trace->close)
3933 iter->trace->close(iter);
3935 if (!iter->snapshot)
3936 /* reenable tracing if it was previously enabled */
3937 tracing_start_tr(tr);
3939 __trace_array_put(tr);
3941 mutex_unlock(&trace_types_lock);
3943 mutex_destroy(&iter->mutex);
3944 free_cpumask_var(iter->started);
3946 kfree(iter->buffer_iter);
3947 seq_release_private(inode, file);
3952 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3954 struct trace_array *tr = inode->i_private;
3956 trace_array_put(tr);
3960 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3962 struct trace_array *tr = inode->i_private;
3964 trace_array_put(tr);
3966 return single_release(inode, file);
3969 static int tracing_open(struct inode *inode, struct file *file)
3971 struct trace_array *tr = inode->i_private;
3972 struct trace_iterator *iter;
3975 if (trace_array_get(tr) < 0)
3978 /* If this file was open for write, then erase contents */
3979 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3980 int cpu = tracing_get_cpu(inode);
3982 if (cpu == RING_BUFFER_ALL_CPUS)
3983 tracing_reset_online_cpus(&tr->trace_buffer);
3985 tracing_reset(&tr->trace_buffer, cpu);
3988 if (file->f_mode & FMODE_READ) {
3989 iter = __tracing_open(inode, file, false);
3991 ret = PTR_ERR(iter);
3992 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3993 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3997 trace_array_put(tr);
4003 * Some tracers are not suitable for instance buffers.
4004 * A tracer is always available for the global array (toplevel)
4005 * or if it explicitly states that it is.
4008 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4010 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4013 /* Find the next tracer that this trace array may use */
4014 static struct tracer *
4015 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4017 while (t && !trace_ok_for_array(t, tr))
4024 t_next(struct seq_file *m, void *v, loff_t *pos)
4026 struct trace_array *tr = m->private;
4027 struct tracer *t = v;
4032 t = get_tracer_for_array(tr, t->next);
4037 static void *t_start(struct seq_file *m, loff_t *pos)
4039 struct trace_array *tr = m->private;
4043 mutex_lock(&trace_types_lock);
4045 t = get_tracer_for_array(tr, trace_types);
4046 for (; t && l < *pos; t = t_next(m, t, &l))
4052 static void t_stop(struct seq_file *m, void *p)
4054 mutex_unlock(&trace_types_lock);
4057 static int t_show(struct seq_file *m, void *v)
4059 struct tracer *t = v;
4064 seq_puts(m, t->name);
4073 static const struct seq_operations show_traces_seq_ops = {
4080 static int show_traces_open(struct inode *inode, struct file *file)
4082 struct trace_array *tr = inode->i_private;
4086 if (tracing_disabled)
4089 ret = seq_open(file, &show_traces_seq_ops);
4093 m = file->private_data;
4100 tracing_write_stub(struct file *filp, const char __user *ubuf,
4101 size_t count, loff_t *ppos)
4106 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4110 if (file->f_mode & FMODE_READ)
4111 ret = seq_lseek(file, offset, whence);
4113 file->f_pos = ret = 0;
4118 static const struct file_operations tracing_fops = {
4119 .open = tracing_open,
4121 .write = tracing_write_stub,
4122 .llseek = tracing_lseek,
4123 .release = tracing_release,
4126 static const struct file_operations show_traces_fops = {
4127 .open = show_traces_open,
4129 .release = seq_release,
4130 .llseek = seq_lseek,
4134 * The tracer itself will not take this lock, but still we want
4135 * to provide a consistent cpumask to user-space:
4137 static DEFINE_MUTEX(tracing_cpumask_update_lock);
4140 * Temporary storage for the character representation of the
4141 * CPU bitmask (and one more byte for the newline):
4143 static char mask_str[NR_CPUS + 1];
4146 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4147 size_t count, loff_t *ppos)
4149 struct trace_array *tr = file_inode(filp)->i_private;
4152 mutex_lock(&tracing_cpumask_update_lock);
4154 len = snprintf(mask_str, count, "%*pb\n",
4155 cpumask_pr_args(tr->tracing_cpumask));
4160 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
4163 mutex_unlock(&tracing_cpumask_update_lock);
4169 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4170 size_t count, loff_t *ppos)
4172 struct trace_array *tr = file_inode(filp)->i_private;
4173 cpumask_var_t tracing_cpumask_new;
4176 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4179 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4183 mutex_lock(&tracing_cpumask_update_lock);
4185 local_irq_disable();
4186 arch_spin_lock(&tr->max_lock);
4187 for_each_tracing_cpu(cpu) {
4189 * Increase/decrease the disabled counter if we are
4190 * about to flip a bit in the cpumask:
4192 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4193 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4194 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4195 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4197 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4198 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4199 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4200 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4203 arch_spin_unlock(&tr->max_lock);
4206 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4208 mutex_unlock(&tracing_cpumask_update_lock);
4209 free_cpumask_var(tracing_cpumask_new);
4214 free_cpumask_var(tracing_cpumask_new);
4219 static const struct file_operations tracing_cpumask_fops = {
4220 .open = tracing_open_generic_tr,
4221 .read = tracing_cpumask_read,
4222 .write = tracing_cpumask_write,
4223 .release = tracing_release_generic_tr,
4224 .llseek = generic_file_llseek,
4227 static int tracing_trace_options_show(struct seq_file *m, void *v)
4229 struct tracer_opt *trace_opts;
4230 struct trace_array *tr = m->private;
4234 mutex_lock(&trace_types_lock);
4235 tracer_flags = tr->current_trace->flags->val;
4236 trace_opts = tr->current_trace->flags->opts;
4238 for (i = 0; trace_options[i]; i++) {
4239 if (tr->trace_flags & (1 << i))
4240 seq_printf(m, "%s\n", trace_options[i]);
4242 seq_printf(m, "no%s\n", trace_options[i]);
4245 for (i = 0; trace_opts[i].name; i++) {
4246 if (tracer_flags & trace_opts[i].bit)
4247 seq_printf(m, "%s\n", trace_opts[i].name);
4249 seq_printf(m, "no%s\n", trace_opts[i].name);
4251 mutex_unlock(&trace_types_lock);
4256 static int __set_tracer_option(struct trace_array *tr,
4257 struct tracer_flags *tracer_flags,
4258 struct tracer_opt *opts, int neg)
4260 struct tracer *trace = tracer_flags->trace;
4263 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4268 tracer_flags->val &= ~opts->bit;
4270 tracer_flags->val |= opts->bit;
4274 /* Try to assign a tracer specific option */
4275 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4277 struct tracer *trace = tr->current_trace;
4278 struct tracer_flags *tracer_flags = trace->flags;
4279 struct tracer_opt *opts = NULL;
4282 for (i = 0; tracer_flags->opts[i].name; i++) {
4283 opts = &tracer_flags->opts[i];
4285 if (strcmp(cmp, opts->name) == 0)
4286 return __set_tracer_option(tr, trace->flags, opts, neg);
4292 /* Some tracers require overwrite to stay enabled */
4293 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4295 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4301 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4303 /* do nothing if flag is already set */
4304 if (!!(tr->trace_flags & mask) == !!enabled)
4307 /* Give the tracer a chance to approve the change */
4308 if (tr->current_trace->flag_changed)
4309 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4313 tr->trace_flags |= mask;
4315 tr->trace_flags &= ~mask;
4317 if (mask == TRACE_ITER_RECORD_CMD)
4318 trace_event_enable_cmd_record(enabled);
4320 if (mask == TRACE_ITER_RECORD_TGID) {
4322 tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
4325 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4329 trace_event_enable_tgid_record(enabled);
4332 if (mask == TRACE_ITER_EVENT_FORK)
4333 trace_event_follow_fork(tr, enabled);
4335 if (mask == TRACE_ITER_FUNC_FORK)
4336 ftrace_pid_follow_fork(tr, enabled);
4338 if (mask == TRACE_ITER_OVERWRITE) {
4339 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4340 #ifdef CONFIG_TRACER_MAX_TRACE
4341 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4345 if (mask == TRACE_ITER_PRINTK) {
4346 trace_printk_start_stop_comm(enabled);
4347 trace_printk_control(enabled);
4353 static int trace_set_options(struct trace_array *tr, char *option)
4359 size_t orig_len = strlen(option);
4361 cmp = strstrip(option);
4363 if (strncmp(cmp, "no", 2) == 0) {
4368 mutex_lock(&trace_types_lock);
4370 for (i = 0; trace_options[i]; i++) {
4371 if (strcmp(cmp, trace_options[i]) == 0) {
4372 ret = set_tracer_flag(tr, 1 << i, !neg);
4377 /* If no option could be set, test the specific tracer options */
4378 if (!trace_options[i])
4379 ret = set_tracer_option(tr, cmp, neg);
4381 mutex_unlock(&trace_types_lock);
4384 * If the first trailing whitespace is replaced with '\0' by strstrip,
4385 * turn it back into a space.
4387 if (orig_len > strlen(option))
4388 option[strlen(option)] = ' ';
4393 static void __init apply_trace_boot_options(void)
4395 char *buf = trace_boot_options_buf;
4399 option = strsep(&buf, ",");
4405 trace_set_options(&global_trace, option);
4407 /* Put back the comma to allow this to be called again */
4414 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4415 size_t cnt, loff_t *ppos)
4417 struct seq_file *m = filp->private_data;
4418 struct trace_array *tr = m->private;
4422 if (cnt >= sizeof(buf))
4425 if (copy_from_user(buf, ubuf, cnt))
4430 ret = trace_set_options(tr, buf);
4439 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4441 struct trace_array *tr = inode->i_private;
4444 if (tracing_disabled)
4447 if (trace_array_get(tr) < 0)
4450 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4452 trace_array_put(tr);
4457 static const struct file_operations tracing_iter_fops = {
4458 .open = tracing_trace_options_open,
4460 .llseek = seq_lseek,
4461 .release = tracing_single_release_tr,
4462 .write = tracing_trace_options_write,
4465 static const char readme_msg[] =
4466 "tracing mini-HOWTO:\n\n"
4467 "# echo 0 > tracing_on : quick way to disable tracing\n"
4468 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4469 " Important files:\n"
4470 " trace\t\t\t- The static contents of the buffer\n"
4471 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4472 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4473 " current_tracer\t- function and latency tracers\n"
4474 " available_tracers\t- list of configured tracers for current_tracer\n"
4475 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4476 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4477 	" trace_clock\t\t- change the clock used to order events\n"
4478 " local: Per cpu clock but may not be synced across CPUs\n"
4479 " global: Synced across CPUs but slows tracing down.\n"
4480 " counter: Not a clock, but just an increment\n"
4481 " uptime: Jiffy counter from time of boot\n"
4482 " perf: Same clock that perf events use\n"
4483 #ifdef CONFIG_X86_64
4484 " x86-tsc: TSC cycle counter\n"
4486 	"\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
4487 	"\n trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
4488 " tracing_cpumask\t- Limit which CPUs to trace\n"
4489 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4490 "\t\t\t Remove sub-buffer with rmdir\n"
4491 " trace_options\t\t- Set format or modify how tracing happens\n"
4492 	"\t\t\t Disable an option by prefixing 'no' to the\n"
4493 "\t\t\t option name\n"
4494 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4495 #ifdef CONFIG_DYNAMIC_FTRACE
4496 "\n available_filter_functions - list of functions that can be filtered on\n"
4497 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4498 "\t\t\t functions\n"
4499 "\t accepts: func_full_name or glob-matching-pattern\n"
4500 "\t modules: Can select a group via module\n"
4501 "\t Format: :mod:<module-name>\n"
4502 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4503 "\t triggers: a command to perform when function is hit\n"
4504 "\t Format: <function>:<trigger>[:count]\n"
4505 "\t trigger: traceon, traceoff\n"
4506 "\t\t enable_event:<system>:<event>\n"
4507 "\t\t disable_event:<system>:<event>\n"
4508 #ifdef CONFIG_STACKTRACE
4511 #ifdef CONFIG_TRACER_SNAPSHOT
4516 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4517 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4518 "\t The first one will disable tracing every time do_fault is hit\n"
4519 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4520 	"\t The first time do_trap is hit and it disables tracing, the\n"
4521 "\t counter will decrement to 2. If tracing is already disabled,\n"
4522 "\t the counter will not decrement. It only decrements when the\n"
4523 "\t trigger did work\n"
4524 "\t To remove trigger without count:\n"
4525 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4526 "\t To remove trigger with a count:\n"
4527 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4528 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4529 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4530 "\t modules: Can select a group via module command :mod:\n"
4531 "\t Does not accept triggers\n"
4532 #endif /* CONFIG_DYNAMIC_FTRACE */
4533 #ifdef CONFIG_FUNCTION_TRACER
4534 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4537 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4538 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4539 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4540 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4542 #ifdef CONFIG_TRACER_SNAPSHOT
4543 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4544 "\t\t\t snapshot buffer. Read the contents for more\n"
4545 "\t\t\t information\n"
4547 #ifdef CONFIG_STACK_TRACER
4548 " stack_trace\t\t- Shows the max stack trace when active\n"
4549 " stack_max_size\t- Shows current max stack size that was traced\n"
4550 "\t\t\t Write into this file to reset the max size (trigger a\n"
4551 "\t\t\t new trace)\n"
4552 #ifdef CONFIG_DYNAMIC_FTRACE
4553 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4556 #endif /* CONFIG_STACK_TRACER */
4557 #ifdef CONFIG_KPROBE_EVENTS
4558 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4559 "\t\t\t Write into this file to define/undefine new trace events.\n"
4561 #ifdef CONFIG_UPROBE_EVENTS
4562 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4563 "\t\t\t Write into this file to define/undefine new trace events.\n"
4565 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4566 "\t accepts: event-definitions (one definition per line)\n"
4567 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4568 "\t -:[<group>/]<event>\n"
4569 #ifdef CONFIG_KPROBE_EVENTS
4570 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4571 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4573 #ifdef CONFIG_UPROBE_EVENTS
4574 "\t place: <path>:<offset>\n"
4576 "\t args: <name>=fetcharg[:type]\n"
4577 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4578 "\t $stack<index>, $stack, $retval, $comm\n"
4579 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4580 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4582 " events/\t\t- Directory containing all trace event subsystems:\n"
4583 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4584 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4585 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4587 " filter\t\t- If set, only events passing filter are traced\n"
4588 " events/<system>/<event>/\t- Directory containing control files for\n"
4590 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4591 " filter\t\t- If set, only events passing filter are traced\n"
4592 " trigger\t\t- If set, a command to perform when event is hit\n"
4593 "\t Format: <trigger>[:count][if <filter>]\n"
4594 "\t trigger: traceon, traceoff\n"
4595 "\t enable_event:<system>:<event>\n"
4596 "\t disable_event:<system>:<event>\n"
4597 #ifdef CONFIG_HIST_TRIGGERS
4598 "\t enable_hist:<system>:<event>\n"
4599 "\t disable_hist:<system>:<event>\n"
4601 #ifdef CONFIG_STACKTRACE
4604 #ifdef CONFIG_TRACER_SNAPSHOT
4607 #ifdef CONFIG_HIST_TRIGGERS
4608 "\t\t hist (see below)\n"
4610 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4611 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4612 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4613 "\t events/block/block_unplug/trigger\n"
4614 "\t The first disables tracing every time block_unplug is hit.\n"
4615 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4616 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4617 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4618 "\t Like function triggers, the counter is only decremented if it\n"
4619 "\t enabled or disabled tracing.\n"
4620 "\t To remove a trigger without a count:\n"
4621 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4622 "\t To remove a trigger with a count:\n"
4623 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4624 "\t Filters can be ignored when removing a trigger.\n"
4625 #ifdef CONFIG_HIST_TRIGGERS
4626 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4627 "\t Format: hist:keys=<field1[,field2,...]>\n"
4628 "\t [:values=<field1[,field2,...]>]\n"
4629 "\t [:sort=<field1[,field2,...]>]\n"
4630 "\t [:size=#entries]\n"
4631 "\t [:pause][:continue][:clear]\n"
4632 "\t [:name=histname1]\n"
4633 "\t [if <filter>]\n\n"
4634 "\t When a matching event is hit, an entry is added to a hash\n"
4635 "\t table using the key(s) and value(s) named, and the value of a\n"
4636 "\t sum called 'hitcount' is incremented. Keys and values\n"
4637 "\t correspond to fields in the event's format description. Keys\n"
4638 "\t can be any field, or the special string 'stacktrace'.\n"
4639 "\t Compound keys consisting of up to two fields can be specified\n"
4640 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4641 "\t fields. Sort keys consisting of up to two fields can be\n"
4642 "\t specified using the 'sort' keyword. The sort direction can\n"
4643 "\t be modified by appending '.descending' or '.ascending' to a\n"
4644 "\t sort field. The 'size' parameter can be used to specify more\n"
4645 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4646 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4647 "\t its histogram data will be shared with other triggers of the\n"
4648 "\t same name, and trigger hits will update this common data.\n\n"
4649 "\t Reading the 'hist' file for the event will dump the hash\n"
4650 "\t table in its entirety to stdout. If there are multiple hist\n"
4651 "\t triggers attached to an event, there will be a table for each\n"
4652 "\t trigger in the output. The table displayed for a named\n"
4653 "\t trigger will be the same as any other instance having the\n"
4654 "\t same name. The default format used to display a given field\n"
4655 "\t can be modified by appending any of the following modifiers\n"
4656 "\t to the field name, as applicable:\n\n"
4657 "\t .hex display a number as a hex value\n"
4658 "\t .sym display an address as a symbol\n"
4659 "\t .sym-offset display an address as a symbol and offset\n"
4660 "\t .execname display a common_pid as a program name\n"
4661 "\t .syscall display a syscall id as a syscall name\n\n"
4662 "\t .log2 display log2 value rather than raw number\n\n"
4663 "\t The 'pause' parameter can be used to pause an existing hist\n"
4664 "\t trigger or to start a hist trigger but not log any events\n"
4665 "\t until told to do so. 'continue' can be used to start or\n"
4666 "\t restart a paused hist trigger.\n\n"
4667 "\t The 'clear' parameter will clear the contents of a running\n"
4668 "\t hist trigger and leave its current paused/active state\n"
4670 "\t The enable_hist and disable_hist triggers can be used to\n"
4671 "\t have one event conditionally start and stop another event's\n"
4672 	"\t already-attached hist trigger. The syntax is analogous to\n"
4673 "\t the enable_event and disable_event triggers.\n"
4678 tracing_readme_read(struct file *filp, char __user *ubuf,
4679 size_t cnt, loff_t *ppos)
4681 return simple_read_from_buffer(ubuf, cnt, ppos,
4682 readme_msg, strlen(readme_msg));
4685 static const struct file_operations tracing_readme_fops = {
4686 .open = tracing_open_generic,
4687 .read = tracing_readme_read,
4688 .llseek = generic_file_llseek,
4691 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4693 unsigned int *ptr = v;
4695 if (*pos || m->count)
4700 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4702 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4711 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4717 arch_spin_lock(&trace_cmdline_lock);
4719 v = &savedcmd->map_cmdline_to_pid[0];
4721 v = saved_cmdlines_next(m, v, &l);
4729 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4731 arch_spin_unlock(&trace_cmdline_lock);
4735 static int saved_cmdlines_show(struct seq_file *m, void *v)
4737 char buf[TASK_COMM_LEN];
4738 unsigned int *pid = v;
4740 __trace_find_cmdline(*pid, buf);
4741 seq_printf(m, "%d %s\n", *pid, buf);
4745 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4746 .start = saved_cmdlines_start,
4747 .next = saved_cmdlines_next,
4748 .stop = saved_cmdlines_stop,
4749 .show = saved_cmdlines_show,
4752 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4754 if (tracing_disabled)
4757 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4760 static const struct file_operations tracing_saved_cmdlines_fops = {
4761 .open = tracing_saved_cmdlines_open,
4763 .llseek = seq_lseek,
4764 .release = seq_release,
4768 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4769 size_t cnt, loff_t *ppos)
4774 arch_spin_lock(&trace_cmdline_lock);
4775 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4776 arch_spin_unlock(&trace_cmdline_lock);
4778 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4781 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4783 kfree(s->saved_cmdlines);
4784 kfree(s->map_cmdline_to_pid);
4788 static int tracing_resize_saved_cmdlines(unsigned int val)
4790 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4792 s = kmalloc(sizeof(*s), GFP_KERNEL);
4796 if (allocate_cmdlines_buffer(val, s) < 0) {
4801 arch_spin_lock(&trace_cmdline_lock);
4802 savedcmd_temp = savedcmd;
4804 arch_spin_unlock(&trace_cmdline_lock);
4805 free_saved_cmdlines_buffer(savedcmd_temp);
4811 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4812 size_t cnt, loff_t *ppos)
4817 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4821 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4822 if (!val || val > PID_MAX_DEFAULT)
4825 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4834 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4835 .open = tracing_open_generic,
4836 .read = tracing_saved_cmdlines_size_read,
4837 .write = tracing_saved_cmdlines_size_write,
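/*
 * Rough usage of the file backed by the handlers above (numbers are
 * illustrative; the write path accepts 1 .. PID_MAX_DEFAULT entries):
 *
 *   # cat /sys/kernel/tracing/saved_cmdlines_size
 *   128
 *   # echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 */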
4840 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4841 static union trace_eval_map_item *
4842 update_eval_map(union trace_eval_map_item *ptr)
4844 if (!ptr->map.eval_string) {
4845 if (ptr->tail.next) {
4846 ptr = ptr->tail.next;
4847 /* Set ptr to the next real item (skip head) */
4855 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
4857 union trace_eval_map_item *ptr = v;
4860 * Paranoid! If ptr points to end, we don't want to increment past it.
4861 * This really should never happen.
4863 ptr = update_eval_map(ptr);
4864 if (WARN_ON_ONCE(!ptr))
4871 ptr = update_eval_map(ptr);
4876 static void *eval_map_start(struct seq_file *m, loff_t *pos)
4878 union trace_eval_map_item *v;
4881 mutex_lock(&trace_eval_mutex);
4883 v = trace_eval_maps;
4887 while (v && l < *pos) {
4888 v = eval_map_next(m, v, &l);
4894 static void eval_map_stop(struct seq_file *m, void *v)
4896 mutex_unlock(&trace_eval_mutex);
4899 static int eval_map_show(struct seq_file *m, void *v)
4901 union trace_eval_map_item *ptr = v;
4903 seq_printf(m, "%s %ld (%s)\n",
4904 ptr->map.eval_string, ptr->map.eval_value,
4910 static const struct seq_operations tracing_eval_map_seq_ops = {
4911 .start = eval_map_start,
4912 .next = eval_map_next,
4913 .stop = eval_map_stop,
4914 .show = eval_map_show,
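/*
 * eval_map_show() above emits one line per map, roughly
 * "<eval-name> <value> (<system>)", so reading the file looks
 * something like this (entries are illustrative):
 *
 *   # cat /sys/kernel/tracing/eval_map
 *   HI_SOFTIRQ 0 (irq)
 *   TIMER_SOFTIRQ 1 (irq)
 */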
4917 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
4919 if (tracing_disabled)
4922 return seq_open(filp, &tracing_eval_map_seq_ops);
4925 static const struct file_operations tracing_eval_map_fops = {
4926 .open = tracing_eval_map_open,
4928 .llseek = seq_lseek,
4929 .release = seq_release,
4932 static inline union trace_eval_map_item *
4933 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
4935 /* Return tail of array given the head */
4936 return ptr + ptr->head.length + 1;
4940 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
4943 struct trace_eval_map **stop;
4944 struct trace_eval_map **map;
4945 union trace_eval_map_item *map_array;
4946 union trace_eval_map_item *ptr;
4951 * The trace_eval_maps contains the map plus a head and tail item,
4952 * where the head holds the module and length of array, and the
4953 * tail holds a pointer to the next list.
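*
* As a sketch, for a module exporting N maps the array looks like:
*
*   map_array[0]      head  (.mod = mod, .length = N)
*   map_array[1..N]   the trace_eval_map entries themselves
*   map_array[N+1]    tail  (.next = the next module's array, or NULL)
*
* which is why trace_eval_jmp_to_tail() above returns ptr + length + 1.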
4955 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4957 pr_warn("Unable to allocate trace eval mapping\n");
4961 mutex_lock(&trace_eval_mutex);
4963 if (!trace_eval_maps)
4964 trace_eval_maps = map_array;
4966 ptr = trace_eval_maps;
4968 ptr = trace_eval_jmp_to_tail(ptr);
4969 if (!ptr->tail.next)
4971 ptr = ptr->tail.next;
4974 ptr->tail.next = map_array;
4976 map_array->head.mod = mod;
4977 map_array->head.length = len;
4980 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4981 map_array->map = **map;
4984 memset(map_array, 0, sizeof(*map_array));
4986 mutex_unlock(&trace_eval_mutex);
4989 static void trace_create_eval_file(struct dentry *d_tracer)
4991 trace_create_file("eval_map", 0444, d_tracer,
4992 NULL, &tracing_eval_map_fops);
4995 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
4996 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
4997 static inline void trace_insert_eval_map_file(struct module *mod,
4998 struct trace_eval_map **start, int len) { }
4999 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5001 static void trace_insert_eval_map(struct module *mod,
5002 struct trace_eval_map **start, int len)
5004 struct trace_eval_map **map;
5011 trace_event_eval_update(map, len);
5013 trace_insert_eval_map_file(mod, start, len);
5017 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5018 size_t cnt, loff_t *ppos)
5020 struct trace_array *tr = filp->private_data;
5021 char buf[MAX_TRACER_SIZE+2];
5024 mutex_lock(&trace_types_lock);
5025 r = sprintf(buf, "%s\n", tr->current_trace->name);
5026 mutex_unlock(&trace_types_lock);
5028 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5031 int tracer_init(struct tracer *t, struct trace_array *tr)
5033 tracing_reset_online_cpus(&tr->trace_buffer);
5037 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5041 for_each_tracing_cpu(cpu)
5042 per_cpu_ptr(buf->data, cpu)->entries = val;
5045 #ifdef CONFIG_TRACER_MAX_TRACE
5046 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5047 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5048 struct trace_buffer *size_buf, int cpu_id)
5052 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5053 for_each_tracing_cpu(cpu) {
5054 ret = ring_buffer_resize(trace_buf->buffer,
5055 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5058 per_cpu_ptr(trace_buf->data, cpu)->entries =
5059 per_cpu_ptr(size_buf->data, cpu)->entries;
5062 ret = ring_buffer_resize(trace_buf->buffer,
5063 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5065 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5066 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5071 #endif /* CONFIG_TRACER_MAX_TRACE */
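/*
 * The resize helpers below back the "buffer_size_kb" files. A usage
 * sketch (paths and sizes illustrative; values are in KB per CPU):
 *
 *   # echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *   # echo 4096 > /sys/kernel/tracing/per_cpu/cpu1/buffer_size_kb
 *
 * The first form resizes every per-cpu ring buffer (RING_BUFFER_ALL_CPUS);
 * the second resizes only cpu1's buffer.
 */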
5073 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5074 unsigned long size, int cpu)
5079 * If kernel or user changes the size of the ring buffer
5080 * we use the size that was given, and we can forget about
5081 * expanding it later.
5083 ring_buffer_expanded = true;
5085 /* May be called before buffers are initialized */
5086 if (!tr->trace_buffer.buffer)
5089 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5093 #ifdef CONFIG_TRACER_MAX_TRACE
5094 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5095 !tr->current_trace->use_max_tr)
5098 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5100 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5101 &tr->trace_buffer, cpu);
5104 * AARGH! We are left with a different
5105 * size max buffer!
5106 * The max buffer is our "snapshot" buffer.
5107 * When a tracer needs a snapshot (one of the
5108 * latency tracers), it swaps the max buffer
5109 * with the saved snapshot. We succeeded in
5110 * updating the size of the main buffer, but failed to
5111 * update the size of the max buffer. But when we tried
5112 * to reset the main buffer to the original size, we
5113 * failed there too. This is very unlikely to
5114 * happen, but if it does, warn and kill all tracing.
5118 tracing_disabled = 1;
5123 if (cpu == RING_BUFFER_ALL_CPUS)
5124 set_buffer_entries(&tr->max_buffer, size);
5126 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5129 #endif /* CONFIG_TRACER_MAX_TRACE */
5131 if (cpu == RING_BUFFER_ALL_CPUS)
5132 set_buffer_entries(&tr->trace_buffer, size);
5134 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5139 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5140 unsigned long size, int cpu_id)
5144 mutex_lock(&trace_types_lock);
5146 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5148 /* make sure this cpu is enabled in the mask */
5148 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5154 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5159 mutex_unlock(&trace_types_lock);
5166 * tracing_update_buffers - used by tracing facility to expand ring buffers
5168 * To save memory when tracing is never used on a system that has it
5169 * configured in, the ring buffers are set to a minimum size. But once
5170 * a user starts to use the tracing facility, they need to grow
5171 * to their default size.
5173 * This function is to be called when a tracer is about to be used.
5175 int tracing_update_buffers(void)
5179 mutex_lock(&trace_types_lock);
5180 if (!ring_buffer_expanded)
5181 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5182 RING_BUFFER_ALL_CPUS);
5183 mutex_unlock(&trace_types_lock);
5188 struct trace_option_dentry;
5191 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5194 * Used to clear out the tracer before deletion of an instance.
5195 * Must have trace_types_lock held.
5197 static void tracing_set_nop(struct trace_array *tr)
5199 if (tr->current_trace == &nop_trace)
5202 tr->current_trace->enabled--;
5204 if (tr->current_trace->reset)
5205 tr->current_trace->reset(tr);
5207 tr->current_trace = &nop_trace;
5210 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5212 /* Only enable if the directory has been created already. */
5216 create_trace_option_files(tr, t);
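/*
 * tracing_set_tracer() below is what an "echo <tracer> > current_tracer"
 * write ultimately reaches via tracing_set_trace_write(). A sketch
 * (which tracers are listed depends on the kernel configuration):
 *
 *   # cat /sys/kernel/tracing/available_tracers
 *   function_graph function nop
 *   # echo function > /sys/kernel/tracing/current_tracer
 */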
5219 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5222 #ifdef CONFIG_TRACER_MAX_TRACE
5227 mutex_lock(&trace_types_lock);
5229 if (!ring_buffer_expanded) {
5230 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5231 RING_BUFFER_ALL_CPUS);
5237 for (t = trace_types; t; t = t->next) {
5238 if (strcmp(t->name, buf) == 0)
5245 if (t == tr->current_trace)
5248 /* Some tracers are only allowed for the top level buffer */
5249 if (!trace_ok_for_array(t, tr)) {
5254 /* If trace pipe files are being read, we can't change the tracer */
5255 if (tr->current_trace->ref) {
5260 trace_branch_disable();
5262 tr->current_trace->enabled--;
5264 if (tr->current_trace->reset)
5265 tr->current_trace->reset(tr);
5267 /* Current trace needs to be nop_trace before synchronize_sched */
5268 tr->current_trace = &nop_trace;
5270 #ifdef CONFIG_TRACER_MAX_TRACE
5271 had_max_tr = tr->allocated_snapshot;
5273 if (had_max_tr && !t->use_max_tr) {
5275 * We need to make sure that update_max_tr() sees that
5276 * current_trace changed to nop_trace to keep it from
5277 * swapping the buffers after we resize it.
5278 * update_max_tr() is called with interrupts disabled,
5279 * so a synchronize_sched() is sufficient.
5281 synchronize_sched();
5286 #ifdef CONFIG_TRACER_MAX_TRACE
5287 if (t->use_max_tr && !had_max_tr) {
5288 ret = alloc_snapshot(tr);
5295 ret = tracer_init(t, tr);
5300 tr->current_trace = t;
5301 tr->current_trace->enabled++;
5302 trace_branch_enable(tr);
5304 mutex_unlock(&trace_types_lock);
5310 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5311 size_t cnt, loff_t *ppos)
5313 struct trace_array *tr = filp->private_data;
5314 char buf[MAX_TRACER_SIZE+1];
5321 if (cnt > MAX_TRACER_SIZE)
5322 cnt = MAX_TRACER_SIZE;
5324 if (copy_from_user(buf, ubuf, cnt))
5329 /* strip trailing whitespace. */
5330 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5333 err = tracing_set_tracer(tr, buf);
5343 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5344 size_t cnt, loff_t *ppos)
5349 r = snprintf(buf, sizeof(buf), "%ld\n",
5350 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5351 if (r > sizeof(buf))
5353 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5357 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5358 size_t cnt, loff_t *ppos)
5363 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5373 tracing_thresh_read(struct file *filp, char __user *ubuf,
5374 size_t cnt, loff_t *ppos)
5376 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5380 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5381 size_t cnt, loff_t *ppos)
5383 struct trace_array *tr = filp->private_data;
5386 mutex_lock(&trace_types_lock);
5387 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5391 if (tr->current_trace->update_thresh) {
5392 ret = tr->current_trace->update_thresh(tr);
5399 mutex_unlock(&trace_types_lock);
5404 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5407 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5408 size_t cnt, loff_t *ppos)
5410 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5414 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5415 size_t cnt, loff_t *ppos)
5417 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5422 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5424 struct trace_array *tr = inode->i_private;
5425 struct trace_iterator *iter;
5428 if (tracing_disabled)
5431 if (trace_array_get(tr) < 0)
5434 mutex_lock(&trace_types_lock);
5436 /* create a buffer to store the information to pass to userspace */
5437 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5440 __trace_array_put(tr);
5444 trace_seq_init(&iter->seq);
5445 iter->trace = tr->current_trace;
5447 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5452 /* trace pipe does not show start of buffer */
5453 cpumask_setall(iter->started);
5455 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5456 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5458 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5459 if (trace_clocks[tr->clock_id].in_ns)
5460 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5463 iter->trace_buffer = &tr->trace_buffer;
5464 iter->cpu_file = tracing_get_cpu(inode);
5465 mutex_init(&iter->mutex);
5466 filp->private_data = iter;
5468 if (iter->trace->pipe_open)
5469 iter->trace->pipe_open(iter);
5471 nonseekable_open(inode, filp);
5473 tr->current_trace->ref++;
5475 mutex_unlock(&trace_types_lock);
5481 __trace_array_put(tr);
5482 mutex_unlock(&trace_types_lock);
5486 static int tracing_release_pipe(struct inode *inode, struct file *file)
5488 struct trace_iterator *iter = file->private_data;
5489 struct trace_array *tr = inode->i_private;
5491 mutex_lock(&trace_types_lock);
5493 tr->current_trace->ref--;
5495 if (iter->trace->pipe_close)
5496 iter->trace->pipe_close(iter);
5498 mutex_unlock(&trace_types_lock);
5500 free_cpumask_var(iter->started);
5501 mutex_destroy(&iter->mutex);
5504 trace_array_put(tr);
5510 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5512 struct trace_array *tr = iter->tr;
5514 /* Iterators are static; they should be either filled or empty */
5515 if (trace_buffer_iter(iter, iter->cpu_file))
5516 return POLLIN | POLLRDNORM;
5518 if (tr->trace_flags & TRACE_ITER_BLOCK)
5520 * Always select as readable when in blocking mode
5522 return POLLIN | POLLRDNORM;
5524 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5529 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5531 struct trace_iterator *iter = filp->private_data;
5533 return trace_poll(iter, filp, poll_table);
5536 /* Must be called with iter->mutex held. */
5537 static int tracing_wait_pipe(struct file *filp)
5539 struct trace_iterator *iter = filp->private_data;
5542 while (trace_empty(iter)) {
5544 if ((filp->f_flags & O_NONBLOCK)) {
5549 * We block until we read something and tracing is disabled.
5550 * We still block if tracing is disabled, but we have never
5551 * read anything. This allows a user to cat this file, and
5552 * then enable tracing. But after we have read something,
5553 * we give an EOF when tracing is again disabled.
5555 * iter->pos will be 0 if we haven't read anything.
5557 if (!tracing_is_on() && iter->pos)
5560 mutex_unlock(&iter->mutex);
5562 ret = wait_on_pipe(iter, false);
5564 mutex_lock(&iter->mutex);
5577 tracing_read_pipe(struct file *filp, char __user *ubuf,
5578 size_t cnt, loff_t *ppos)
5580 struct trace_iterator *iter = filp->private_data;
5584 * Avoid more than one consumer on a single file descriptor.
5585 * This is just a matter of trace coherency; the ring buffer itself is protected.
5588 mutex_lock(&iter->mutex);
5590 /* return any leftover data */
5591 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5595 trace_seq_init(&iter->seq);
5597 if (iter->trace->read) {
5598 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5604 sret = tracing_wait_pipe(filp);
5608 /* stop when tracing is finished */
5609 if (trace_empty(iter)) {
5614 if (cnt >= PAGE_SIZE)
5615 cnt = PAGE_SIZE - 1;
5617 /* reset all but tr, trace, and overruns */
5618 memset(&iter->seq, 0,
5619 sizeof(struct trace_iterator) -
5620 offsetof(struct trace_iterator, seq));
5621 cpumask_clear(iter->started);
5624 trace_event_read_lock();
5625 trace_access_lock(iter->cpu_file);
5626 while (trace_find_next_entry_inc(iter) != NULL) {
5627 enum print_line_t ret;
5628 int save_len = iter->seq.seq.len;
5630 ret = print_trace_line(iter);
5631 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5632 /* don't print partial lines */
5633 iter->seq.seq.len = save_len;
5636 if (ret != TRACE_TYPE_NO_CONSUME)
5637 trace_consume(iter);
5639 if (trace_seq_used(&iter->seq) >= cnt)
5643 * Setting the full flag means we reached the trace_seq buffer
5644 * size and should have left via the partial-output condition above.
5645 * If we get here, one of the trace_seq_* functions is not being used properly.
5647 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5650 trace_access_unlock(iter->cpu_file);
5651 trace_event_read_unlock();
5653 /* Now copy what we have to the user */
5654 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5655 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5656 trace_seq_init(&iter->seq);
5659 * If there was nothing to send to user, in spite of consuming trace
5660 * entries, go back to wait for more entries.
5666 mutex_unlock(&iter->mutex);
5671 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5674 __free_page(spd->pages[idx]);
5677 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5679 .confirm = generic_pipe_buf_confirm,
5680 .release = generic_pipe_buf_release,
5681 .steal = generic_pipe_buf_steal,
5682 .get = generic_pipe_buf_get,
5686 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5692 /* Seq buffer is page-sized, exactly what we need. */
5694 save_len = iter->seq.seq.len;
5695 ret = print_trace_line(iter);
5697 if (trace_seq_has_overflowed(&iter->seq)) {
5698 iter->seq.seq.len = save_len;
5703 * This should not be hit, because it should only
5704 * be set if the iter->seq overflowed. But check it
5705 * anyway to be safe.
5707 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5708 iter->seq.seq.len = save_len;
5712 count = trace_seq_used(&iter->seq) - save_len;
5715 iter->seq.seq.len = save_len;
5719 if (ret != TRACE_TYPE_NO_CONSUME)
5720 trace_consume(iter);
5722 if (!trace_find_next_entry_inc(iter)) {
5732 static ssize_t tracing_splice_read_pipe(struct file *filp,
5734 struct pipe_inode_info *pipe,
5738 struct page *pages_def[PIPE_DEF_BUFFERS];
5739 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5740 struct trace_iterator *iter = filp->private_data;
5741 struct splice_pipe_desc spd = {
5743 .partial = partial_def,
5744 .nr_pages = 0, /* This gets updated below. */
5745 .nr_pages_max = PIPE_DEF_BUFFERS,
5746 .ops = &tracing_pipe_buf_ops,
5747 .spd_release = tracing_spd_release_pipe,
5753 if (splice_grow_spd(pipe, &spd))
5756 mutex_lock(&iter->mutex);
5758 if (iter->trace->splice_read) {
5759 ret = iter->trace->splice_read(iter, filp,
5760 ppos, pipe, len, flags);
5765 ret = tracing_wait_pipe(filp);
5769 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5774 trace_event_read_lock();
5775 trace_access_lock(iter->cpu_file);
5777 /* Fill as many pages as possible. */
5778 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5779 spd.pages[i] = alloc_page(GFP_KERNEL);
5783 rem = tracing_fill_pipe_page(rem, iter);
5785 /* Copy the data into the page, so we can start over. */
5786 ret = trace_seq_to_buffer(&iter->seq,
5787 page_address(spd.pages[i]),
5788 trace_seq_used(&iter->seq));
5790 __free_page(spd.pages[i]);
5793 spd.partial[i].offset = 0;
5794 spd.partial[i].len = trace_seq_used(&iter->seq);
5796 trace_seq_init(&iter->seq);
5799 trace_access_unlock(iter->cpu_file);
5800 trace_event_read_unlock();
5801 mutex_unlock(&iter->mutex);
5806 ret = splice_to_pipe(pipe, &spd);
5810 splice_shrink_spd(&spd);
5814 mutex_unlock(&iter->mutex);
5819 tracing_entries_read(struct file *filp, char __user *ubuf,
5820 size_t cnt, loff_t *ppos)
5822 struct inode *inode = file_inode(filp);
5823 struct trace_array *tr = inode->i_private;
5824 int cpu = tracing_get_cpu(inode);
5829 mutex_lock(&trace_types_lock);
5831 if (cpu == RING_BUFFER_ALL_CPUS) {
5832 int cpu, buf_size_same;
5837 /* check if all cpu sizes are the same */
5838 for_each_tracing_cpu(cpu) {
5839 /* fill in the size from first enabled cpu */
5841 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5842 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5848 if (buf_size_same) {
5849 if (!ring_buffer_expanded)
5850 r = sprintf(buf, "%lu (expanded: %lu)\n",
5852 trace_buf_size >> 10);
5854 r = sprintf(buf, "%lu\n", size >> 10);
5856 r = sprintf(buf, "X\n");
5858 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5860 mutex_unlock(&trace_types_lock);
5862 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5867 tracing_entries_write(struct file *filp, const char __user *ubuf,
5868 size_t cnt, loff_t *ppos)
5870 struct inode *inode = file_inode(filp);
5871 struct trace_array *tr = inode->i_private;
5875 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5879 /* must have at least 1 entry */
5883 /* value is in KB */
5885 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5895 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5896 size_t cnt, loff_t *ppos)
5898 struct trace_array *tr = filp->private_data;
5901 unsigned long size = 0, expanded_size = 0;
5903 mutex_lock(&trace_types_lock);
5904 for_each_tracing_cpu(cpu) {
5905 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5906 if (!ring_buffer_expanded)
5907 expanded_size += trace_buf_size >> 10;
5909 if (ring_buffer_expanded)
5910 r = sprintf(buf, "%lu\n", size);
5912 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5913 mutex_unlock(&trace_types_lock);
5915 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5919 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5920 size_t cnt, loff_t *ppos)
5923 * There is no need to read what the user has written; this function
5924 * just makes sure that there is no error when "echo" is used
5933 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5935 struct trace_array *tr = inode->i_private;
5937 /* disable tracing ? */
5938 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5939 tracer_tracing_off(tr);
5940 /* resize the ring buffer to 0 */
5941 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5943 trace_array_put(tr);
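/*
 * tracing_mark_write() below implements the "trace_marker" file: any
 * string written to it is injected into the ring buffer as a TRACE_PRINT
 * event (provided the 'markers' trace option is enabled). For example:
 *
 *   # echo "hello from user space" > /sys/kernel/tracing/trace_marker
 *
 * shows up in the trace output as a print event from the writing task.
 */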
5949 tracing_mark_write(struct file *filp, const char __user *ubuf,
5950 size_t cnt, loff_t *fpos)
5952 struct trace_array *tr = filp->private_data;
5953 struct ring_buffer_event *event;
5954 struct ring_buffer *buffer;
5955 struct print_entry *entry;
5956 unsigned long irq_flags;
5957 const char faulted[] = "<faulted>";
5962 /* Used in tracing_mark_raw_write() as well */
5963 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
5965 if (tracing_disabled)
5968 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5971 if (cnt > TRACE_BUF_SIZE)
5972 cnt = TRACE_BUF_SIZE;
5974 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5976 local_save_flags(irq_flags);
5977 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
5979 /* If less than "<faulted>", then make sure we can still add that */
5980 if (cnt < FAULTED_SIZE)
5981 size += FAULTED_SIZE - cnt;
5983 buffer = tr->trace_buffer.buffer;
5984 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5985 irq_flags, preempt_count());
5986 if (unlikely(!event))
5987 /* Ring buffer disabled, return as if not open for write */
5990 entry = ring_buffer_event_data(event);
5991 entry->ip = _THIS_IP_;
5993 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
5995 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6002 if (entry->buf[cnt - 1] != '\n') {
6003 entry->buf[cnt] = '\n';
6004 entry->buf[cnt + 1] = '\0';
6006 entry->buf[cnt] = '\0';
6008 __buffer_unlock_commit(buffer, event);
6016 /* Limit it for now to 3K (including tag) */
6017 #define RAW_DATA_MAX_SIZE (1024*3)
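/*
 * tracing_mark_raw_write() below backs "trace_marker_raw": the write is
 * treated as binary data whose first sizeof(unsigned int) bytes are a
 * user-chosen tag id (hence the minimum size check below), with the rest
 * stored as opaque raw data for user-space tools to decode.
 */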
6020 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6021 size_t cnt, loff_t *fpos)
6023 struct trace_array *tr = filp->private_data;
6024 struct ring_buffer_event *event;
6025 struct ring_buffer *buffer;
6026 struct raw_data_entry *entry;
6027 const char faulted[] = "<faulted>";
6028 unsigned long irq_flags;
6033 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6035 if (tracing_disabled)
6038 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6041 /* The marker must at least have a tag id */
6042 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6045 if (cnt > TRACE_BUF_SIZE)
6046 cnt = TRACE_BUF_SIZE;
6048 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6050 local_save_flags(irq_flags);
6051 size = sizeof(*entry) + cnt;
6052 if (cnt < FAULT_SIZE_ID)
6053 size += FAULT_SIZE_ID - cnt;
6055 buffer = tr->trace_buffer.buffer;
6056 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6057 irq_flags, preempt_count());
6059 /* Ring buffer disabled, return as if not open for write */
6062 entry = ring_buffer_event_data(event);
6064 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6067 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6072 __buffer_unlock_commit(buffer, event);
6080 static int tracing_clock_show(struct seq_file *m, void *v)
6082 struct trace_array *tr = m->private;
6085 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6087 "%s%s%s%s", i ? " " : "",
6088 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6089 i == tr->clock_id ? "]" : "");
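/*
 * Reading "trace_clock" lists the available clocks with the current one
 * in brackets, matching the format built above; e.g. (the exact clock
 * list depends on the kernel configuration):
 *
 *   # cat /sys/kernel/tracing/trace_clock
 *   [local] global counter uptime perf mono mono_raw boot
 *
 * Writing one of the names switches the clock and, as noted below,
 * resets the buffers so old and new timestamps are never mixed.
 */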
6095 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6099 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6100 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6103 if (i == ARRAY_SIZE(trace_clocks))
6106 mutex_lock(&trace_types_lock);
6110 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6113 * New clock may not be consistent with the previous clock.
6114 * Reset the buffer so that it doesn't have incomparable timestamps.
6116 tracing_reset_online_cpus(&tr->trace_buffer);
6118 #ifdef CONFIG_TRACER_MAX_TRACE
6119 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
6120 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6121 tracing_reset_online_cpus(&tr->max_buffer);
6124 mutex_unlock(&trace_types_lock);
6129 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6130 size_t cnt, loff_t *fpos)
6132 struct seq_file *m = filp->private_data;
6133 struct trace_array *tr = m->private;
6135 const char *clockstr;
6138 if (cnt >= sizeof(buf))
6141 if (copy_from_user(buf, ubuf, cnt))
6146 clockstr = strstrip(buf);
6148 ret = tracing_set_clock(tr, clockstr);
6157 static int tracing_clock_open(struct inode *inode, struct file *file)
6159 struct trace_array *tr = inode->i_private;
6162 if (tracing_disabled)
6165 if (trace_array_get(tr))
6168 ret = single_open(file, tracing_clock_show, inode->i_private);
6170 trace_array_put(tr);
6175 struct ftrace_buffer_info {
6176 struct trace_iterator iter;
6178 unsigned int spare_cpu;
6182 #ifdef CONFIG_TRACER_SNAPSHOT
6183 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6185 struct trace_array *tr = inode->i_private;
6186 struct trace_iterator *iter;
6190 if (trace_array_get(tr) < 0)
6193 if (file->f_mode & FMODE_READ) {
6194 iter = __tracing_open(inode, file, true);
6196 ret = PTR_ERR(iter);
6198 /* Writes still need the seq_file to hold the private data */
6200 m = kzalloc(sizeof(*m), GFP_KERNEL);
6203 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6211 iter->trace_buffer = &tr->max_buffer;
6212 iter->cpu_file = tracing_get_cpu(inode);
6214 file->private_data = m;
6218 trace_array_put(tr);
6224 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6227 struct seq_file *m = filp->private_data;
6228 struct trace_iterator *iter = m->private;
6229 struct trace_array *tr = iter->tr;
6233 ret = tracing_update_buffers();
6237 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6241 mutex_lock(&trace_types_lock);
6243 if (tr->current_trace->use_max_tr) {
6250 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6254 if (tr->allocated_snapshot)
6258 /* Only allow per-cpu swap if the ring buffer supports it */
6259 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6260 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6265 if (!tr->allocated_snapshot) {
6266 ret = alloc_snapshot(tr);
6270 local_irq_disable();
6271 /* Now, we're going to swap */
6272 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6273 update_max_tr(tr, current, smp_processor_id());
6275 update_max_tr_single(tr, current, iter->cpu_file);
6279 if (tr->allocated_snapshot) {
6280 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6281 tracing_reset_online_cpus(&tr->max_buffer);
6283 tracing_reset(&tr->max_buffer, iter->cpu_file);
6293 mutex_unlock(&trace_types_lock);
6297 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6299 struct seq_file *m = file->private_data;
6302 ret = tracing_release(inode, file);
6304 if (file->f_mode & FMODE_READ)
6307 /* If write only, the seq_file is just a stub */
6315 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6316 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6317 size_t count, loff_t *ppos);
6318 static int tracing_buffers_release(struct inode *inode, struct file *file);
6319 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6320 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6322 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6324 struct ftrace_buffer_info *info;
6327 ret = tracing_buffers_open(inode, filp);
6331 info = filp->private_data;
6333 if (info->iter.trace->use_max_tr) {
6334 tracing_buffers_release(inode, filp);
6338 info->iter.snapshot = true;
6339 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6344 #endif /* CONFIG_TRACER_SNAPSHOT */
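/*
 * Conventional usage of the snapshot file handled above (a sketch;
 * see the ftrace documentation for the full set of values):
 *
 *   # echo 1 > /sys/kernel/tracing/snapshot   # allocate if needed, then swap
 *   # cat /sys/kernel/tracing/snapshot        # read the frozen copy
 *   # echo 0 > /sys/kernel/tracing/snapshot   # free the snapshot buffer
 */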
6347 static const struct file_operations tracing_thresh_fops = {
6348 .open = tracing_open_generic,
6349 .read = tracing_thresh_read,
6350 .write = tracing_thresh_write,
6351 .llseek = generic_file_llseek,
6354 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6355 static const struct file_operations tracing_max_lat_fops = {
6356 .open = tracing_open_generic,
6357 .read = tracing_max_lat_read,
6358 .write = tracing_max_lat_write,
6359 .llseek = generic_file_llseek,
6363 static const struct file_operations set_tracer_fops = {
6364 .open = tracing_open_generic,
6365 .read = tracing_set_trace_read,
6366 .write = tracing_set_trace_write,
6367 .llseek = generic_file_llseek,
6370 static const struct file_operations tracing_pipe_fops = {
6371 .open = tracing_open_pipe,
6372 .poll = tracing_poll_pipe,
6373 .read = tracing_read_pipe,
6374 .splice_read = tracing_splice_read_pipe,
6375 .release = tracing_release_pipe,
6376 .llseek = no_llseek,
6379 static const struct file_operations tracing_entries_fops = {
6380 .open = tracing_open_generic_tr,
6381 .read = tracing_entries_read,
6382 .write = tracing_entries_write,
6383 .llseek = generic_file_llseek,
6384 .release = tracing_release_generic_tr,
6387 static const struct file_operations tracing_total_entries_fops = {
6388 .open = tracing_open_generic_tr,
6389 .read = tracing_total_entries_read,
6390 .llseek = generic_file_llseek,
6391 .release = tracing_release_generic_tr,
6394 static const struct file_operations tracing_free_buffer_fops = {
6395 .open = tracing_open_generic_tr,
6396 .write = tracing_free_buffer_write,
6397 .release = tracing_free_buffer_release,
6400 static const struct file_operations tracing_mark_fops = {
6401 .open = tracing_open_generic_tr,
6402 .write = tracing_mark_write,
6403 .llseek = generic_file_llseek,
6404 .release = tracing_release_generic_tr,
6407 static const struct file_operations tracing_mark_raw_fops = {
6408 .open = tracing_open_generic_tr,
6409 .write = tracing_mark_raw_write,
6410 .llseek = generic_file_llseek,
6411 .release = tracing_release_generic_tr,
6414 static const struct file_operations trace_clock_fops = {
6415 .open = tracing_clock_open,
6417 .llseek = seq_lseek,
6418 .release = tracing_single_release_tr,
6419 .write = tracing_clock_write,
6422 #ifdef CONFIG_TRACER_SNAPSHOT
6423 static const struct file_operations snapshot_fops = {
6424 .open = tracing_snapshot_open,
6426 .write = tracing_snapshot_write,
6427 .llseek = tracing_lseek,
6428 .release = tracing_snapshot_release,
6431 static const struct file_operations snapshot_raw_fops = {
6432 .open = snapshot_raw_open,
6433 .read = tracing_buffers_read,
6434 .release = tracing_buffers_release,
6435 .splice_read = tracing_buffers_splice_read,
6436 .llseek = no_llseek,
6439 #endif /* CONFIG_TRACER_SNAPSHOT */
6441 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6443 struct trace_array *tr = inode->i_private;
6444 struct ftrace_buffer_info *info;
6447 if (tracing_disabled)
6450 if (trace_array_get(tr) < 0)
6453 info = kzalloc(sizeof(*info), GFP_KERNEL);
6455 trace_array_put(tr);
6459 mutex_lock(&trace_types_lock);
6462 info->iter.cpu_file = tracing_get_cpu(inode);
6463 info->iter.trace = tr->current_trace;
6464 info->iter.trace_buffer = &tr->trace_buffer;
6466 /* Force reading ring buffer for first read */
6467 info->read = (unsigned int)-1;
6469 filp->private_data = info;
6471 tr->current_trace->ref++;
6473 mutex_unlock(&trace_types_lock);
6475 ret = nonseekable_open(inode, filp);
6477 trace_array_put(tr);
6483 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6485 struct ftrace_buffer_info *info = filp->private_data;
6486 struct trace_iterator *iter = &info->iter;
6488 return trace_poll(iter, filp, poll_table);
6492 tracing_buffers_read(struct file *filp, char __user *ubuf,
6493 size_t count, loff_t *ppos)
6495 struct ftrace_buffer_info *info = filp->private_data;
6496 struct trace_iterator *iter = &info->iter;
6503 #ifdef CONFIG_TRACER_MAX_TRACE
6504 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6509 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6511 info->spare_cpu = iter->cpu_file;
6516 /* Do we have previous read data to read? */
6517 if (info->read < PAGE_SIZE)
6521 trace_access_lock(iter->cpu_file);
6522 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6526 trace_access_unlock(iter->cpu_file);
6529 if (trace_empty(iter)) {
6530 if ((filp->f_flags & O_NONBLOCK))
6533 ret = wait_on_pipe(iter, false);
6544 size = PAGE_SIZE - info->read;
6548 ret = copy_to_user(ubuf, info->spare + info->read, size);
6560 static int tracing_buffers_release(struct inode *inode, struct file *file)
6562 struct ftrace_buffer_info *info = file->private_data;
6563 struct trace_iterator *iter = &info->iter;
6565 mutex_lock(&trace_types_lock);
6567 iter->tr->current_trace->ref--;
6569 __trace_array_put(iter->tr);
6572 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6573 info->spare_cpu, info->spare);
6576 mutex_unlock(&trace_types_lock);
6582 struct ring_buffer *buffer;
6588 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6589 struct pipe_buffer *buf)
6591 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6596 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6601 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6602 struct pipe_buffer *buf)
6604 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6609 /* Pipe buffer operations for a buffer. */
6610 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6612 .confirm = generic_pipe_buf_confirm,
6613 .release = buffer_pipe_buf_release,
6614 .steal = generic_pipe_buf_steal,
6615 .get = buffer_pipe_buf_get,
6619 * Callback from splice_to_pipe(); used to release pages left over
6620 * at the end of the spd if we errored out while filling the pipe.
6622 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6624 struct buffer_ref *ref =
6625 (struct buffer_ref *)spd->partial[i].private;
6630 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6632 spd->partial[i].private = 0;
6636 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6637 struct pipe_inode_info *pipe, size_t len,
6640 struct ftrace_buffer_info *info = file->private_data;
6641 struct trace_iterator *iter = &info->iter;
6642 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6643 struct page *pages_def[PIPE_DEF_BUFFERS];
6644 struct splice_pipe_desc spd = {
6646 .partial = partial_def,
6647 .nr_pages_max = PIPE_DEF_BUFFERS,
6648 .ops = &buffer_pipe_buf_ops,
6649 .spd_release = buffer_spd_release,
6651 struct buffer_ref *ref;
6652 int entries, size, i;
6655 #ifdef CONFIG_TRACER_MAX_TRACE
6656 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6660 if (*ppos & (PAGE_SIZE - 1))
6663 if (len & (PAGE_SIZE - 1)) {
6664 if (len < PAGE_SIZE)
6669 if (splice_grow_spd(pipe, &spd))
6673 trace_access_lock(iter->cpu_file);
6674 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6676 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6680 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6687 ref->buffer = iter->trace_buffer->buffer;
6688 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6694 ref->cpu = iter->cpu_file;
6696 r = ring_buffer_read_page(ref->buffer, &ref->page,
6697 len, iter->cpu_file, 1);
6699 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6706 * zero out any leftover data; this is going to user land.
6709 size = ring_buffer_page_len(ref->page);
6710 if (size < PAGE_SIZE)
6711 memset(ref->page + size, 0, PAGE_SIZE - size);
6713 page = virt_to_page(ref->page);
6715 spd.pages[i] = page;
6716 spd.partial[i].len = PAGE_SIZE;
6717 spd.partial[i].offset = 0;
6718 spd.partial[i].private = (unsigned long)ref;
6722 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6725 trace_access_unlock(iter->cpu_file);
6728 /* did we read anything? */
6729 if (!spd.nr_pages) {
6734 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6737 ret = wait_on_pipe(iter, true);
6744 ret = splice_to_pipe(pipe, &spd);
6746 splice_shrink_spd(&spd);
6751 static const struct file_operations tracing_buffers_fops = {
6752 .open = tracing_buffers_open,
6753 .read = tracing_buffers_read,
6754 .poll = tracing_buffers_poll,
6755 .release = tracing_buffers_release,
6756 .splice_read = tracing_buffers_splice_read,
6757 .llseek = no_llseek,
6761 tracing_stats_read(struct file *filp, char __user *ubuf,
6762 size_t count, loff_t *ppos)
6764 struct inode *inode = file_inode(filp);
6765 struct trace_array *tr = inode->i_private;
6766 struct trace_buffer *trace_buf = &tr->trace_buffer;
6767 int cpu = tracing_get_cpu(inode);
6768 struct trace_seq *s;
6770 unsigned long long t;
6771 unsigned long usec_rem;
6773 s = kmalloc(sizeof(*s), GFP_KERNEL);
6779 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
6780 trace_seq_printf(s, "entries: %ld\n", cnt);
6782 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
6783 trace_seq_printf(s, "overrun: %ld\n", cnt);
6785 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
6786 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6788 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
6789 trace_seq_printf(s, "bytes: %ld\n", cnt);
6791 if (trace_clocks[tr->clock_id].in_ns) {
6792 /* local or global for trace_clock */
6793 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6794 usec_rem = do_div(t, USEC_PER_SEC);
6795 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6798 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
6799 usec_rem = do_div(t, USEC_PER_SEC);
6800 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6802 /* counter or tsc mode for trace_clock */
6803 trace_seq_printf(s, "oldest event ts: %llu\n",
6804 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6806 trace_seq_printf(s, "now ts: %llu\n",
6807 ring_buffer_time_stamp(trace_buf->buffer, cpu));
6810 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
6811 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6813 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
6814 trace_seq_printf(s, "read events: %ld\n", cnt);
6816 count = simple_read_from_buffer(ubuf, count, ppos,
6817 s->buffer, trace_seq_used(s));
6824 static const struct file_operations tracing_stats_fops = {
6825 .open = tracing_open_generic_tr,
6826 .read = tracing_stats_read,
6827 .llseek = generic_file_llseek,
6828 .release = tracing_release_generic_tr,
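/*
 * tracing_stats_read() above produces the per_cpu/cpuN/stats contents,
 * one "name: value" line per counter, roughly (numbers illustrative):
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 53248
 *   oldest event ts:  2191.242398
 *   now ts:  2200.103811
 *   dropped events: 0
 *   read events: 512
 */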
6831 #ifdef CONFIG_DYNAMIC_FTRACE
6834 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
6835 size_t cnt, loff_t *ppos)
6837 unsigned long *p = filp->private_data;
6838 char buf[64]; /* Not too big for a shallow stack */
6841 r = scnprintf(buf, 63, "%ld", *p);
6844 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6847 static const struct file_operations tracing_dyn_info_fops = {
6848 .open = tracing_open_generic,
6849 .read = tracing_read_dyn_info,
6850 .llseek = generic_file_llseek,
6852 #endif /* CONFIG_DYNAMIC_FTRACE */
6854 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6856 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
6857 struct trace_array *tr, struct ftrace_probe_ops *ops,
6860 tracing_snapshot_instance(tr);
6864 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
6865 struct trace_array *tr, struct ftrace_probe_ops *ops,
6868 struct ftrace_func_mapper *mapper = data;
6872 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6882 tracing_snapshot_instance(tr);
6886 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6887 struct ftrace_probe_ops *ops, void *data)
6889 struct ftrace_func_mapper *mapper = data;
6892 seq_printf(m, "%ps:", (void *)ip);
6894 seq_puts(m, "snapshot");
6897 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6900 seq_printf(m, ":count=%ld\n", *count);
6902 seq_puts(m, ":unlimited\n");
6908 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6909 unsigned long ip, void *init_data, void **data)
6911 struct ftrace_func_mapper *mapper = *data;
6914 mapper = allocate_ftrace_func_mapper();
6920 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
6924 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6925 unsigned long ip, void *data)
6927 struct ftrace_func_mapper *mapper = data;
6932 free_ftrace_func_mapper(mapper, NULL);
6936 ftrace_func_mapper_remove_ip(mapper, ip);
6939 static struct ftrace_probe_ops snapshot_probe_ops = {
6940 .func = ftrace_snapshot,
6941 .print = ftrace_snapshot_print,
6944 static struct ftrace_probe_ops snapshot_count_probe_ops = {
6945 .func = ftrace_count_snapshot,
6946 .print = ftrace_snapshot_print,
6947 .init = ftrace_snapshot_init,
6948 .free = ftrace_snapshot_free,
6952 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
6953 char *glob, char *cmd, char *param, int enable)
6955 struct ftrace_probe_ops *ops;
6956 void *count = (void *)-1;
6963 /* hash funcs only work with set_ftrace_filter */
6967 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6970 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
6975 number = strsep(&param, ":");
6977 if (!strlen(number))
6981 * We use the callback data field (which is a pointer) as our counter.
6984 ret = kstrtoul(number, 0, (unsigned long *)&count);
6989 ret = alloc_snapshot(tr);
6993 ret = register_ftrace_function_probe(glob, tr, ops, count);
6996 return ret < 0 ? ret : 0;
6999 static struct ftrace_func_command ftrace_snapshot_cmd = {
7001 .func = ftrace_trace_snapshot_callback,
7004 static __init int register_snapshot_cmd(void)
7006 return register_ftrace_command(&ftrace_snapshot_cmd);
7009 static inline __init int register_snapshot_cmd(void) { return 0; }
7010 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
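/*
 * The probe ops registered above implement the "snapshot" command of
 * set_ftrace_filter. A sketch (the function name is just an example):
 *
 *   # echo 'schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo 'schedule:snapshot:3' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The first form snapshots on every hit of the function; the second
 * limits it to three hits (the count parsed via strsep() above).
 * Prefixing the line with '!' removes the probe again.
 */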
7012 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7014 if (WARN_ON(!tr->dir))
7015 return ERR_PTR(-ENODEV);
7017 /* Top directory uses NULL as the parent */
7018 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7021 /* All sub buffers have a descriptor */
7025 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7027 struct dentry *d_tracer;
7030 return tr->percpu_dir;
7032 d_tracer = tracing_get_dentry(tr);
7033 if (IS_ERR(d_tracer))
7036 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7038 WARN_ONCE(!tr->percpu_dir,
7039 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7041 return tr->percpu_dir;
7044 static struct dentry *
7045 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7046 void *data, long cpu, const struct file_operations *fops)
7048 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7050 if (ret) /* See tracing_get_cpu() */
7051 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7056 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7058 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7059 struct dentry *d_cpu;
7060 char cpu_dir[30]; /* 30 characters should be more than enough */
7065 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7066 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7068 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7072 /* per cpu trace_pipe */
7073 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7074 tr, cpu, &tracing_pipe_fops);
7077 trace_create_cpu_file("trace", 0644, d_cpu,
7078 tr, cpu, &tracing_fops);
7080 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7081 tr, cpu, &tracing_buffers_fops);
7083 trace_create_cpu_file("stats", 0444, d_cpu,
7084 tr, cpu, &tracing_stats_fops);
7086 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7087 tr, cpu, &tracing_entries_fops);
7089 #ifdef CONFIG_TRACER_SNAPSHOT
7090 trace_create_cpu_file("snapshot", 0644, d_cpu,
7091 tr, cpu, &snapshot_fops);
7093 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7094 tr, cpu, &snapshot_raw_fops);
7098 #ifdef CONFIG_FTRACE_SELFTEST
7099 /* Let selftest have access to static functions in this file */
7100 #include "trace_selftest.c"
7104 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7107 struct trace_option_dentry *topt = filp->private_data;
7110 if (topt->flags->val & topt->opt->bit)
7115 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7119 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7122 struct trace_option_dentry *topt = filp->private_data;
7126 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7130 if (val != 0 && val != 1)
7133 if (!!(topt->flags->val & topt->opt->bit) != val) {
7134 mutex_lock(&trace_types_lock);
7135 ret = __set_tracer_option(topt->tr, topt->flags,
7137 mutex_unlock(&trace_types_lock);
7148 static const struct file_operations trace_options_fops = {
7149 .open = tracing_open_generic,
7150 .read = trace_options_read,
7151 .write = trace_options_write,
7152 .llseek = generic_file_llseek,
7156 * In order to pass in both the trace_array descriptor as well as the index
7157 * to the flag that the trace option file represents, the trace_array
7158 * has a character array of trace_flags_index[], which holds the index
7159 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7160 * The address of this character array is passed to the flag option file
7161 * read/write callbacks.
7163 * In order to extract both the index and the trace_array descriptor,
7164 * get_tr_index() uses the following algorithm.
7168 * The pointer passed in is the address of index[i], and the value stored
7171 * there is i itself (remember index[0] == 0, index[1] == 1, etc.).
7172 * Subtracting that value from the pointer gets us to the start of the index array:
7174 * ptr - idx == &index[0]
7176 * Then a simple container_of() from that pointer gets us to the
7177 * trace_array descriptor.
7179 static void get_tr_index(void *data, struct trace_array **ptr,
7180 unsigned int *pindex)
7182 *pindex = *(unsigned char *)data;
7184 *ptr = container_of(data - *pindex, struct trace_array,
7189 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7192 void *tr_index = filp->private_data;
7193 struct trace_array *tr;
7197 get_tr_index(tr_index, &tr, &index);
7199 if (tr->trace_flags & (1 << index))
7204 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7208 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7211 void *tr_index = filp->private_data;
7212 struct trace_array *tr;
7217 get_tr_index(tr_index, &tr, &index);
7219 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7223 if (val != 0 && val != 1)
7226 mutex_lock(&trace_types_lock);
7227 ret = set_tracer_flag(tr, 1 << index, val);
7228 mutex_unlock(&trace_types_lock);
7238 static const struct file_operations trace_options_core_fops = {
7239 .open = tracing_open_generic,
7240 .read = trace_options_core_read,
7241 .write = trace_options_core_write,
7242 .llseek = generic_file_llseek,
7245 struct dentry *trace_create_file(const char *name,
7247 struct dentry *parent,
7249 const struct file_operations *fops)
7253 ret = tracefs_create_file(name, mode, parent, data, fops);
7255 pr_warn("Could not create tracefs '%s' entry\n", name);
7261 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7263 struct dentry *d_tracer;
7268 d_tracer = tracing_get_dentry(tr);
7269 if (IS_ERR(d_tracer))
7272 tr->options = tracefs_create_dir("options", d_tracer);
7274 pr_warn("Could not create tracefs directory 'options'\n");
7282 create_trace_option_file(struct trace_array *tr,
7283 struct trace_option_dentry *topt,
7284 struct tracer_flags *flags,
7285 struct tracer_opt *opt)
7287 struct dentry *t_options;
7289 t_options = trace_options_init_dentry(tr);
7293 topt->flags = flags;
7297 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7298 &trace_options_fops);
7303 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7305 struct trace_option_dentry *topts;
7306 struct trace_options *tr_topts;
7307 struct tracer_flags *flags;
7308 struct tracer_opt *opts;
7315 flags = tracer->flags;
7317 if (!flags || !flags->opts)
7321 * If this is an instance, only create flags for tracers
7322 * the instance may have.
7324 if (!trace_ok_for_array(tracer, tr))
7327 for (i = 0; i < tr->nr_topts; i++) {
7328 /* Make sure there are no duplicate flags. */
7329 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7335 for (cnt = 0; opts[cnt].name; cnt++)
7338 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7342 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7349 tr->topts = tr_topts;
7350 tr->topts[tr->nr_topts].tracer = tracer;
7351 tr->topts[tr->nr_topts].topts = topts;
7354 for (cnt = 0; opts[cnt].name; cnt++) {
7355 create_trace_option_file(tr, &topts[cnt], flags,
7357 WARN_ONCE(topts[cnt].entry == NULL,
7358 "Failed to create trace option: %s",
7363 static struct dentry *
7364 create_trace_option_core_file(struct trace_array *tr,
7365 const char *option, long index)
7367 struct dentry *t_options;
7369 t_options = trace_options_init_dentry(tr);
7373 return trace_create_file(option, 0644, t_options,
7374 (void *)&tr->trace_flags_index[index],
7375 &trace_options_core_fops);
7378 static void create_trace_options_dir(struct trace_array *tr)
7380 struct dentry *t_options;
7381 bool top_level = tr == &global_trace;
7384 t_options = trace_options_init_dentry(tr);
7388 for (i = 0; trace_options[i]; i++) {
7390 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7391 create_trace_option_core_file(tr, trace_options[i], i);
7396 rb_simple_read(struct file *filp, char __user *ubuf,
7397 size_t cnt, loff_t *ppos)
7399 struct trace_array *tr = filp->private_data;
7403 r = tracer_tracing_is_on(tr);
7404 r = sprintf(buf, "%d\n", r);
7406 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7410 rb_simple_write(struct file *filp, const char __user *ubuf,
7411 size_t cnt, loff_t *ppos)
7413 struct trace_array *tr = filp->private_data;
7414 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7418 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7423 mutex_lock(&trace_types_lock);
7425 tracer_tracing_on(tr);
7426 if (tr->current_trace->start)
7427 tr->current_trace->start(tr);
7429 tracer_tracing_off(tr);
7430 if (tr->current_trace->stop)
7431 tr->current_trace->stop(tr);
7433 mutex_unlock(&trace_types_lock);
7441 static const struct file_operations rb_simple_fops = {
7442 .open = tracing_open_generic_tr,
7443 .read = rb_simple_read,
7444 .write = rb_simple_write,
7445 .release = tracing_release_generic_tr,
7446 .llseek = default_llseek,
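/*
 * rb_simple_write() above is the handler for the "tracing_on" file:
 * writing 0 stops recording (and calls the current tracer's ->stop()),
 * writing a non-zero value re-enables it. For example:
 *
 *   # echo 0 > /sys/kernel/tracing/tracing_on
 *   ... reproduce the interesting event ...
 *   # echo 1 > /sys/kernel/tracing/tracing_on
 */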
7449 struct dentry *trace_instance_dir;
7452 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7455 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7457 enum ring_buffer_flags rb_flags;
7459 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7463 buf->buffer = ring_buffer_alloc(size, rb_flags);
7467 buf->data = alloc_percpu(struct trace_array_cpu);
7469 ring_buffer_free(buf->buffer);
7473 /* Allocate the first page for all buffers */
7474 set_buffer_entries(&tr->trace_buffer,
7475 ring_buffer_size(tr->trace_buffer.buffer, 0));
7480 static int allocate_trace_buffers(struct trace_array *tr, int size)
7484 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7488 #ifdef CONFIG_TRACER_MAX_TRACE
7489 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7490 allocate_snapshot ? size : 1);
7492 ring_buffer_free(tr->trace_buffer.buffer);
7493 free_percpu(tr->trace_buffer.data);
7496 tr->allocated_snapshot = allocate_snapshot;
7499 * Only the top level trace array gets its snapshot allocated
7500 * from the kernel command line.
7502 allocate_snapshot = false;
7507 static void free_trace_buffer(struct trace_buffer *buf)
7510 ring_buffer_free(buf->buffer);
7512 free_percpu(buf->data);
7517 static void free_trace_buffers(struct trace_array *tr)
7522 free_trace_buffer(&tr->trace_buffer);
7524 #ifdef CONFIG_TRACER_MAX_TRACE
7525 free_trace_buffer(&tr->max_buffer);
7529 static void init_trace_flags_index(struct trace_array *tr)
7533 /* Used by the trace options files */
7534 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7535 tr->trace_flags_index[i] = i;
7538 static void __update_tracer_options(struct trace_array *tr)
7542 for (t = trace_types; t; t = t->next)
7543 add_tracer_options(tr, t);
7546 static void update_tracer_options(struct trace_array *tr)
7548 mutex_lock(&trace_types_lock);
7549 __update_tracer_options(tr);
7550 mutex_unlock(&trace_types_lock);
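/*
 * instance_mkdir()/instance_rmdir() below back the tracefs "instances"
 * directory: creating a subdirectory there allocates an independent
 * trace_array with its own ring buffers, events and options, e.g.:
 *
 *   # mkdir /sys/kernel/tracing/instances/foo
 *   # echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *   # rmdir /sys/kernel/tracing/instances/foo
 *
 * rmdir is refused while the instance is still in use (see the ref check below).
 */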
7553 static int instance_mkdir(const char *name)
7555 struct trace_array *tr;
7558 mutex_lock(&trace_types_lock);
7561 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7562 if (tr->name && strcmp(tr->name, name) == 0)
7567 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7571 tr->name = kstrdup(name, GFP_KERNEL);
7575 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7578 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7580 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7582 raw_spin_lock_init(&tr->start_lock);
7584 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7586 tr->current_trace = &nop_trace;
7588 INIT_LIST_HEAD(&tr->systems);
7589 INIT_LIST_HEAD(&tr->events);
7591 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7594 tr->dir = tracefs_create_dir(name, trace_instance_dir);
7598 ret = event_trace_add_tracer(tr->dir, tr);
7600 tracefs_remove_recursive(tr->dir);
7604 ftrace_init_trace_array(tr);
7606 init_tracer_tracefs(tr, tr->dir);
7607 init_trace_flags_index(tr);
7608 __update_tracer_options(tr);
7610 list_add(&tr->list, &ftrace_trace_arrays);
7612 mutex_unlock(&trace_types_lock);
7617 free_trace_buffers(tr);
7618 free_cpumask_var(tr->tracing_cpumask);
7623 mutex_unlock(&trace_types_lock);
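/*
 * Back end for tearing an instance down again, e.g.:
 *
 *	rmdir /sys/kernel/tracing/instances/foo
 *
 * The removal is refused while the instance or its current tracer still
 * holds references (see the tr->ref check below).
 */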
7629 static int instance_rmdir(const char *name)
7631 struct trace_array *tr;
7636 mutex_lock(&trace_types_lock);
7639 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7640 if (tr->name && strcmp(tr->name, name) == 0) {
7649 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7652 list_del(&tr->list);
7654 /* Disable all the flags that were enabled coming in */
7655 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7656 if ((1 << i) & ZEROED_TRACE_FLAGS)
7657 set_tracer_flag(tr, 1 << i, 0);
7660 tracing_set_nop(tr);
7661 clear_ftrace_function_probes(tr);
7662 event_trace_del_tracer(tr);
7663 ftrace_clear_pids(tr);
7664 ftrace_destroy_function_files(tr);
7665 tracefs_remove_recursive(tr->dir);
7666 free_trace_buffers(tr);
7668 for (i = 0; i < tr->nr_topts; i++) {
7669 kfree(tr->topts[i].topts);
7679 mutex_unlock(&trace_types_lock);
7684 static __init void create_trace_instances(struct dentry *d_tracer)
7686 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7689 if (WARN_ON(!trace_instance_dir))
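/*
 * Create the standard set of per-instance control files in @d_tracer:
 * available_tracers, current_tracer, tracing_cpumask, trace_options,
 * trace, trace_pipe, buffer_size_kb, trace_marker, trace_clock,
 * tracing_on and friends.  Used both for the top level tracing directory
 * and for every instance created under instances/.
 */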
7694 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7698 trace_create_file("available_tracers", 0444, d_tracer,
7699 tr, &show_traces_fops);
7701 trace_create_file("current_tracer", 0644, d_tracer,
7702 tr, &set_tracer_fops);
7704 trace_create_file("tracing_cpumask", 0644, d_tracer,
7705 tr, &tracing_cpumask_fops);
7707 trace_create_file("trace_options", 0644, d_tracer,
7708 tr, &tracing_iter_fops);
7710 trace_create_file("trace", 0644, d_tracer,
7713 trace_create_file("trace_pipe", 0444, d_tracer,
7714 tr, &tracing_pipe_fops);
7716 trace_create_file("buffer_size_kb", 0644, d_tracer,
7717 tr, &tracing_entries_fops);
7719 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7720 tr, &tracing_total_entries_fops);
7722 trace_create_file("free_buffer", 0200, d_tracer,
7723 tr, &tracing_free_buffer_fops);
7725 trace_create_file("trace_marker", 0220, d_tracer,
7726 tr, &tracing_mark_fops);
7728 trace_create_file("trace_marker_raw", 0220, d_tracer,
7729 tr, &tracing_mark_raw_fops);
7731 trace_create_file("trace_clock", 0644, d_tracer, tr,
7734 trace_create_file("tracing_on", 0644, d_tracer,
7735 tr, &rb_simple_fops);
7737 create_trace_options_dir(tr);
7739 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7740 trace_create_file("tracing_max_latency", 0644, d_tracer,
7741 &tr->max_latency, &tracing_max_lat_fops);
7744 if (ftrace_create_function_files(tr, d_tracer))
7745 WARN(1, "Could not allocate function filter files");
7747 #ifdef CONFIG_TRACER_SNAPSHOT
7748 trace_create_file("snapshot", 0644, d_tracer,
7749 tr, &snapshot_fops);
7752 for_each_tracing_cpu(cpu)
7753 tracing_init_tracefs_percpu(tr, cpu);
7755 ftrace_init_tracefs(tr, d_tracer);
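/*
 * Automount callback used by tracing_init_dentry() below: the first time
 * something walks into "tracing" under debugfs (e.g. running
 * "ls /sys/kernel/debug/tracing" on a system with debugfs mounted in its
 * usual place), tracefs is mounted there on the fly.
 */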
7758 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
7760 struct vfsmount *mnt;
7761 struct file_system_type *type;
7764 * To maintain backward compatibility for tools that mount
7765 * debugfs to get to the tracing facility, tracefs is automatically
7766 * mounted to the debugfs/tracing directory.
7768 type = get_fs_type("tracefs");
7771 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
7772 put_filesystem(type);
7781 * tracing_init_dentry - initialize top level trace array
7783 * This is called when creating files or directories in the tracing
7784 * directory. It is called via fs_initcall() from the boot up code
7785 * and is expected to return the dentry of the top level tracing directory.
7787 struct dentry *tracing_init_dentry(void)
7789 struct trace_array *tr = &global_trace;
7791 /* The top level trace array uses NULL as parent */
7795 if (WARN_ON(!tracefs_initialized()) ||
7796 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7797 WARN_ON(!debugfs_initialized())))
7798 return ERR_PTR(-ENODEV);
7801 * As there may still be users that expect the tracing
7802 * files to exist in debugfs/tracing, we must automount
7803 * the tracefs file system there, so older tools still
7804 * work with the newer kernel.
7806 tr->dir = debugfs_create_automount("tracing", NULL,
7807 trace_automount, NULL);
7809 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7810 return ERR_PTR(-ENOMEM);
7816 extern struct trace_eval_map *__start_ftrace_eval_maps[];
7817 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
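/*
 * Register the eval (enum/sizeof) maps that were built into the kernel
 * image; the section boundaries declared above are provided by the linker.
 */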
7819 static void __init trace_eval_init(void)
7823 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
7824 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
7827 #ifdef CONFIG_MODULES
7828 static void trace_module_add_evals(struct module *mod)
7830 if (!mod->num_trace_evals)
7834 * Modules with bad taint do not have events created; do
7835 * not bother with their eval (enum) maps either.
7837 if (trace_module_has_bad_taint(mod))
7840 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
7843 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
7844 static void trace_module_remove_evals(struct module *mod)
7846 union trace_eval_map_item *map;
7847 union trace_eval_map_item **last = &trace_eval_maps;
7849 if (!mod->num_trace_evals)
7852 mutex_lock(&trace_eval_mutex);
7854 map = trace_eval_maps;
7857 if (map->head.mod == mod)
7859 map = trace_eval_jmp_to_tail(map);
7860 last = &map->tail.next;
7861 map = map->tail.next;
7866 *last = trace_eval_jmp_to_tail(map)->tail.next;
7869 mutex_unlock(&trace_eval_mutex);
7872 static inline void trace_module_remove_evals(struct module *mod) { }
7873 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
7875 static int trace_module_notify(struct notifier_block *self,
7876 unsigned long val, void *data)
7878 struct module *mod = data;
7881 case MODULE_STATE_COMING:
7882 trace_module_add_evals(mod);
7884 case MODULE_STATE_GOING:
7885 trace_module_remove_evals(mod);
7892 static struct notifier_block trace_module_nb = {
7893 .notifier_call = trace_module_notify,
7896 #endif /* CONFIG_MODULES */
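/*
 * Create the top level tracefs files and the instances directory.
 * Registered as an fs_initcall() at the bottom of this file, so it runs
 * once tracefs is available.
 */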
7898 static __init int tracer_init_tracefs(void)
7900 struct dentry *d_tracer;
7902 trace_access_lock_init();
7904 d_tracer = tracing_init_dentry();
7905 if (IS_ERR(d_tracer))
7908 init_tracer_tracefs(&global_trace, d_tracer);
7909 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
7911 trace_create_file("tracing_thresh", 0644, d_tracer,
7912 &global_trace, &tracing_thresh_fops);
7914 trace_create_file("README", 0444, d_tracer,
7915 NULL, &tracing_readme_fops);
7917 trace_create_file("saved_cmdlines", 0444, d_tracer,
7918 NULL, &tracing_saved_cmdlines_fops);
7920 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7921 NULL, &tracing_saved_cmdlines_size_fops);
7925 trace_create_eval_file(d_tracer);
7927 #ifdef CONFIG_MODULES
7928 register_module_notifier(&trace_module_nb);
7931 #ifdef CONFIG_DYNAMIC_FTRACE
7932 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7933 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
7936 create_trace_instances(d_tracer);
7938 update_tracer_options(&global_trace);
7943 static int trace_panic_handler(struct notifier_block *this,
7944 unsigned long event, void *unused)
7946 if (ftrace_dump_on_oops)
7947 ftrace_dump(ftrace_dump_on_oops);
7951 static struct notifier_block trace_panic_notifier = {
7952 .notifier_call = trace_panic_handler,
7954 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7957 static int trace_die_handler(struct notifier_block *self,
7963 if (ftrace_dump_on_oops)
7964 ftrace_dump(ftrace_dump_on_oops);
7972 static struct notifier_block trace_die_notifier = {
7973 .notifier_call = trace_die_handler,
7978 * printk is limited to a max of 1024 characters; we really don't need it that big.
7979 * Nothing should be printing 1000 characters anyway.
7981 #define TRACE_MAX_PRINT 1000
7984 * Define here KERN_TRACE so that we have one place to modify
7985 * it if we decide to change what log level the ftrace dump should be at.
7988 #define KERN_TRACE KERN_EMERG
7991 trace_printk_seq(struct trace_seq *s)
7993 /* Probably should print a warning here. */
7994 if (s->seq.len >= TRACE_MAX_PRINT)
7995 s->seq.len = TRACE_MAX_PRINT;
7998 * More paranoid code. Although the buffer size is set to
7999 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8000 * an extra layer of protection.
8002 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8003 s->seq.len = s->seq.size - 1;
8005 /* should already be NUL terminated, but we are paranoid. */
8006 s->buffer[s->seq.len] = 0;
8008 printk(KERN_TRACE "%s", s->buffer);
8013 void trace_init_global_iter(struct trace_iterator *iter)
8015 iter->tr = &global_trace;
8016 iter->trace = iter->tr->current_trace;
8017 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8018 iter->trace_buffer = &global_trace.trace_buffer;
8020 if (iter->trace && iter->trace->open)
8021 iter->trace->open(iter);
8023 /* Annotate start of buffers if we had overruns */
8024 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8025 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8027 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8028 if (trace_clocks[iter->tr->clock_id].in_ns)
8029 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
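/*
 * Dump the ring buffer contents to the console.  This is what the panic
 * and die handlers above end up calling (and what sysrq-z uses as well);
 * only one dumper is allowed at a time and per-cpu recording is disabled
 * while the buffers are printed.
 */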
8032 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8034 /* use static because iter can be a bit big for the stack */
8035 static struct trace_iterator iter;
8036 static atomic_t dump_running;
8037 struct trace_array *tr = &global_trace;
8038 unsigned int old_userobj;
8039 unsigned long flags;
8042 /* Only allow one dump user at a time. */
8043 if (atomic_inc_return(&dump_running) != 1) {
8044 atomic_dec(&dump_running);
8049 * Always turn off tracing when we dump.
8050 * We don't need to show trace output of what happens
8051 * between multiple crashes.
8053 * If the user does a sysrq-z, then they can re-enable
8054 * tracing with echo 1 > tracing_on.
8058 local_irq_save(flags);
8060 /* Simulate the iterator */
8061 trace_init_global_iter(&iter);
8063 for_each_tracing_cpu(cpu) {
8064 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8067 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8069 /* don't look at user memory in panic mode */
8070 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8072 switch (oops_dump_mode) {
8074 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8077 iter.cpu_file = raw_smp_processor_id();
8082 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8083 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8086 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8088 /* Did function tracer already get disabled? */
8089 if (ftrace_is_dead()) {
8090 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8091 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8095 * We need to stop all tracing on all CPUs to read
8096 * the next buffer. This is a bit expensive, but is
8097 * not done often. We print everything we can read,
8098 * and then release the locks again.
8101 while (!trace_empty(&iter)) {
8104 printk(KERN_TRACE "---------------------------------\n");
8108 /* reset all but tr, trace, and overruns */
8109 memset(&iter.seq, 0,
8110 sizeof(struct trace_iterator) -
8111 offsetof(struct trace_iterator, seq));
8112 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8115 if (trace_find_next_entry_inc(&iter) != NULL) {
8118 ret = print_trace_line(&iter);
8119 if (ret != TRACE_TYPE_NO_CONSUME)
8120 trace_consume(&iter);
8122 touch_nmi_watchdog();
8124 trace_printk_seq(&iter.seq);
8128 printk(KERN_TRACE " (ftrace buffer empty)\n");
8130 printk(KERN_TRACE "---------------------------------\n");
8133 tr->trace_flags |= old_userobj;
8135 for_each_tracing_cpu(cpu) {
8136 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8138 atomic_dec(&dump_running);
8139 local_irq_restore(flags);
8141 EXPORT_SYMBOL_GPL(ftrace_dump);
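/*
 * Boot-time construction of the global trace array: allocate the cpumasks,
 * the temp buffer, the saved-cmdlines buffer and the ring buffers, register
 * the nop tracer and hook up the panic and die notifiers.  Called from
 * early_trace_init() below.
 */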
8143 __init static int tracer_alloc_buffers(void)
8149 * Make sure we don't accidentally add more trace options
8150 * than we have bits for.
8152 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8154 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8157 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8158 goto out_free_buffer_mask;
8160 /* Only allocate trace_printk buffers if a trace_printk exists */
8161 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
8162 /* Must be called before global_trace.buffer is allocated */
8163 trace_printk_init_buffers();
8165 /* To save memory, keep the ring buffer size to its minimum */
8166 if (ring_buffer_expanded)
8167 ring_buf_size = trace_buf_size;
8171 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8172 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8174 raw_spin_lock_init(&global_trace.start_lock);
8177 * The prepare callback allocates some memory for the ring buffer. We
8178 * don't free the buffer if the CPU goes down. If we were to free
8179 * the buffer, then the user would lose any trace that was in the
8180 * buffer. The memory will be removed once the "instance" is removed.
8182 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8183 "trace/RB:prepare", trace_rb_cpu_prepare,
8186 goto out_free_cpumask;
8187 /* Used for event triggers */
8188 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8190 goto out_rm_hp_state;
8192 if (trace_create_savedcmd() < 0)
8193 goto out_free_temp_buffer;
8195 /* TODO: make the number of buffers hot pluggable with CPUs */
8196 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8197 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8199 goto out_free_savedcmd;
8202 if (global_trace.buffer_disabled)
8205 if (trace_boot_clock) {
8206 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8208 pr_warn("Trace clock %s not defined, going back to default\n",
8213 * register_tracer() might reference current_trace, so it
8214 * needs to be set before we register anything. This is
8215 * just a bootstrap of current_trace anyway.
8217 global_trace.current_trace = &nop_trace;
8219 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8221 ftrace_init_global_array_ops(&global_trace);
8223 init_trace_flags_index(&global_trace);
8225 register_tracer(&nop_trace);
8227 /* Function tracing may start here (via kernel command line) */
8228 init_function_trace();
8230 /* All seems OK, enable tracing */
8231 tracing_disabled = 0;
8233 atomic_notifier_chain_register(&panic_notifier_list,
8234 &trace_panic_notifier);
8236 register_die_notifier(&trace_die_notifier);
8238 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8240 INIT_LIST_HEAD(&global_trace.systems);
8241 INIT_LIST_HEAD(&global_trace.events);
8242 list_add(&global_trace.list, &ftrace_trace_arrays);
8244 apply_trace_boot_options();
8246 register_snapshot_cmd();
8251 free_saved_cmdlines_buffer(savedcmd);
8252 out_free_temp_buffer:
8253 ring_buffer_free(temp_buffer);
8255 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8257 free_cpumask_var(global_trace.tracing_cpumask);
8258 out_free_buffer_mask:
8259 free_cpumask_var(tracing_buffer_mask);
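/*
 * Called early from start_kernel().  If tracepoint_printk was set on the
 * command line (typically via the "tp_printk" boot option), allocate the
 * iterator used to route tracepoint output to printk, then allocate the
 * trace buffers themselves.
 */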
8264 void __init early_trace_init(void)
8266 if (tracepoint_printk) {
8267 tracepoint_print_iter =
8268 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8269 if (WARN_ON(!tracepoint_print_iter))
8270 tracepoint_printk = 0;
8272 static_key_enable(&tracepoint_printk_key.key);
8274 tracer_alloc_buffers();
8277 void __init trace_init(void)
8282 __init static int clear_boot_tracer(void)
8285 * The default bootup tracer name points into an init section.
8286 * This function is called as a late initcall. If we did not
8287 * find the boot tracer, then clear it out, to prevent
8288 * later registration from accessing the buffer that is
8289 * about to be freed.
8291 if (!default_bootup_tracer)
8294 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8295 default_bootup_tracer);
8296 default_bootup_tracer = NULL;
8301 fs_initcall(tracer_init_tracefs);
8302 late_initcall(clear_boot_tracer);