Merge commit 'v2.6.28-rc7'; branch 'x86/dumpstack' into tracing/ftrace
author    Ingo Molnar <mingo@elte.hu>
          Wed, 3 Dec 2008 07:54:47 +0000 (08:54 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 3 Dec 2008 07:55:34 +0000 (08:55 +0100)
Merge x86/dumpstack into tracing/ftrace because upcoming ftrace changes
depend on cleanups already in x86/dumpstack.

Also merge to latest upstream -rc.

108 files changed:
Documentation/ftrace.txt
Documentation/kernel-parameters.txt
Documentation/markers.txt
Documentation/tracepoints.txt
arch/powerpc/include/asm/ftrace.h
arch/powerpc/include/asm/module.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/ftrace.c
arch/powerpc/kernel/idle.c
arch/powerpc/kernel/module_32.c
arch/powerpc/kernel/module_64.c
arch/powerpc/lib/Makefile
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/Kconfig.debug
arch/x86/include/asm/ds.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/Makefile
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/ds.c
arch/x86/kernel/dumpstack.c [new file with mode: 0644]
arch/x86/kernel/dumpstack.h [new file with mode: 0644]
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/ftrace.c
arch/x86/kernel/process.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/stacktrace.c
arch/x86/kernel/vsyscall_64.c
arch/x86/mm/Makefile
arch/x86/mm/fault.c
arch/x86/vdso/vclock_gettime.c
block/Kconfig
block/blk-core.c
block/blktrace.c
block/elevator.c
drivers/char/sysrq.c
drivers/md/dm.c
fs/bio.c
fs/seq_file.c
include/asm-generic/vmlinux.lds.h
include/linux/blktrace_api.h
include/linux/compiler.h
include/linux/ftrace.h
include/linux/ftrace_irq.h [new file with mode: 0644]
include/linux/hardirq.h
include/linux/marker.h
include/linux/rcupdate.h
include/linux/ring_buffer.h
include/linux/sched.h
include/linux/seq_file.h
include/linux/stacktrace.h
include/linux/tracepoint.h
include/linux/tty.h
include/trace/block.h [new file with mode: 0644]
include/trace/boot.h [new file with mode: 0644]
include/trace/sched.h
init/Kconfig
init/main.c
kernel/Makefile
kernel/exit.c
kernel/fork.c
kernel/kthread.c
kernel/marker.c
kernel/module.c
kernel/power/disk.c
kernel/power/main.c
kernel/profile.c
kernel/sched.c
kernel/signal.c
kernel/sysctl.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_boot.c
kernel/trace/trace_branch.c [new file with mode: 0644]
kernel/trace/trace_bts.c [new file with mode: 0644]
kernel/trace/trace_functions.c
kernel/trace/trace_functions_graph.c [new file with mode: 0644]
kernel/trace/trace_irqsoff.c
kernel/trace/trace_mmiotrace.c
kernel/trace/trace_nop.c
kernel/trace/trace_power.c [new file with mode: 0644]
kernel/trace/trace_sched_switch.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_selftest.c
kernel/trace/trace_stack.c
kernel/trace/trace_sysprof.c
kernel/tracepoint.c
mm/bounce.c
samples/tracepoints/tp-samples-trace.h
samples/tracepoints/tracepoint-probe-sample.c
samples/tracepoints/tracepoint-probe-sample2.c
samples/tracepoints/tracepoint-sample.c
scripts/Makefile.build
scripts/bootgraph.pl
scripts/recordmcount.pl
scripts/trace/power.pl [new file with mode: 0644]
scripts/tracing/draw_functrace.py [new file with mode: 0644]

index 9cc4d685dde583464cbfbd9c7448358c2de90ef3..803b1318b13da11242a3558123d28293d3ee4378 100644 (file)
@@ -82,7 +82,7 @@ of ftrace. Here is a list of some of the key files:
                tracer is not adding more data, they will display
                the same information every time they are read.
 
-  iter_ctrl: This file lets the user control the amount of data
+  trace_options: This file lets the user control the amount of data
                that is displayed in one of the above output
                files.
 
@@ -94,10 +94,10 @@ of ftrace. Here is a list of some of the key files:
                only be recorded if the latency is greater than
                the value in this file. (in microseconds)
 
-  trace_entries: This sets or displays the number of bytes each CPU
+  buffer_size_kb: This sets or displays the number of kilobytes each CPU
                buffer can hold. The tracer buffers are the same size
                for each CPU. The displayed number is the size of the
-                CPU buffer and not total size of all buffers. The
+               CPU buffer and not total size of all buffers. The
                trace buffers are allocated in pages (blocks of memory
                that the kernel uses for allocation, usually 4 KB in size).
                If the last page allocated has room for more bytes
@@ -127,6 +127,8 @@ of ftrace. Here is a list of some of the key files:
                be traced. If a function exists in both set_ftrace_filter
                and set_ftrace_notrace, the function will _not_ be traced.
 
+  set_ftrace_pid: Have the function tracer only trace a single thread.
+
   available_filter_functions: This lists the functions that ftrace
                has processed and can trace. These are the function
                names that you can pass to "set_ftrace_filter" or
@@ -316,23 +318,23 @@ The above is mostly meaningful for kernel developers.
   The rest is the same as the 'trace' file.
 
 
-iter_ctrl
----------
+trace_options
+-------------
 
-The iter_ctrl file is used to control what gets printed in the trace
+The trace_options file is used to control what gets printed in the trace
 output. To see what is available, simply cat the file:
 
-  cat /debug/tracing/iter_ctrl
+  cat /debug/tracing/trace_options
   print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
- noblock nostacktrace nosched-tree
+ noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj
 
 To disable one of the options, echo in the option prepended with "no".
 
-  echo noprint-parent > /debug/tracing/iter_ctrl
+  echo noprint-parent > /debug/tracing/trace_options
 
 To enable an option, leave off the "no".
 
-  echo sym-offset > /debug/tracing/iter_ctrl
+  echo sym-offset > /debug/tracing/trace_options
 
 Here are the available options:
 
@@ -378,6 +380,20 @@ Here are the available options:
                When a trace is recorded, so is the stack of functions.
                This allows for back traces of trace sites.
 
+  userstacktrace - This option changes the trace. It records a
+                  stacktrace of the current userspace thread.
+
+  sym-userobj - when user stacktraces are enabled, look up which object
+               the address belongs to, and print a relative address.
+               This is especially useful when ASLR is on; otherwise you
+               don't get a chance to resolve the address to
+               object/file/line after the app is no longer running.
+
+               The lookup is performed when you read trace, trace_pipe
+               or latency_trace. Example:
+
+               a.out-1623  [000] 40874.465068: /root/a.out[+0x480] <-/root/a.out[+0x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
+
   sched-tree - TBD (any users??)
 
 
@@ -1059,6 +1075,83 @@ For simple one-time traces, the above is sufficient. For anything else,
 a search through /proc/mounts may be needed to find where the debugfs
 file-system is mounted.
 
+
+Single thread tracing
+---------------------
+
+By writing into /debug/tracing/set_ftrace_pid you can trace a
+single thread. For example:
+
+# cat /debug/tracing/set_ftrace_pid
+no pid
+# echo 3111 > /debug/tracing/set_ftrace_pid
+# cat /debug/tracing/set_ftrace_pid
+3111
+# echo function > /debug/tracing/current_tracer
+# cat /debug/tracing/trace | head
+ # tracer: function
+ #
+ #           TASK-PID    CPU#    TIMESTAMP  FUNCTION
+ #              | |       |          |         |
+     yum-updatesd-3111  [003]  1637.254676: finish_task_switch <-thread_return
+     yum-updatesd-3111  [003]  1637.254681: hrtimer_cancel <-schedule_hrtimeout_range
+     yum-updatesd-3111  [003]  1637.254682: hrtimer_try_to_cancel <-hrtimer_cancel
+     yum-updatesd-3111  [003]  1637.254683: lock_hrtimer_base <-hrtimer_try_to_cancel
+     yum-updatesd-3111  [003]  1637.254685: fget_light <-do_sys_poll
+     yum-updatesd-3111  [003]  1637.254686: pipe_poll <-do_sys_poll
+# echo -1 > /debug/tracing/set_ftrace_pid
+# cat /debug/tracing/trace |head
+ # tracer: function
+ #
+ #           TASK-PID    CPU#    TIMESTAMP  FUNCTION
+ #              | |       |          |         |
+ ##### CPU 3 buffer started ####
+     yum-updatesd-3111  [003]  1701.957688: free_poll_entry <-poll_freewait
+     yum-updatesd-3111  [003]  1701.957689: remove_wait_queue <-free_poll_entry
+     yum-updatesd-3111  [003]  1701.957691: fput <-free_poll_entry
+     yum-updatesd-3111  [003]  1701.957692: audit_syscall_exit <-sysret_audit
+     yum-updatesd-3111  [003]  1701.957693: path_put <-audit_syscall_exit
+
+If you want to trace a command from the moment it starts executing,
+you could use something like this simple launcher program:
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main (int argc, char **argv)
+{
+        if (argc < 2)
+                exit(-1);
+
+        if (fork() > 0) {
+                int fd, ffd;
+                char line[64];
+                int s;
+
+                ffd = open("/debug/tracing/current_tracer", O_WRONLY);
+                if (ffd < 0)
+                        exit(-1);
+                write(ffd, "nop", 3);
+
+                fd = open("/debug/tracing/set_ftrace_pid", O_WRONLY);
+                if (fd < 0)
+                        exit(-1);
+                s = sprintf(line, "%d\n", getpid());
+                write(fd, line, s);
+
+                write(ffd, "function", 8);
+
+                close(fd);
+                close(ffd);
+
+                execvp(argv[1], argv+1);
+        }
+
+        return 0;
+}
+
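Compiled as, say, "gcc -o ftrace-exec ftrace-exec.c" (the name is
arbitrary) and run as "./ftrace-exec <command>", the launcher resets the
current tracer to nop, writes its own PID into set_ftrace_pid, enables
the function tracer, and only then execs the command in the same
process, so the kernel functions executed on behalf of the command are
traced from the moment it starts.
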
 dynamic ftrace
 --------------
 
@@ -1158,7 +1251,11 @@ These are the only wild cards which are supported.
 
   <match>*<match> will not work.
 
- # echo hrtimer_* > /debug/tracing/set_ftrace_filter
+Note: It is better to use quotes to enclose the wild cards, otherwise
+  the shell may expand the parameters into names of files in the local
+  directory.
+
+ # echo 'hrtimer_*' > /debug/tracing/set_ftrace_filter
 
 Produces:
 
@@ -1213,7 +1310,7 @@ Again, now we want to append.
  # echo sys_nanosleep > /debug/tracing/set_ftrace_filter
  # cat /debug/tracing/set_ftrace_filter
 sys_nanosleep
- # echo hrtimer_* >> /debug/tracing/set_ftrace_filter
+ # echo 'hrtimer_*' >> /debug/tracing/set_ftrace_filter
  # cat /debug/tracing/set_ftrace_filter
 hrtimer_run_queues
 hrtimer_run_pending
@@ -1299,41 +1396,29 @@ trace entries
 -------------
 
 Having too much or not enough data can be troublesome in diagnosing
-an issue in the kernel. The file trace_entries is used to modify
+an issue in the kernel. The file buffer_size_kb is used to modify
-the size of the internal trace buffers. The number listed
-is the number of entries that can be recorded per CPU. To know
-the full size, multiply the number of possible CPUS with the
-number of entries.
+the size of the internal trace buffers. The number listed
+is the number of kilobytes each CPU buffer can hold. To know
+the full size, multiply the number of possible CPUs by that
+number.
 
- # cat /debug/tracing/trace_entries
-65620
+ # cat /debug/tracing/buffer_size_kb
+1408 (units kilobytes)
 
 Note, to modify this, you must have tracing completely disabled. To do that,
 echo "nop" into the current_tracer. If the current_tracer is not set
 to "nop", an EINVAL error will be returned.
 
  # echo nop > /debug/tracing/current_tracer
- # echo 100000 > /debug/tracing/trace_entries
- # cat /debug/tracing/trace_entries
-100045
-
-
-Notice that we echoed in 100,000 but the size is 100,045. The entries
-are held in individual pages. It allocates the number of pages it takes
-to fulfill the request. If more entries may fit on the last page
-then they will be added.
-
- # echo 1 > /debug/tracing/trace_entries
- # cat /debug/tracing/trace_entries
-85
-
-This shows us that 85 entries can fit in a single page.
+ # echo 10000 > /debug/tracing/buffer_size_kb
+ # cat /debug/tracing/buffer_size_kb
+10000 (units kilobytes)
 
 The number of pages which will be allocated is limited to a percentage
 of available memory. Allocating too much will produce an error.
 
- # echo 1000000000000 > /debug/tracing/trace_entries
+ # echo 1000000000000 > /debug/tracing/buffer_size_kb
 -bash: echo: write error: Cannot allocate memory
- # cat /debug/tracing/trace_entries
+ # cat /debug/tracing/buffer_size_kb
-85
+10000 (units kilobytes)
 
index e0f346d201edb70fae654c55f6be842c4465a5ff..2919a2e919388cd572a7e99559c90e92dc544a28 100644 (file)
@@ -750,6 +750,14 @@ and is between 256 and 4096 characters. It is defined in the file
                        parameter will force ia64_sal_cache_flush to call
                        ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
 
+       ftrace=[tracer]
+                       [ftrace] will set and start the specified tracer
+                       as early as possible in order to facilitate early
+                       boot debugging.
+
+       ftrace_dump_on_oops
+                       [ftrace] will dump the trace buffers on oops.
+
        gamecon.map[2|3]=
                        [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
                        support via parallel port (up to 5 devices per port)
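
For example (an illustrative combination of the two new parameters, not
part of the patch), booting with:

        ftrace=function ftrace_dump_on_oops

on the kernel command line starts the function tracer during early boot
and dumps the trace buffers to the console if the kernel oopses.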
index 089f6138fcd94249a6444ca3a932a50c263098e1..d2b3d0e91b26d21423d8202668ac8d512558e7dd 100644 (file)
@@ -51,11 +51,16 @@ to call) for the specific marker through marker_probe_register() and can be
 activated by calling marker_arm(). Marker deactivation can be done by calling
 marker_disarm() as many times as marker_arm() has been called. Removing a probe
 is done through marker_probe_unregister(); it will disarm the probe.
-marker_synchronize_unregister() must be called before the end of the module exit
-function to make sure there is no caller left using the probe. This, and the
-fact that preemption is disabled around the probe call, make sure that probe
-removal and module unload are safe. See the "Probe example" section below for a
-sample probe module.
+
+marker_synchronize_unregister() must be called between probe unregistration and
+the first occurrence of
+- the end of module exit function,
+  to make sure there is no caller left using the probe;
+- the free of any resource used by the probes,
+  to make sure the probes won't be accessing invalid data.
+This, and the fact that preemption is disabled around the probe call, make sure
+that probe removal and module unload are safe. See the "Probe example" section
+below for a sample probe module.
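
A minimal sketch of the exit-path ordering this implies
(probe_subsys_eventname and probe_data are illustrative names, not
taken from the sample module):

        static void __exit probe_fini(void)
        {
                marker_probe_unregister("subsys_eventname",
                                        probe_subsys_eventname, NULL);
                /* wait until no CPU is still executing the probe */
                marker_synchronize_unregister();
                /* only now is it safe to free data the probe used */
                kfree(probe_data);
        }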
 
 The marker mechanism supports inserting multiple instances of the same marker.
 Markers can be put in inline functions, inlined static functions, and
@@ -70,6 +75,20 @@ a printk warning which identifies the inconsistency:
 
 "Format mismatch for probe probe_name (format), marker (format)"
 
+Another way to use markers is to simply define the marker without generating any
+function call to actually call into the marker. This is useful in combination
+with tracepoint probes in a scheme like this:
+
+void probe_tracepoint_name(unsigned int arg1, struct task_struct *tsk);
+
+DEFINE_MARKER_TP(marker_eventname, tracepoint_name, probe_tracepoint_name,
+       "arg1 %u pid %d");
+
+notrace void probe_tracepoint_name(unsigned int arg1, struct task_struct *tsk)
+{
+       struct marker *marker = &GET_MARKER(marker_eventname);
+       /* write data to trace buffers ... */
+}
 
 * Probe / marker example
 
index 5d354e16749447c667934ecd4c8f4fe5ee3da36c..6f0a044f5b5e51e27f2bdae2cd6d68123eb98bd7 100644 (file)
@@ -3,28 +3,30 @@
                            Mathieu Desnoyers
 
 
-This document introduces Linux Kernel Tracepoints and their use. It provides
-examples of how to insert tracepoints in the kernel and connect probe functions
-to them and provides some examples of probe functions.
+This document introduces Linux Kernel Tracepoints and their use. It
+provides examples of how to insert tracepoints in the kernel and
+connect probe functions to them and provides some examples of probe
+functions.
 
 
 * Purpose of tracepoints
 
-A tracepoint placed in code provides a hook to call a function (probe) that you
-can provide at runtime. A tracepoint can be "on" (a probe is connected to it) or
-"off" (no probe is attached). When a tracepoint is "off" it has no effect,
-except for adding a tiny time penalty (checking a condition for a branch) and
-space penalty (adding a few bytes for the function call at the end of the
-instrumented function and adds a data structure in a separate section).  When a
-tracepoint is "on", the function you provide is called each time the tracepoint
-is executed, in the execution context of the caller. When the function provided
-ends its execution, it returns to the caller (continuing from the tracepoint
-site).
+A tracepoint placed in code provides a hook to call a function (probe)
+that you can provide at runtime. A tracepoint can be "on" (a probe is
+connected to it) or "off" (no probe is attached). When a tracepoint is
+"off" it has no effect, except for adding a tiny time penalty
+(checking a condition for a branch) and space penalty (adding a few
+bytes for the function call at the end of the instrumented function
+and adds a data structure in a separate section).  When a tracepoint
+is "on", the function you provide is called each time the tracepoint
+is executed, in the execution context of the caller. When the function
+provided ends its execution, it returns to the caller (continuing from
+the tracepoint site).
 
 You can put tracepoints at important locations in the code. They are
 lightweight hooks that can pass an arbitrary number of parameters,
-which prototypes are described in a tracepoint declaration placed in a header
-file.
+whose prototypes are described in a tracepoint declaration placed in a
+header file.
 
 They can be used for tracing and performance accounting.
 
@@ -42,14 +44,16 @@ In include/trace/subsys.h :
 
 #include <linux/tracepoint.h>
 
-DEFINE_TRACE(subsys_eventname,
-       TPPTOTO(int firstarg, struct task_struct *p),
+DECLARE_TRACE(subsys_eventname,
+       TPPROTO(int firstarg, struct task_struct *p),
        TPARGS(firstarg, p));
 
 In subsys/file.c (where the tracing statement must be added) :
 
 #include <trace/subsys.h>
 
+DEFINE_TRACE(subsys_eventname);
+
 void somefct(void)
 {
        ...
@@ -61,31 +65,41 @@ Where :
 - subsys_eventname is an identifier unique to your event
     - subsys is the name of your subsystem.
     - eventname is the name of the event to trace.
-- TPPTOTO(int firstarg, struct task_struct *p) is the prototype of the function
-  called by this tracepoint.
-- TPARGS(firstarg, p) are the parameters names, same as found in the prototype.
 
-Connecting a function (probe) to a tracepoint is done by providing a probe
-(function to call) for the specific tracepoint through
-register_trace_subsys_eventname().  Removing a probe is done through
-unregister_trace_subsys_eventname(); it will remove the probe sure there is no
-caller left using the probe when it returns. Probe removal is preempt-safe
-because preemption is disabled around the probe call. See the "Probe example"
-section below for a sample probe module.
-
-The tracepoint mechanism supports inserting multiple instances of the same
-tracepoint, but a single definition must be made of a given tracepoint name over
-all the kernel to make sure no type conflict will occur. Name mangling of the
-tracepoints is done using the prototypes to make sure typing is correct.
-Verification of probe type correctness is done at the registration site by the
-compiler. Tracepoints can be put in inline functions, inlined static functions,
-and unrolled loops as well as regular functions.
-
-The naming scheme "subsys_event" is suggested here as a convention intended
-to limit collisions. Tracepoint names are global to the kernel: they are
-considered as being the same whether they are in the core kernel image or in
-modules.
+- TPPROTO(int firstarg, struct task_struct *p) is the prototype of the
+  function called by this tracepoint.
 
+- TPARGS(firstarg, p) are the parameter names, the same as found in the
+  prototype.
+
+Connecting a function (probe) to a tracepoint is done by providing a
+probe (function to call) for the specific tracepoint through
+register_trace_subsys_eventname().  Removing a probe is done through
+unregister_trace_subsys_eventname(); it will remove the probe.
+
+tracepoint_synchronize_unregister() must be called before the end of
+the module exit function to make sure there is no caller left using
+the probe. This, and the fact that preemption is disabled around the
+probe call, make sure that probe removal and module unload are safe.
+See the "Probe example" section below for a sample probe module.
+
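A minimal sketch of that exit-path ordering for the subsys_eventname
example above (probe_subsys_eventname is an illustrative probe
function, not a real kernel symbol):

        static void __exit tp_probe_fini(void)
        {
                unregister_trace_subsys_eventname(probe_subsys_eventname);
                /* wait until no tracepoint caller is still in the probe */
                tracepoint_synchronize_unregister();
        }
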
+The tracepoint mechanism supports inserting multiple instances of the
+same tracepoint, but a single definition must be made of a given
+tracepoint name throughout the kernel to make sure no type conflict
+occurs. Name mangling of the tracepoints is done using the prototypes
+to make sure typing is correct. Verification of probe type correctness
+is done at the registration site by the compiler. Tracepoints can be
+put in inline functions, inlined static functions, and unrolled loops
+as well as regular functions.
+
+The naming scheme "subsys_event" is suggested here as a convention
+intended to limit collisions. Tracepoint names are global to the
+kernel: they are considered as being the same whether they are in the
+core kernel image or in modules.
+
+If the tracepoint has to be used in kernel modules, an
+EXPORT_TRACEPOINT_SYMBOL_GPL() or EXPORT_TRACEPOINT_SYMBOL() can be
+used to export the defined tracepoints.
 
 * Probe / tracepoint example
 
index b298f7a631e6cd9620094c72da4c82832afcdb43..e5f2ae8362f7ea8e15fcaca975fefa6e3662010b 100644 (file)
@@ -7,7 +7,19 @@
 
 #ifndef __ASSEMBLY__
 extern void _mcount(void);
-#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       /* relocation of the mcount call site is the same as the address */
+       return addr;
+}
+
+struct dyn_arch_ftrace {
+       struct module *mod;
+};
+#endif /*  CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
 
 #endif
 
index e5f14b13ccf0ff2c11ef8392b6697750608ca919..08454880a2c047258da91b4968f080f4e38fc4af 100644 (file)
@@ -34,11 +34,19 @@ struct mod_arch_specific {
 #ifdef __powerpc64__
        unsigned int stubs_section;     /* Index of stubs section in module */
        unsigned int toc_section;       /* What section is the TOC? */
-#else
+#ifdef CONFIG_DYNAMIC_FTRACE
+       unsigned long toc;
+       unsigned long tramp;
+#endif
+
+#else /* powerpc64 */
        /* Indices of PLT sections within module. */
        unsigned int core_plt_section;
        unsigned int init_plt_section;
+#ifdef CONFIG_DYNAMIC_FTRACE
+       unsigned long tramp;
 #endif
+#endif /* powerpc64 */
 
        /* List of BUG addresses, source line numbers and filenames */
        struct list_head bug_list;
@@ -68,6 +76,12 @@ struct mod_arch_specific {
 #    endif     /* MODULE */
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#    ifdef MODULE
+       asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
+#    endif     /* MODULE */
+#endif
+
 
 struct exception_table_entry;
 void sort_ex_table(struct exception_table_entry *start,
index 92673b43858d067d6ccfab7a1cc083018d4a5c4f..d17edb4a2f9d57490cc8847b881b07fc3f06d7b9 100644 (file)
@@ -17,6 +17,7 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog
 
 ifdef CONFIG_DYNAMIC_FTRACE
 # dynamic ftrace setup.
index 7ecc0d1855c3342d341a8b00ea9cdbd9f51d1b42..6f7eb7e00c79eb3df9c6eba5ad61087e36427015 100644 (file)
@@ -1162,39 +1162,17 @@ machine_check_in_rtas:
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
-       stwu    r1,-48(r1)
-       stw     r3, 12(r1)
-       stw     r4, 16(r1)
-       stw     r5, 20(r1)
-       stw     r6, 24(r1)
-       mflr    r3
-       stw     r7, 28(r1)
-       mfcr    r5
-       stw     r8, 32(r1)
-       stw     r9, 36(r1)
-       stw     r10,40(r1)
-       stw     r3, 44(r1)
-       stw     r5, 8(r1)
-       subi    r3, r3, MCOUNT_INSN_SIZE
-       .globl mcount_call
-mcount_call:
-       bl      ftrace_stub
-       nop
-       lwz     r6, 8(r1)
-       lwz     r0, 44(r1)
-       lwz     r3, 12(r1)
+       /*
+        * _mcount on PPC32 is required to preserve the
+        * link register. But we have r0 to play with. We use r0
+        * to move the return address back to mcount's caller
+        * into the ctr register, restore the link register and
+        * then jump back using the ctr register.
+        */
+       mflr    r0
        mtctr   r0
-       lwz     r4, 16(r1)
-       mtcr    r6
-       lwz     r5, 20(r1)
-       lwz     r6, 24(r1)
-       lwz     r0, 52(r1)
-       lwz     r7, 28(r1)
-       lwz     r8, 32(r1)
+       lwz     r0, 4(r1)
        mtlr    r0
-       lwz     r9, 36(r1)
-       lwz     r10,40(r1)
-       addi    r1, r1, 48
        bctr
 
 _GLOBAL(ftrace_caller)
index e0bcf93542867d79e20c1d5951397813535545a2..383ed6eb00850d2a7ab89b3b54b4677a3b9b1478 100644 (file)
@@ -894,18 +894,6 @@ _GLOBAL(enter_prom)
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
-       /* Taken from output of objdump from lib64/glibc */
-       mflr    r3
-       stdu    r1, -112(r1)
-       std     r3, 128(r1)
-       subi    r3, r3, MCOUNT_INSN_SIZE
-       .globl mcount_call
-mcount_call:
-       bl      ftrace_stub
-       nop
-       ld      r0, 128(r1)
-       mtlr    r0
-       addi    r1, r1, 112
        blr
 
 _GLOBAL(ftrace_caller)
index f4b006ed0ab1ef183a0b0593520be182f4144c1f..5355244c99ff934abde18ce75336fe9a37689bcb 100644 (file)
@@ -9,22 +9,30 @@
 
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
 #include <asm/cacheflush.h>
+#include <asm/code-patching.h>
 #include <asm/ftrace.h>
 
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...)      do { } while (0)
+#endif
 
-static unsigned int ftrace_nop = 0x60000000;
+static unsigned int ftrace_nop = PPC_NOP_INSTR;
 
 #ifdef CONFIG_PPC32
 # define GET_ADDR(addr) addr
 #else
 /* PowerPC64's functions are data that points to the functions */
-# define GET_ADDR(addr) *(unsigned long *)addr
+# define GET_ADDR(addr) (*(unsigned long *)addr)
 #endif
 
 
@@ -33,12 +41,12 @@ static unsigned int ftrace_calc_offset(long ip, long addr)
        return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
+static unsigned char *ftrace_nop_replace(void)
 {
        return (char *)&ftrace_nop;
 }
 
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
        static unsigned int op;
 
@@ -68,49 +76,422 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 # define _ASM_PTR      " .long "
 #endif
 
-int
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                   unsigned char *new_code)
 {
-       unsigned replaced;
-       unsigned old = *(unsigned *)old_code;
-       unsigned new = *(unsigned *)new_code;
-       int faulted = 0;
+       unsigned char replaced[MCOUNT_INSN_SIZE];
 
        /*
         * Note: Due to modules and __init, code can
         *  disappear and change, we need to protect against faulting
-        *  as well as code changing.
+        *  as well as code changing. We do this by using the
+        *  probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
-        * kstop_machine.
+        * kstop_machine, or before SMP starts.
         */
-       asm volatile (
-               "1: lwz         %1, 0(%2)\n"
-               "   cmpw        %1, %5\n"
-               "   bne         2f\n"
-               "   stwu        %3, 0(%2)\n"
-               "2:\n"
-               ".section .fixup, \"ax\"\n"
-               "3:     li %0, 1\n"
-               "       b 2b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               _ASM_ALIGN "\n"
-               _ASM_PTR "1b, 3b\n"
-               ".previous"
-               : "=r"(faulted), "=r"(replaced)
-               : "r"(ip), "r"(new),
-                 "0"(faulted), "r"(old)
-               : "memory");
-
-       if (replaced != old && replaced != new)
-               faulted = 2;
-
-       if (!faulted)
-               flush_icache_range(ip, ip + 8);
-
-       return faulted;
+
+       /* read the text we want to modify */
+       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       /* Make sure it is what we expect it to be */
+       if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+               return -EINVAL;
+
+       /* replace the text with the new text */
+       if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       flush_icache_range(ip, ip + 8);
+
+       return 0;
+}
+
+/*
+ * Helper functions that are the same for both PPC64 and PPC32.
+ */
+static int test_24bit_addr(unsigned long ip, unsigned long addr)
+{
+
+       /* use the create_branch to verify that this offset can be branched */
+       return create_branch((unsigned int *)ip, addr, 0);
+}
+
+static int is_bl_op(unsigned int op)
+{
+       return (op & 0xfc000003) == 0x48000001;
+}
+
+static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+{
+       int offset;
+
+       offset = (op & 0x03fffffc);
+       /* make it signed */
+       if (offset & 0x02000000)
+               offset |= 0xfe000000;
+
+       return ip + (long)offset;
+}
+
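A quick host-side check of this decoding (an illustrative userspace
test, not part of the patch; 0x4bfffff1 encodes "bl .-16": opcode 18
with LK=1 and a branch offset of -16):

        #include <assert.h>
        #include <stdio.h>

        static int is_bl_op(unsigned int op)
        {
                return (op & 0xfc000003) == 0x48000001;
        }

        static unsigned long find_bl_target(unsigned long ip, unsigned int op)
        {
                int offset = op & 0x03fffffc;

                if (offset & 0x02000000)        /* sign bit of the offset field */
                        offset |= 0xfe000000;   /* sign-extend to 32 bits */

                return ip + (long)offset;
        }

        int main(void)
        {
                assert(is_bl_op(0x4bfffff1));
                assert(find_bl_target(0x1000, 0x4bfffff1) == 0x1000 - 16);
                printf("bl decoding OK\n");
                return 0;
        }
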
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_nop(struct module *mod,
+                 struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned int op;
+       unsigned int jmp[5];
+       unsigned long ptr;
+       unsigned long ip = rec->ip;
+       unsigned long tramp;
+       int offset;
+
+       /* read where this goes */
+       if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
+               return -EFAULT;
+
+       /* Make sure that this is still a 24bit jump */
+       if (!is_bl_op(op)) {
+               printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+               return -EINVAL;
+       }
+
+       /* lets find where the pointer goes */
+       tramp = find_bl_target(ip, op);
+
+       /*
+        * On PPC64 the trampoline looks like:
+        * 0x3d, 0x82, 0x00, 0x00,    addis   r12,r2, <high>
+        * 0x39, 0x8c, 0x00, 0x00,    addi    r12,r12, <low>
+        *   Where bytes 2,3,6 and 7 make up the 32bit offset
+        *   into the TOC that holds the pointer to jump to.
+        * 0xf8, 0x41, 0x00, 0x28,    std     r2,40(r1)
+        * 0xe9, 0x6c, 0x00, 0x20,    ld      r11,32(r12)
+        *   The actual pointer is found 32 bytes past that
+        *   offset into the TOC.
+        * 0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
+        */
+
+       DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
+
+       /* Find where the trampoline jumps to */
+       if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
+               printk(KERN_ERR "Failed to read %lx\n", tramp);
+               return -EFAULT;
+       }
+
+       DEBUGP(" %08x %08x", jmp[0], jmp[1]);
+
+       /* verify that this is what we expect it to be */
+       if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
+           ((jmp[1] & 0xffff0000) != 0x398c0000) ||
+           (jmp[2] != 0xf8410028) ||
+           (jmp[3] != 0xe96c0020) ||
+           (jmp[4] != 0xe84c0028)) {
+               printk(KERN_ERR "Not a trampoline\n");
+               return -EINVAL;
+       }
+
+       offset = (unsigned)((unsigned short)jmp[0]) << 16 |
+               (unsigned)((unsigned short)jmp[1]);
+
+       DEBUGP(" %x ", offset);
+
+       /* get the address this jumps to */
+       tramp = mod->arch.toc + offset + 32;
+       DEBUGP("toc: %lx", tramp);
+
+       if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+               printk(KERN_ERR "Failed to read %lx\n", tramp);
+               return -EFAULT;
+       }
+
+       DEBUGP(" %08x %08x\n", jmp[0], jmp[1]);
+
+       ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
+
+       /* This should match what was called */
+       if (ptr != GET_ADDR(addr)) {
+               printk(KERN_ERR "addr does not match %lx\n", ptr);
+               return -EINVAL;
+       }
+
+       /*
+        * We want to nop the line, but the next line is
+        *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
+        * This needs to be turned to a nop too.
+        */
+       if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       if (op != 0xe8410028) {
+               printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
+               return -EINVAL;
+       }
+
+       /*
+        * Milton Miller pointed out that we can not blindly do nops.
+        * If a task was preempted when calling a trace function,
+        * the nops will remove the way to restore the TOC in r2
+        * and the r2 TOC will get corrupted.
+        */
+
+       /*
+        * Replace:
+        *   bl <tramp>  <==== will be replaced with "b 1f"
+        *   ld r2,40(r1)
+        *  1:
+        */
+       op = 0x48000008;        /* b +8 */
+
+       if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+
+       flush_icache_range(ip, ip + 8);
+
+       return 0;
+}
+
+#else /* !PPC64 */
+static int
+__ftrace_make_nop(struct module *mod,
+                 struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned int op;
+       unsigned int jmp[4];
+       unsigned long ip = rec->ip;
+       unsigned long tramp;
+
+       if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       /* Make sure that this is still a 24bit jump */
+       if (!is_bl_op(op)) {
+               printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+               return -EINVAL;
+       }
+
+       /* lets find where the pointer goes */
+       tramp = find_bl_target(ip, op);
+
+       /*
+        * On PPC32 the trampoline looks like:
+        *  0x3d, 0x60, 0x00, 0x00  lis r11,sym@ha
+        *  0x39, 0x6b, 0x00, 0x00  addi r11,r11,sym@l
+        *  0x7d, 0x69, 0x03, 0xa6  mtctr r11
+        *  0x4e, 0x80, 0x04, 0x20  bctr
+        */
+
+       DEBUGP("ip:%lx jumps to %lx", ip, tramp);
+
+       /* Find where the trampoline jumps to */
+       if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
+               printk(KERN_ERR "Failed to read %lx\n", tramp);
+               return -EFAULT;
+       }
+
+       DEBUGP(" %08x %08x ", jmp[0], jmp[1]);
+
+       /* verify that this is what we expect it to be */
+       if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
+           ((jmp[1] & 0xffff0000) != 0x396b0000) ||
+           (jmp[2] != 0x7d6903a6) ||
+           (jmp[3] != 0x4e800420)) {
+               printk(KERN_ERR "Not a trampoline\n");
+               return -EINVAL;
+       }
+
+       tramp = (jmp[1] & 0xffff) |
+               ((jmp[0] & 0xffff) << 16);
+       if (tramp & 0x8000)
+               tramp -= 0x10000;
+
+       DEBUGP(" %x ", tramp);
+
+       if (tramp != addr) {
+               printk(KERN_ERR
+                      "Trampoline location %08lx does not match addr\n",
+                      tramp);
+               return -EINVAL;
+       }
+
+       op = PPC_NOP_INSTR;
+
+       if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       flush_icache_range(ip, ip + 8);
+
+       return 0;
+}
+#endif /* PPC64 */
+
+int ftrace_make_nop(struct module *mod,
+                   struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned char *old, *new;
+       unsigned long ip = rec->ip;
+
+       /*
+        * If the calling address is more than 24 bits away,
+        * then we had to use a trampoline to make the call.
+        * Otherwise just update the call site.
+        */
+       if (test_24bit_addr(ip, addr)) {
+               /* within range */
+               old = ftrace_call_replace(ip, addr);
+               new = ftrace_nop_replace();
+               return ftrace_modify_code(ip, old, new);
+       }
+
+       /*
+        * Out of range jumps are called from modules.
+        * We should either already have a pointer to the module
+        * or it has been passed in.
+        */
+       if (!rec->arch.mod) {
+               if (!mod) {
+                       printk(KERN_ERR "No module loaded addr=%lx\n",
+                              addr);
+                       return -EFAULT;
+               }
+               rec->arch.mod = mod;
+       } else if (mod) {
+               if (mod != rec->arch.mod) {
+                       printk(KERN_ERR
+                              "Record mod %p not equal to passed in mod %p\n",
+                              rec->arch.mod, mod);
+                       return -EINVAL;
+               }
+               /* nothing to do if mod == rec->arch.mod */
+       } else
+               mod = rec->arch.mod;
+
+       return __ftrace_make_nop(mod, rec, addr);
+
+}
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned int op[2];
+       unsigned long ip = rec->ip;
+
+       /* read where this goes */
+       if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
+               return -EFAULT;
+
+       /*
+        * It should be pointing to two nops or
+        *  b +8; ld r2,40(r1)
+        */
+       if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
+           ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
+               printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
+               return -EINVAL;
+       }
+
+       /* If we never set up a trampoline to ftrace_caller, then bail */
+       if (!rec->arch.mod->arch.tramp) {
+               printk(KERN_ERR "No ftrace trampoline\n");
+               return -EINVAL;
+       }
+
+       /* create the branch to the trampoline */
+       op[0] = create_branch((unsigned int *)ip,
+                             rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+       if (!op[0]) {
+               printk(KERN_ERR "REL24 out of range!\n");
+               return -EINVAL;
+       }
+
+       /* ld r2,40(r1) */
+       op[1] = 0xe8410028;
+
+       DEBUGP("write to %lx\n", rec->ip);
+
+       if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
+               return -EPERM;
+
+       flush_icache_range(ip, ip + 8);
+
+       return 0;
+}
+#else
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned int op;
+       unsigned long ip = rec->ip;
+
+       /* read where this goes */
+       if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       /* It should be pointing to a nop */
+       if (op != PPC_NOP_INSTR) {
+               printk(KERN_ERR "Expected NOP but have %x\n", op);
+               return -EINVAL;
+       }
+
+       /* If we never set up a trampoline to ftrace_caller, then bail */
+       if (!rec->arch.mod->arch.tramp) {
+               printk(KERN_ERR "No ftrace trampoline\n");
+               return -EINVAL;
+       }
+
+       /* create the branch to the trampoline */
+       op = create_branch((unsigned int *)ip,
+                          rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+       if (!op) {
+               printk(KERN_ERR "REL24 out of range!\n");
+               return -EINVAL;
+       }
+
+       DEBUGP("write to %lx\n", rec->ip);
+
+       if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       flush_icache_range(ip, ip + 8);
+
+       return 0;
+}
+#endif /* CONFIG_PPC64 */
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned char *old, *new;
+       unsigned long ip = rec->ip;
+
+       /*
+        * If the calling address is more than 24 bits away,
+        * then we had to use a trampoline to make the call.
+        * Otherwise just update the call site.
+        */
+       if (test_24bit_addr(ip, addr)) {
+               /* within range */
+               old = ftrace_nop_replace();
+               new = ftrace_call_replace(ip, addr);
+               return ftrace_modify_code(ip, old, new);
+       }
+
+       /*
+        * Out of range jumps are called from modules.
+        * Being that we are converting from nop, it had better
+        * already have a module defined.
+        */
+       if (!rec->arch.mod) {
+               printk(KERN_ERR "No module loaded\n");
+               return -EINVAL;
+       }
+
+       return __ftrace_make_call(rec, addr);
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -128,10 +509,10 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-       /* This is running in kstop_machine */
+       /* caller expects data to be zero */
+       unsigned long *p = data;
 
-       ftrace_mcount_set(data);
+       *p = 0;
 
        return 0;
 }
-
index 31982d05d81a8814d73dfc8b5a562cd90b66e5d3..88d9c1d5e5fb0aea1e74757ae9ac1133e9f352ef 100644 (file)
@@ -69,10 +69,15 @@ void cpu_idle(void)
                                smp_mb();
                                local_irq_disable();
 
+                               /* Don't trace irqs off for idle */
+                               stop_critical_timings();
+
                                /* check again after disabling irqs */
                                if (!need_resched() && !cpu_should_die())
                                        ppc_md.power_save();
 
+                               start_critical_timings();
+
                                local_irq_enable();
                                set_thread_flag(TIF_POLLING_NRFLAG);
 
index 2df91a03462a26109d49b9fc82b852df76c14b34..f832773fc28e940f82ed0dcecc6104d5e6e71939 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/ftrace.h>
 #include <linux/cache.h>
 #include <linux/bug.h>
 #include <linux/sort.h>
@@ -53,6 +54,9 @@ static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
                        r_addend = rela[i].r_addend;
                }
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+       _count_relocs++;        /* add one for ftrace_caller */
+#endif
        return _count_relocs;
 }
 
@@ -306,5 +310,11 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
                        return -ENOEXEC;
                }
        }
+#ifdef CONFIG_DYNAMIC_FTRACE
+       module->arch.tramp =
+               do_plt_call(module->module_core,
+                           (unsigned long)ftrace_caller,
+                           sechdrs, module);
+#endif
        return 0;
 }
index 1af2377e49929dc367d49b4588fd2ba6d23d7e91..8992b031a7b6a6cc54433453b6b991d1610cb5d6 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/moduleloader.h>
 #include <linux/err.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/bug.h>
 #include <asm/module.h>
 #include <asm/firmware.h>
@@ -163,6 +164,11 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
                }
        }
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+       /* make the trampoline to the ftrace_caller */
+       relocs++;
+#endif
+
        DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
        return relocs * sizeof(struct ppc64_stub_entry);
 }
@@ -441,5 +447,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                }
        }
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+       me->arch.toc = my_r2(sechdrs, me);
+       me->arch.tramp = stub_for_addr(sechdrs,
+                                      (unsigned long)ftrace_caller,
+                                      me);
+#endif
+
        return 0;
 }
index d69912c07ce732c937412dac1286ba0223ded96c..8db35278a4b43643c2cf42b280b79ad9d33be7bc 100644 (file)
@@ -6,6 +6,9 @@ ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS           += -mno-minimal-toc
 endif
 
+CFLAGS_REMOVE_code-patching.o = -pg
+CFLAGS_REMOVE_feature-fixups.o = -pg
+
 obj-y                  := string.o alloc.o \
                           checksum_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)    += div64.o copy_32.o crtsavres.o
index ac22bb7719f730e6b8d12b305ec9e08952a513a4..45c86fb941326584f167ca081d4f12ba30c1cd21 100644 (file)
@@ -29,11 +29,14 @@ config X86
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
        select HAVE_ARCH_KGDB if !X86_VOYAGER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_GENERIC_DMA_COHERENT if X86_32
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
+       select USER_STACKTRACE_SUPPORT
 
 config ARCH_DEFCONFIG
        string
index b815664fe3700b77aa031fab7711be4128c858d1..85a78575956cb0bdf473c4694f29eb7d497a154e 100644 (file)
@@ -515,6 +515,7 @@ config CPU_SUP_UMC_32
 config X86_DS
        def_bool X86_PTRACE_BTS
        depends on X86_DEBUGCTLMSR
+       select HAVE_HW_BRANCH_TRACER
 
 config X86_PTRACE_BTS
        bool "Branch Trace Store"
index 2a3dfbd5e677b548e5e75f43da69e934b90a7eff..fa013f529b746564fd04695bd4a12fe65c401dca 100644 (file)
@@ -186,14 +186,10 @@ config IOMMU_LEAK
          Add a simple leak tracer to the IOMMU code. This is useful when you
          are debugging a buggy device driver that leaks IOMMU mappings.
 
-config MMIOTRACE_HOOKS
-       bool
-
 config MMIOTRACE
        bool "Memory mapped IO tracing"
        depends on DEBUG_KERNEL && PCI
        select TRACING
-       select MMIOTRACE_HOOKS
        help
          Mmiotrace traces Memory Mapped I/O access and is meant for
          debugging and reverse engineering. It is called from the ioremap
index a95008457ea430ffbe7fdde3fc6a5fd57778ce79..99b6c39774a44179fcf3554a096449962167d490 100644 (file)
@@ -7,13 +7,12 @@
  *
  * It manages:
  * - per-thread and per-cpu allocation of BTS and PEBS
- * - buffer memory allocation (optional)
- * - buffer overflow handling
+ * - buffer overflow handling (to be done)
  * - buffer access
  *
  * It assumes:
- * - get_task_struct on all parameter tasks
- * - current is allowed to trace parameter tasks
+ * - get_task_struct on all traced tasks
+ * - current is allowed to trace tasks
  *
  *
  * Copyright (C) 2007-2008 Intel Corporation.
 
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/err.h>
 
 
 #ifdef CONFIG_X86_DS
 
 struct task_struct;
+struct ds_tracer;
+struct bts_tracer;
+struct pebs_tracer;
+
+typedef void (*bts_ovfl_callback_t)(struct bts_tracer *);
+typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);
 
 /*
  * Request BTS or PEBS
@@ -38,60 +44,62 @@ struct task_struct;
  * Due to alignment constraints, the actual buffer may be slightly
  * smaller than the requested or provided buffer.
  *
- * Returns 0 on success; -Eerrno otherwise
+ * Returns a pointer to a tracer structure on success, or
+ * ERR_PTR(errcode) on failure.
+ *
+ * The interrupt threshold is independent from the overflow callback
+ * to allow users to use their own overflow interrupt handling mechanism.
  *
  * task: the task to request recording for;
  *       NULL for per-cpu recording on the current cpu
  * base: the base pointer for the (non-pageable) buffer;
- *       NULL if buffer allocation requested
- * size: the size of the requested or provided buffer
+ * size: the size of the provided buffer in bytes
  * ovfl: pointer to a function to be called on buffer overflow;
  *       NULL if cyclic buffer requested
+ * th: the interrupt threshold in records from the end of the buffer;
+ *     -1 if no interrupt threshold is requested.
  */
-typedef void (*ds_ovfl_callback_t)(struct task_struct *);
-extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
-                         ds_ovfl_callback_t ovfl);
-extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
-                          ds_ovfl_callback_t ovfl);
+extern struct bts_tracer *ds_request_bts(struct task_struct *task,
+                                        void *base, size_t size,
+                                        bts_ovfl_callback_t ovfl, size_t th);
+extern struct pebs_tracer *ds_request_pebs(struct task_struct *task,
+                                          void *base, size_t size,
+                                          pebs_ovfl_callback_t ovfl,
+                                          size_t th);
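
A minimal sketch of how a caller might use the reworked handle-based
interface (task, bts_buf and BTS_BUF_SIZE are illustrative; errors
follow the ERR_PTR convention described above):

        struct bts_tracer *tracer;

        /* cyclic per-task buffer: no overflow callback, no threshold */
        tracer = ds_request_bts(task, bts_buf, BTS_BUF_SIZE,
                                NULL, (size_t)-1);
        if (IS_ERR(tracer))
                return PTR_ERR(tracer);

        /* ... consume records via ds_access_bts(tracer, ...) ... */

        ds_release_bts(tracer);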
 
 /*
  * Release BTS or PEBS resources
  *
- * Frees buffers allocated on ds_request.
- *
  * Returns 0 on success; -Eerrno otherwise
  *
- * task: the task to release resources for;
- *       NULL to release resources for the current cpu
+ * tracer: the tracer handle returned from ds_request_~()
  */
-extern int ds_release_bts(struct task_struct *task);
-extern int ds_release_pebs(struct task_struct *task);
+extern int ds_release_bts(struct bts_tracer *tracer);
+extern int ds_release_pebs(struct pebs_tracer *tracer);
 
 /*
- * Return the (array) index of the write pointer.
+ * Get the (array) index of the write pointer.
  * (assuming an array of BTS/PEBS records)
  *
- * Returns -Eerrno on error
+ * Returns 0 on success; -Eerrno on error
  *
- * task: the task to access;
- *       NULL to access the current cpu
- * pos (out): if not NULL, will hold the result
+ * tracer: the tracer handle returned from ds_request_~()
+ * pos (out): will hold the result
  */
-extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
-extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
+extern int ds_get_bts_index(struct bts_tracer *tracer, size_t *pos);
+extern int ds_get_pebs_index(struct pebs_tracer *tracer, size_t *pos);
 
 /*
- * Return the (array) index one record beyond the end of the array.
+ * Get the (array) index one record beyond the end of the array.
  * (assuming an array of BTS/PEBS records)
  *
- * Returns -Eerrno on error
+ * Returns 0 on success; -Eerrno on error
  *
- * task: the task to access;
- *       NULL to access the current cpu
- * pos (out): if not NULL, will hold the result
+ * tracer: the tracer handle returned from ds_request_~()
+ * pos (out): will hold the result
  */
-extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
-extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
+extern int ds_get_bts_end(struct bts_tracer *tracer, size_t *pos);
+extern int ds_get_pebs_end(struct pebs_tracer *tracer, size_t *pos);
 
 /*
  * Provide a pointer to the BTS/PEBS record at parameter index.
@@ -102,14 +110,13 @@ extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
  *
  * Returns the size of a single record on success; -Eerrno on error
  *
- * task: the task to access;
- *       NULL to access the current cpu
+ * tracer: the tracer handle returned from ds_request_~()
  * index: the index of the requested record
  * record (out): pointer to the requested record
  */
-extern int ds_access_bts(struct task_struct *task,
+extern int ds_access_bts(struct bts_tracer *tracer,
                         size_t index, const void **record);
-extern int ds_access_pebs(struct task_struct *task,
+extern int ds_access_pebs(struct pebs_tracer *tracer,
                          size_t index, const void **record);
 
 /*
@@ -129,38 +136,24 @@ extern int ds_access_pebs(struct task_struct *task,
  *
  * Returns the number of bytes written or -Eerrno.
  *
- * task: the task to access;
- *       NULL to access the current cpu
+ * tracer: the tracer handle returned from ds_request_~()
  * buffer: the buffer to write
  * size: the size of the buffer
  */
-extern int ds_write_bts(struct task_struct *task,
+extern int ds_write_bts(struct bts_tracer *tracer,
                        const void *buffer, size_t size);
-extern int ds_write_pebs(struct task_struct *task,
+extern int ds_write_pebs(struct pebs_tracer *tracer,
                         const void *buffer, size_t size);
 
-/*
- * Same as ds_write_bts/pebs, but omit ownership checks.
- *
- * This is needed to have some other task than the owner of the
- * BTS/PEBS buffer or the parameter task itself write into the
- * respective buffer.
- */
-extern int ds_unchecked_write_bts(struct task_struct *task,
-                                 const void *buffer, size_t size);
-extern int ds_unchecked_write_pebs(struct task_struct *task,
-                                  const void *buffer, size_t size);
-
 /*
  * Reset the write pointer of the BTS/PEBS buffer.
  *
  * Returns 0 on success; -Eerrno on error
  *
- * task: the task to access;
- *       NULL to access the current cpu
+ * tracer: the tracer handle returned from ds_request_~()
  */
-extern int ds_reset_bts(struct task_struct *task);
-extern int ds_reset_pebs(struct task_struct *task);
+extern int ds_reset_bts(struct bts_tracer *tracer);
+extern int ds_reset_pebs(struct pebs_tracer *tracer);
 
 /*
  * Clear the BTS/PEBS buffer and reset the write pointer.
@@ -168,33 +161,30 @@ extern int ds_reset_pebs(struct task_struct *task);
  *
  * Returns 0 on success; -Eerrno on error
  *
- * task: the task to access;
- *       NULL to access the current cpu
+ * tracer: the tracer handle returned from ds_request_~()
  */
-extern int ds_clear_bts(struct task_struct *task);
-extern int ds_clear_pebs(struct task_struct *task);
+extern int ds_clear_bts(struct bts_tracer *tracer);
+extern int ds_clear_pebs(struct pebs_tracer *tracer);
 
 /*
  * Provide the PEBS counter reset value.
  *
  * Returns 0 on success; -Eerrno on error
  *
- * task: the task to access;
- *       NULL to access the current cpu
+ * tracer: the tracer handle returned from ds_request_pebs()
  * value (out): the counter reset value
  */
-extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
+extern int ds_get_pebs_reset(struct pebs_tracer *tracer, u64 *value);
 
 /*
  * Set the PEBS counter reset value.
  *
  * Returns 0 on success; -Eerrno on error
  *
- * task: the task to access;
- *       NULL to access the current cpu
+ * tracer: the tracer handle returned from ds_request_pebs()
  * value: the new counter reset value
  */
-extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
+extern int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value);
 
 /*
  * Initialization
@@ -207,17 +197,13 @@ extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
 /*
  * The DS context - part of struct thread_struct.
  */
+#define MAX_SIZEOF_DS (12 * 8)
+
 struct ds_context {
        /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
-       unsigned char *ds;
+       unsigned char ds[MAX_SIZEOF_DS];
        /* the owner of the BTS and PEBS configuration, respectively */
-       struct task_struct *owner[2];
-       /* buffer overflow notification function for BTS and PEBS */
-       ds_ovfl_callback_t callback[2];
-       /* the original buffer address */
-       void *buffer[2];
-       /* the number of allocated pages for on-request allocated buffers */
-       unsigned int pages[2];
+       struct ds_tracer  *owner[2];
        /* use count */
        unsigned long count;
        /* a pointer to the context location inside the thread_struct
index 9e8bc29b8b17dd3739d7479af6920c3c627130cb..7e61b4ceb9a4c144f85a1eae55228e355ae390ae 100644 (file)
@@ -17,8 +17,40 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
         */
        return addr - 1;
 }
-#endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+struct dyn_arch_ftrace {
+       /* No extra data needed for x86 */
+};
+
+#endif /*  CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+       unsigned long ret;
+       unsigned long func;
+       unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32.S
+ */
+extern void return_to_handler(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 #endif /* _ASM_X86_FTRACE_H */
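
The ftrace_ret_stack entries above record, per nesting level, the saved return address, the traced function, and the entry timestamp. The following model is illustrative only (the real push/pop lives in the graph-tracer core and entry_32.S); it shows the bookkeeping those three fields enable. The depth constant and all names here are assumptions.

	#define EXAMPLE_RET_STACK_SIZE 50	/* depth is an assumption */

	struct example_ret_stack {
		struct ftrace_ret_stack entries[EXAMPLE_RET_STACK_SIZE];
		int top;
	};

	/* On function entry: remember where to return and when we entered. */
	static int push_return(struct example_ret_stack *s, unsigned long ret,
			       unsigned long func, unsigned long long now)
	{
		if (s->top >= EXAMPLE_RET_STACK_SIZE)
			return -1;	/* overrun: fall back to the real ret */
		s->entries[s->top].ret = ret;
		s->entries[s->top].func = func;
		s->entries[s->top].calltime = now;
		s->top++;
		return 0;
	}

	/* On function exit: compute the duration, resume at the saved ret.
	 * The caller is assumed to keep push/pop balanced. */
	static unsigned long pop_return(struct example_ret_stack *s,
					unsigned long long now,
					unsigned long long *duration)
	{
		struct ftrace_ret_stack *e = &s->entries[--s->top];

		*duration = now - e->calltime;
		return e->ret;
	}
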
index e44d379faad2b891245ef9cc2332dcaf8f8491c5..0921b4018c11d1a926c3eb083c55d9ba4f45370f 100644 (file)
@@ -20,6 +20,8 @@
 struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
+#include <asm/ftrace.h>
+#include <asm/atomic.h>
 
 struct thread_info {
        struct task_struct      *task;          /* main task structure */
index b62a7667828eb77574128a8ca82b1f54cee28238..a3049da61985f777bbbcc0c2eedb9b3820b4d63b 100644 (file)
@@ -14,6 +14,12 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+# Don't graph-trace __switch_to(), but keep it visible to the plain function tracer
+CFLAGS_REMOVE_process_32.o = -pg
+CFLAGS_REMOVE_process_64.o = -pg
+endif
+
 #
 # vsyscalls (which work on the user stack) should have
 # no stack-protector checks:
@@ -25,7 +31,7 @@ CFLAGS_tsc.o          := $(nostackp)
 
 obj-y                  := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
 obj-y                  += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
-obj-y                  += time_$(BITS).o ioport.o ldt.o
+obj-y                  += time_$(BITS).o ioport.o ldt.o dumpstack.o
 obj-y                  += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
 obj-$(CONFIG_X86_VISWS)        += visws_quirks.o
 obj-$(CONFIG_X86_32)   += probe_roms_32.o
@@ -65,6 +71,7 @@ obj-$(CONFIG_X86_LOCAL_APIC)  += apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)            += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump_$(BITS).o
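
The CFLAGS_REMOVE lines above exclude whole objects from -pg instrumentation at build time. For a single function, the usual alternative is the notrace annotation from linux/compiler.h; a hypothetical example:

	#include <linux/compiler.h>	/* provides the notrace annotation */

	/* Hypothetical helper: runs during a context switch, so we keep
	 * the tracer out of it, the same way the Makefile keeps -pg off
	 * process_32.o / process_64.o above. */
	static void notrace my_switch_helper(void)
	{
		/* work that must not recurse into the tracer */
	}
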
index 8e48c5d4467df61d3652a39b06b85e7389143c54..88ea02dcb622fe5bf3ef4f2d7a28fc31bdf938e0 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
+#include <linux/ftrace.h>
 
 #include <linux/acpi.h>
 #include <acpi/processor.h>
@@ -391,6 +392,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
        unsigned int next_perf_state = 0; /* Index into perf table */
        unsigned int i;
        int result = 0;
+       struct power_trace it;
 
        dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
 
@@ -427,6 +429,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                }
        }
 
+       trace_power_mark(&it, POWER_PSTATE, next_perf_state);
+
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
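
The two hunks above are the whole instrumentation pattern: declare a struct power_trace on the stack and call trace_power_mark() with the event type and the target state. A hedged sketch of the same pattern in a hypothetical driver (my_write_pstate_msr() is a placeholder, not a real interface):

	#include <linux/ftrace.h>	/* struct power_trace, trace_power_mark() */

	/* Hypothetical low-level switch; stands in for the real MSR write. */
	static int my_write_pstate_msr(unsigned int state)
	{
		return 0;
	}

	static int my_set_pstate(unsigned int next_perf_state)
	{
		struct power_trace it;

		/* one-shot mark: records the P-state transition event */
		trace_power_mark(&it, POWER_PSTATE, next_perf_state);

		return my_write_pstate_msr(next_perf_state);
	}
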
index cce0b6118d550e015a34c5e1b0f9bc60de1f8b3d..816f27f289b10a416847a141b20b1ed9540a530b 100644 (file)
@@ -307,12 +307,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
+#endif
 
        if (cpu_has_bts)
                ptrace_bts_init_intel(c);
 
-#endif
-
        detect_extended_topology(c);
        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
index a2d1176c38ee0d59b3e47925a79d8aebdff73d20..19a8c2c0389f1ab87dfe41193e04a5509f257130 100644 (file)
@@ -7,13 +7,12 @@
  *
  * It manages:
  * - per-thread and per-cpu allocation of BTS and PEBS
- * - buffer memory allocation (optional)
- * - buffer overflow handling
+ * - buffer overflow handling (to be done)
  * - buffer access
  *
  * It assumes:
- * - get_task_struct on all parameter tasks
- * - current is allowed to trace parameter tasks
+ * - get_task_struct on all traced tasks
+ * - current is allowed to trace tasks
  *
  *
  * Copyright (C) 2007-2008 Intel Corporation.
@@ -28,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/kernel.h>
 
 
 /*
@@ -44,6 +44,33 @@ struct ds_configuration {
 };
 static struct ds_configuration ds_cfg;
 
+/*
+ * A BTS or PEBS tracer.
+ *
+ * This holds the configuration of the tracer and serves as a handle
+ * to identify tracers.
+ */
+struct ds_tracer {
+       /* the DS context (partially) owned by this tracer */
+       struct ds_context *context;
+       /* the buffer provided on ds_request() and its size in bytes */
+       void *buffer;
+       size_t size;
+};
+
+struct bts_tracer {
+       /* the common DS part */
+       struct ds_tracer ds;
+       /* buffer overflow notification function */
+       bts_ovfl_callback_t ovfl;
+};
+
+struct pebs_tracer {
+       /* the common DS part */
+       struct ds_tracer ds;
+       /* buffer overflow notification function */
+       pebs_ovfl_callback_t ovfl;
+};
 
 /*
  * Debug Store (DS) save area configuration (see Intel64 and IA32
@@ -107,34 +134,13 @@ static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
        (*(unsigned long *)base) = value;
 }
 
+#define DS_ALIGNMENT (1 << 3)  /* BTS and PEBS buffer alignment */
 
-/*
- * Locking is done only for allocating BTS or PEBS resources and for
- * guarding context and buffer memory allocation.
- *
- * Most functions require the current task to own the ds context part
- * they are going to access. All the locking is done when validating
- * access to the context.
- */
-static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
 
 /*
- * Validate that the current task is allowed to access the BTS/PEBS
- * buffer of the parameter task.
- *
- * Returns 0, if access is granted; -Eerrno, otherwise.
+ * Locking is done only for allocating BTS or PEBS resources.
  */
-static inline int ds_validate_access(struct ds_context *context,
-                                    enum ds_qualifier qual)
-{
-       if (!context)
-               return -EPERM;
-
-       if (context->owner[qual] == current)
-               return 0;
-
-       return -EPERM;
-}
+static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
 
 
 /*
@@ -183,50 +189,12 @@ static inline int check_tracer(struct task_struct *task)
  *
  * Contexts are use-counted. They are allocated on first access and
  * deallocated when the last user puts the context.
- *
- * We distinguish between an allocating and a non-allocating get of a
- * context:
- * - the allocating get is used for requesting BTS/PEBS resources. It
- *   requires the caller to hold the global ds_lock.
- * - the non-allocating get is used for all other cases. A
- *   non-existing context indicates an error. It acquires and releases
- *   the ds_lock itself for obtaining the context.
- *
- * A context and its DS configuration are allocated and deallocated
- * together. A context always has a DS configuration of the
- * appropriate size.
  */
 static DEFINE_PER_CPU(struct ds_context *, system_context);
 
 #define this_system_context per_cpu(system_context, smp_processor_id())
 
-/*
- * Returns the pointer to the parameter task's context or to the
- * system-wide context, if task is NULL.
- *
- * Increases the use count of the returned context, if not NULL.
- */
 static inline struct ds_context *ds_get_context(struct task_struct *task)
-{
-       struct ds_context *context;
-       unsigned long irq;
-
-       spin_lock_irqsave(&ds_lock, irq);
-
-       context = (task ? task->thread.ds_ctx : this_system_context);
-       if (context)
-               context->count++;
-
-       spin_unlock_irqrestore(&ds_lock, irq);
-
-       return context;
-}
-
-/*
- * Same as ds_get_context, but allocates the context and it's DS
- * structure, if necessary; returns NULL; if out of memory.
- */
-static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
        struct ds_context **p_context =
                (task ? &task->thread.ds_ctx : &this_system_context);
@@ -238,16 +206,9 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
                if (!context)
                        return NULL;
 
-               context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
-               if (!context->ds) {
-                       kfree(context);
-                       return NULL;
-               }
-
                spin_lock_irqsave(&ds_lock, irq);
 
                if (*p_context) {
-                       kfree(context->ds);
                        kfree(context);
 
                        context = *p_context;
@@ -272,10 +233,6 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
        return context;
 }
 
-/*
- * Decreases the use count of the parameter context, if not NULL.
- * Deallocates the context, if the use count reaches zero.
- */
 static inline void ds_put_context(struct ds_context *context)
 {
        unsigned long irq;
@@ -296,13 +253,6 @@ static inline void ds_put_context(struct ds_context *context)
        if (!context->task || (context->task == current))
                wrmsrl(MSR_IA32_DS_AREA, 0);
 
-       put_tracer(context->task);
-
-       /* free any leftover buffers from tracers that did not
-        * deallocate them properly. */
-       kfree(context->buffer[ds_bts]);
-       kfree(context->buffer[ds_pebs]);
-       kfree(context->ds);
        kfree(context);
  out:
        spin_unlock_irqrestore(&ds_lock, irq);
@@ -312,345 +262,342 @@ static inline void ds_put_context(struct ds_context *context)
 /*
  * Handle a buffer overflow
  *
- * task: the task whose buffers are overflowing;
- *       NULL for a buffer overflow on the current cpu
  * context: the ds context
  * qual: the buffer type
  */
-static void ds_overflow(struct task_struct *task, struct ds_context *context,
-                       enum ds_qualifier qual)
-{
-       if (!context)
-               return;
-
-       if (context->callback[qual])
-               (*context->callback[qual])(task);
-
-       /* todo: do some more overflow handling */
+static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
+{
+       switch (qual) {
+       case ds_bts: {
+               struct bts_tracer *tracer =
+                       container_of(context->owner[qual],
+                                    struct bts_tracer, ds);
+               if (tracer->ovfl)
+                       tracer->ovfl(tracer);
+       }
+               break;
+       case ds_pebs: {
+               struct pebs_tracer *tracer =
+                       container_of(context->owner[qual],
+                                    struct pebs_tracer, ds);
+               if (tracer->ovfl)
+                       tracer->ovfl(tracer);
+       }
+               break;
+       }
 }
 
 
-/*
- * Allocate a non-pageable buffer of the parameter size.
- * Checks the memory and the locked memory rlimit.
- *
- * Returns the buffer, if successful;
- *         NULL, if out of memory or rlimit exceeded.
- *
- * size: the requested buffer size in bytes
- * pages (out): if not NULL, contains the number of pages reserved
- */
-static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
+static void ds_install_ds_config(struct ds_context *context,
+                                enum ds_qualifier qual,
+                                void *base, size_t size, size_t ith)
 {
-       unsigned long rlim, vm, pgsz;
-       void *buffer;
-
-       pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-       rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-       vm   = current->mm->total_vm  + pgsz;
-       if (rlim < vm)
-               return NULL;
+       unsigned long buffer, adj;
 
-       rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-       vm   = current->mm->locked_vm  + pgsz;
-       if (rlim < vm)
-               return NULL;
+       /* adjust the buffer address and size to meet alignment
+        * constraints:
+        * - buffer is double-word aligned
+        * - size is multiple of record size
+        *
+        * We checked the size at the very beginning; we have enough
+        * space to do the adjustment.
+        */
+       buffer = (unsigned long)base;
 
-       buffer = kzalloc(size, GFP_KERNEL);
-       if (!buffer)
-               return NULL;
+       adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
+       buffer += adj;
+       size   -= adj;
 
-       current->mm->total_vm  += pgsz;
-       current->mm->locked_vm += pgsz;
+       size /= ds_cfg.sizeof_rec[qual];
+       size *= ds_cfg.sizeof_rec[qual];
 
-       if (pages)
-               *pages = pgsz;
+       ds_set(context->ds, qual, ds_buffer_base, buffer);
+       ds_set(context->ds, qual, ds_index, buffer);
+       ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);
 
-       return buffer;
+       /* The value for 'no threshold' is -1, which places the
+        * threshold outside of the buffer, exactly as intended.
+        */
+       ds_set(context->ds, qual,
+              ds_interrupt_threshold, buffer + size - ith);
 }
 
-static int ds_request(struct task_struct *task, void *base, size_t size,
-                     ds_ovfl_callback_t ovfl, enum ds_qualifier qual)
+static int ds_request(struct ds_tracer *tracer, enum ds_qualifier qual,
+                     struct task_struct *task,
+                     void *base, size_t size, size_t th)
 {
        struct ds_context *context;
-       unsigned long buffer, adj;
-       const unsigned long alignment = (1 << 3);
        unsigned long irq;
-       int error = 0;
+       int error;
 
+       error = -EOPNOTSUPP;
        if (!ds_cfg.sizeof_ds)
-               return -EOPNOTSUPP;
+               goto out;
+
+       error = -EINVAL;
+       if (!base)
+               goto out;
 
        /* we require some space to do alignment adjustments below */
-       if (size < (alignment + ds_cfg.sizeof_rec[qual]))
-               return -EINVAL;
+       error = -EINVAL;
+       if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual]))
+               goto out;
 
-       /* buffer overflow notification is not yet implemented */
-       if (ovfl)
-               return -EOPNOTSUPP;
+       if (th != (size_t)-1) {
+               th *= ds_cfg.sizeof_rec[qual];
+
+               error = -EINVAL;
+               if (size <= th)
+                       goto out;
+       }
 
+       tracer->buffer = base;
+       tracer->size = size;
 
-       context = ds_alloc_context(task);
+       error = -ENOMEM;
+       context = ds_get_context(task);
        if (!context)
-               return -ENOMEM;
+               goto out;
+       tracer->context = context;
+
 
        spin_lock_irqsave(&ds_lock, irq);
 
        error = -EPERM;
        if (!check_tracer(task))
                goto out_unlock;
-
        get_tracer(task);
 
-       error = -EALREADY;
-       if (context->owner[qual] == current)
-               goto out_put_tracer;
        error = -EPERM;
-       if (context->owner[qual] != NULL)
+       if (context->owner[qual])
                goto out_put_tracer;
-       context->owner[qual] = current;
+       context->owner[qual] = tracer;
 
        spin_unlock_irqrestore(&ds_lock, irq);
 
 
-       error = -ENOMEM;
-       if (!base) {
-               base = ds_allocate_buffer(size, &context->pages[qual]);
-               if (!base)
-                       goto out_release;
-
-               context->buffer[qual]   = base;
-       }
-       error = 0;
+       ds_install_ds_config(context, qual, base, size, th);
 
-       context->callback[qual] = ovfl;
-
-       /* adjust the buffer address and size to meet alignment
-        * constraints:
-        * - buffer is double-word aligned
-        * - size is multiple of record size
-        *
-        * We checked the size at the very beginning; we have enough
-        * space to do the adjustment.
-        */
-       buffer = (unsigned long)base;
-
-       adj = ALIGN(buffer, alignment) - buffer;
-       buffer += adj;
-       size   -= adj;
-
-       size /= ds_cfg.sizeof_rec[qual];
-       size *= ds_cfg.sizeof_rec[qual];
-
-       ds_set(context->ds, qual, ds_buffer_base, buffer);
-       ds_set(context->ds, qual, ds_index, buffer);
-       ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);
-
-       if (ovfl) {
-               /* todo: select a suitable interrupt threshold */
-       } else
-               ds_set(context->ds, qual,
-                      ds_interrupt_threshold, buffer + size + 1);
-
-       /* we keep the context until ds_release */
-       return error;
-
- out_release:
-       context->owner[qual] = NULL;
-       ds_put_context(context);
-       put_tracer(task);
-       return error;
+       return 0;
 
  out_put_tracer:
-       spin_unlock_irqrestore(&ds_lock, irq);
-       ds_put_context(context);
        put_tracer(task);
-       return error;
-
  out_unlock:
        spin_unlock_irqrestore(&ds_lock, irq);
        ds_put_context(context);
+       tracer->context = NULL;
+ out:
        return error;
 }
 
-int ds_request_bts(struct task_struct *task, void *base, size_t size,
-                  ds_ovfl_callback_t ovfl)
+struct bts_tracer *ds_request_bts(struct task_struct *task,
+                                 void *base, size_t size,
+                                 bts_ovfl_callback_t ovfl, size_t th)
 {
-       return ds_request(task, base, size, ovfl, ds_bts);
-}
+       struct bts_tracer *tracer;
+       int error;
 
-int ds_request_pebs(struct task_struct *task, void *base, size_t size,
-                   ds_ovfl_callback_t ovfl)
-{
-       return ds_request(task, base, size, ovfl, ds_pebs);
+       /* buffer overflow notification is not yet implemented */
+       error = -EOPNOTSUPP;
+       if (ovfl)
+               goto out;
+
+       error = -ENOMEM;
+       tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+       if (!tracer)
+               goto out;
+       tracer->ovfl = ovfl;
+
+       error = ds_request(&tracer->ds, ds_bts, task, base, size, th);
+       if (error < 0)
+               goto out_tracer;
+
+       return tracer;
+
+ out_tracer:
+       kfree(tracer);
+ out:
+       return ERR_PTR(error);
 }
 
-static int ds_release(struct task_struct *task, enum ds_qualifier qual)
+struct pebs_tracer *ds_request_pebs(struct task_struct *task,
+                                   void *base, size_t size,
+                                   pebs_ovfl_callback_t ovfl, size_t th)
 {
-       struct ds_context *context;
+       struct pebs_tracer *tracer;
        int error;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
-       if (error < 0)
+       /* buffer overflow notification is not yet implemented */
+       error = -EOPNOTSUPP;
+       if (ovfl)
                goto out;
 
-       kfree(context->buffer[qual]);
-       context->buffer[qual] = NULL;
+       error = -ENOMEM;
+       tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+       if (!tracer)
+               goto out;
+       tracer->ovfl = ovfl;
 
-       current->mm->total_vm  -= context->pages[qual];
-       current->mm->locked_vm -= context->pages[qual];
-       context->pages[qual] = 0;
-       context->owner[qual] = NULL;
+       error = ds_request(&tracer->ds, ds_pebs, task, base, size, th);
+       if (error < 0)
+               goto out_tracer;
 
-       /*
-        * we put the context twice:
-        *   once for the ds_get_context
-        *   once for the corresponding ds_request
-        */
-       ds_put_context(context);
+       return tracer;
+
+ out_tracer:
+       kfree(tracer);
  out:
-       ds_put_context(context);
-       return error;
+       return ERR_PTR(error);
 }
 
-int ds_release_bts(struct task_struct *task)
+static void ds_release(struct ds_tracer *tracer, enum ds_qualifier qual)
 {
-       return ds_release(task, ds_bts);
+       BUG_ON(tracer->context->owner[qual] != tracer);
+       tracer->context->owner[qual] = NULL;
+
+       put_tracer(tracer->context->task);
+       ds_put_context(tracer->context);
 }
 
-int ds_release_pebs(struct task_struct *task)
+int ds_release_bts(struct bts_tracer *tracer)
 {
-       return ds_release(task, ds_pebs);
+       if (!tracer)
+               return -EINVAL;
+
+       ds_release(&tracer->ds, ds_bts);
+       kfree(tracer);
+
+       return 0;
 }
 
-static int ds_get_index(struct task_struct *task, size_t *pos,
-                       enum ds_qualifier qual)
+int ds_release_pebs(struct pebs_tracer *tracer)
 {
-       struct ds_context *context;
-       unsigned long base, index;
-       int error;
+       if (!tracer)
+               return -EINVAL;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
-       if (error < 0)
-               goto out;
+       ds_release(&tracer->ds, ds_pebs);
+       kfree(tracer);
+
+       return 0;
+}
+
+static size_t ds_get_index(struct ds_context *context, enum ds_qualifier qual)
+{
+       unsigned long base, index;
 
        base  = ds_get(context->ds, qual, ds_buffer_base);
        index = ds_get(context->ds, qual, ds_index);
 
-       error = ((index - base) / ds_cfg.sizeof_rec[qual]);
-       if (pos)
-               *pos = error;
- out:
-       ds_put_context(context);
-       return error;
+       return (index - base) / ds_cfg.sizeof_rec[qual];
 }
 
-int ds_get_bts_index(struct task_struct *task, size_t *pos)
+int ds_get_bts_index(struct bts_tracer *tracer, size_t *pos)
 {
-       return ds_get_index(task, pos, ds_bts);
+       if (!tracer)
+               return -EINVAL;
+
+       if (!pos)
+               return -EINVAL;
+
+       *pos = ds_get_index(tracer->ds.context, ds_bts);
+
+       return 0;
 }
 
-int ds_get_pebs_index(struct task_struct *task, size_t *pos)
+int ds_get_pebs_index(struct pebs_tracer *tracer, size_t *pos)
 {
-       return ds_get_index(task, pos, ds_pebs);
+       if (!tracer)
+               return -EINVAL;
+
+       if (!pos)
+               return -EINVAL;
+
+       *pos = ds_get_index(tracer->ds.context, ds_pebs);
+
+       return 0;
 }
 
-static int ds_get_end(struct task_struct *task, size_t *pos,
-                     enum ds_qualifier qual)
+static size_t ds_get_end(struct ds_context *context, enum ds_qualifier qual)
 {
-       struct ds_context *context;
-       unsigned long base, end;
-       int error;
-
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
-       if (error < 0)
-               goto out;
+       unsigned long base, max;
 
        base = ds_get(context->ds, qual, ds_buffer_base);
-       end  = ds_get(context->ds, qual, ds_absolute_maximum);
+       max  = ds_get(context->ds, qual, ds_absolute_maximum);
 
-       error = ((end - base) / ds_cfg.sizeof_rec[qual]);
-       if (pos)
-               *pos = error;
- out:
-       ds_put_context(context);
-       return error;
+       return (max - base) / ds_cfg.sizeof_rec[qual];
 }
 
-int ds_get_bts_end(struct task_struct *task, size_t *pos)
+int ds_get_bts_end(struct bts_tracer *tracer, size_t *pos)
 {
-       return ds_get_end(task, pos, ds_bts);
+       if (!tracer)
+               return -EINVAL;
+
+       if (!pos)
+               return -EINVAL;
+
+       *pos = ds_get_end(tracer->ds.context, ds_bts);
+
+       return 0;
 }
 
-int ds_get_pebs_end(struct task_struct *task, size_t *pos)
+int ds_get_pebs_end(struct pebs_tracer *tracer, size_t *pos)
 {
-       return ds_get_end(task, pos, ds_pebs);
+       if (!tracer)
+               return -EINVAL;
+
+       if (!pos)
+               return -EINVAL;
+
+       *pos = ds_get_end(tracer->ds.context, ds_pebs);
+
+       return 0;
 }
 
-static int ds_access(struct task_struct *task, size_t index,
-                    const void **record, enum ds_qualifier qual)
+static int ds_access(struct ds_context *context, enum ds_qualifier qual,
+                    size_t index, const void **record)
 {
-       struct ds_context *context;
        unsigned long base, idx;
-       int error;
 
        if (!record)
                return -EINVAL;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
-       if (error < 0)
-               goto out;
-
        base = ds_get(context->ds, qual, ds_buffer_base);
        idx = base + (index * ds_cfg.sizeof_rec[qual]);
 
-       error = -EINVAL;
        if (idx > ds_get(context->ds, qual, ds_absolute_maximum))
-               goto out;
+               return -EINVAL;
 
        *record = (const void *)idx;
-       error = ds_cfg.sizeof_rec[qual];
- out:
-       ds_put_context(context);
-       return error;
+
+       return ds_cfg.sizeof_rec[qual];
 }
 
-int ds_access_bts(struct task_struct *task, size_t index, const void **record)
+int ds_access_bts(struct bts_tracer *tracer, size_t index,
+                 const void **record)
 {
-       return ds_access(task, index, record, ds_bts);
+       if (!tracer)
+               return -EINVAL;
+
+       return ds_access(tracer->ds.context, ds_bts, index, record);
 }
 
-int ds_access_pebs(struct task_struct *task, size_t index, const void **record)
+int ds_access_pebs(struct pebs_tracer *tracer, size_t index,
+                  const void **record)
 {
-       return ds_access(task, index, record, ds_pebs);
+       if (!tracer)
+               return -EINVAL;
+
+       return ds_access(tracer->ds.context, ds_pebs, index, record);
 }
 
-static int ds_write(struct task_struct *task, const void *record, size_t size,
-                   enum ds_qualifier qual, int force)
+static int ds_write(struct ds_context *context, enum ds_qualifier qual,
+                   const void *record, size_t size)
 {
-       struct ds_context *context;
-       int error;
+       int bytes_written = 0;
 
        if (!record)
                return -EINVAL;
 
-       error = -EPERM;
-       context = ds_get_context(task);
-       if (!context)
-               goto out;
-
-       if (!force) {
-               error = ds_validate_access(context, qual);
-               if (error < 0)
-                       goto out;
-       }
-
-       error = 0;
        while (size) {
                unsigned long base, index, end, write_end, int_th;
                unsigned long write_size, adj_write_size;
@@ -678,14 +625,14 @@ static int ds_write(struct task_struct *task, const void *record, size_t size,
                        write_end = end;
 
                if (write_end <= index)
-                       goto out;
+                       break;
 
                write_size = min((unsigned long) size, write_end - index);
                memcpy((void *)index, record, write_size);
 
                record = (const char *)record + write_size;
-               size  -= write_size;
-               error += write_size;
+               size -= write_size;
+               bytes_written += write_size;
 
                adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
                adj_write_size *= ds_cfg.sizeof_rec[qual];
@@ -700,47 +647,32 @@ static int ds_write(struct task_struct *task, const void *record, size_t size,
                ds_set(context->ds, qual, ds_index, index);
 
                if (index >= int_th)
-                       ds_overflow(task, context, qual);
+                       ds_overflow(context, qual);
        }
 
- out:
-       ds_put_context(context);
-       return error;
+       return bytes_written;
 }
 
-int ds_write_bts(struct task_struct *task, const void *record, size_t size)
+int ds_write_bts(struct bts_tracer *tracer, const void *record, size_t size)
 {
-       return ds_write(task, record, size, ds_bts, /* force = */ 0);
-}
+       if (!tracer)
+               return -EINVAL;
 
-int ds_write_pebs(struct task_struct *task, const void *record, size_t size)
-{
-       return ds_write(task, record, size, ds_pebs, /* force = */ 0);
+       return ds_write(tracer->ds.context, ds_bts, record, size);
 }
 
-int ds_unchecked_write_bts(struct task_struct *task,
-                          const void *record, size_t size)
+int ds_write_pebs(struct pebs_tracer *tracer, const void *record, size_t size)
 {
-       return ds_write(task, record, size, ds_bts, /* force = */ 1);
-}
+       if (!tracer)
+               return -EINVAL;
 
-int ds_unchecked_write_pebs(struct task_struct *task,
-                           const void *record, size_t size)
-{
-       return ds_write(task, record, size, ds_pebs, /* force = */ 1);
+       return ds_write(tracer->ds.context, ds_pebs, record, size);
 }
 
-static int ds_reset_or_clear(struct task_struct *task,
-                            enum ds_qualifier qual, int clear)
+static void ds_reset_or_clear(struct ds_context *context,
+                             enum ds_qualifier qual, int clear)
 {
-       struct ds_context *context;
        unsigned long base, end;
-       int error;
-
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
-       if (error < 0)
-               goto out;
 
        base = ds_get(context->ds, qual, ds_buffer_base);
        end  = ds_get(context->ds, qual, ds_absolute_maximum);
@@ -749,70 +681,69 @@ static int ds_reset_or_clear(struct task_struct *task,
                memset((void *)base, 0, end - base);
 
        ds_set(context->ds, qual, ds_index, base);
-
-       error = 0;
- out:
-       ds_put_context(context);
-       return error;
 }
 
-int ds_reset_bts(struct task_struct *task)
+int ds_reset_bts(struct bts_tracer *tracer)
 {
-       return ds_reset_or_clear(task, ds_bts, /* clear = */ 0);
+       if (!tracer)
+               return -EINVAL;
+
+       ds_reset_or_clear(tracer->ds.context, ds_bts, /* clear = */ 0);
+
+       return 0;
 }
 
-int ds_reset_pebs(struct task_struct *task)
+int ds_reset_pebs(struct pebs_tracer *tracer)
 {
-       return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0);
+       if (!tracer)
+               return -EINVAL;
+
+       ds_reset_or_clear(tracer->ds.context, ds_pebs, /* clear = */ 0);
+
+       return 0;
 }
 
-int ds_clear_bts(struct task_struct *task)
+int ds_clear_bts(struct bts_tracer *tracer)
 {
-       return ds_reset_or_clear(task, ds_bts, /* clear = */ 1);
+       if (!tracer)
+               return -EINVAL;
+
+       ds_reset_or_clear(tracer->ds.context, ds_bts, /* clear = */ 1);
+
+       return 0;
 }
 
-int ds_clear_pebs(struct task_struct *task)
+int ds_clear_pebs(struct pebs_tracer *tracer)
 {
-       return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1);
+       if (!tracer)
+               return -EINVAL;
+
+       ds_reset_or_clear(tracer->ds.context, ds_pebs, /* clear = */ 1);
+
+       return 0;
 }
 
-int ds_get_pebs_reset(struct task_struct *task, u64 *value)
+int ds_get_pebs_reset(struct pebs_tracer *tracer, u64 *value)
 {
-       struct ds_context *context;
-       int error;
+       if (!tracer)
+               return -EINVAL;
 
        if (!value)
                return -EINVAL;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, ds_pebs);
-       if (error < 0)
-               goto out;
+       *value = *(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8));
 
-       *value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8));
-
-       error = 0;
- out:
-       ds_put_context(context);
-       return error;
+       return 0;
 }
 
-int ds_set_pebs_reset(struct task_struct *task, u64 value)
+int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
 {
-       struct ds_context *context;
-       int error;
-
-       context = ds_get_context(task);
-       error = ds_validate_access(context, ds_pebs);
-       if (error < 0)
-               goto out;
+       if (!tracer)
+               return -EINVAL;
 
-       *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value;
+       *(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8)) = value;
 
-       error = 0;
- out:
-       ds_put_context(context);
-       return error;
+       return 0;
 }
 
 static const struct ds_configuration ds_cfg_var = {
@@ -840,6 +771,10 @@ static inline void
 ds_configure(const struct ds_configuration *cfg)
 {
        ds_cfg = *cfg;
+
+       printk(KERN_INFO "DS available\n");
+
+       BUG_ON(MAX_SIZEOF_DS < ds_cfg.sizeof_ds);
 }
 
 void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
@@ -847,17 +782,16 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
        switch (c->x86) {
        case 0x6:
                switch (c->x86_model) {
+               case 0 ... 0xC:
+                       /* sorry, don't know about them */
+                       break;
                case 0xD:
                case 0xE: /* Pentium M */
                        ds_configure(&ds_cfg_var);
                        break;
-               case 0xF: /* Core2 */
-               case 0x1C: /* Atom */
+               default: /* Core2, Atom, ... */
                        ds_configure(&ds_cfg_64);
                        break;
-               default:
-                       /* sorry, don't know about them */
-                       break;
                }
                break;
        case 0xF:
@@ -884,6 +818,8 @@ void ds_free(struct ds_context *context)
         * is dying. There should not be any user of that context left
         * to disturb us, anymore. */
        unsigned long leftovers = context->count;
-       while (leftovers--)
+       while (leftovers--) {
+               put_tracer(context->task);
                ds_put_context(context);
+       }
 }
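
ds_overflow() above recovers the outer bts_tracer or pebs_tracer from the embedded struct ds_tracer with container_of(). The pattern in isolation, with illustrative names (only the embedding and the recovery are the point):

	#include <linux/kernel.h>	/* container_of() */

	struct common_part {
		int id;
	};

	struct outer_tracer {
		struct common_part ds;	/* must be embedded, not pointed to */
		void (*ovfl)(struct outer_tracer *);
	};

	/* Given only the embedded common part, recover the outer object. */
	static void notify_overflow(struct common_part *common)
	{
		struct outer_tracer *tracer =
			container_of(common, struct outer_tracer, ds);

		if (tracer->ovfl)
			tracer->ovfl(tracer);
	}
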
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
new file mode 100644 (file)
index 0000000..5962176
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/utsname.h>
+#include <linux/hardirq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/kexec.h>
+#include <linux/bug.h>
+#include <linux/nmi.h>
+#include <linux/sysfs.h>
+
+#include <asm/stacktrace.h>
+
+#include "dumpstack.h"
+
+int panic_on_unrecovered_nmi;
+unsigned int code_bytes = 64;
+int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
+static int die_counter;
+
+void printk_address(unsigned long address, int reliable)
+{
+       printk(" [<%p>] %s%pS\n", (void *) address,
+                       reliable ? "" : "? ", (void *) address);
+}
+
+/*
+ * x86-64 can have up to three kernel stacks:
+ * process stack
+ * interrupt stack
+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+ */
+
+static inline int valid_stack_ptr(struct thread_info *tinfo,
+                       void *p, unsigned int size, void *end)
+{
+       void *t = tinfo;
+       if (end) {
+               if (p < end && p >= (end-THREAD_SIZE))
+                       return 1;
+               else
+                       return 0;
+       }
+       return p > t && p < t + THREAD_SIZE - size;
+}
+
+unsigned long
+print_context_stack(struct thread_info *tinfo,
+               unsigned long *stack, unsigned long bp,
+               const struct stacktrace_ops *ops, void *data,
+               unsigned long *end)
+{
+       struct stack_frame *frame = (struct stack_frame *)bp;
+
+       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+               unsigned long addr;
+
+               addr = *stack;
+               if (__kernel_text_address(addr)) {
+                       if ((unsigned long) stack == bp + sizeof(long)) {
+                               ops->address(data, addr, 1);
+                               frame = frame->next_frame;
+                               bp = (unsigned long) frame;
+                       } else {
+                               ops->address(data, addr, bp == 0);
+                       }
+               }
+               stack++;
+       }
+       return bp;
+}
+
+
+static void
+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+       printk(data);
+       print_symbol(msg, symbol);
+       printk("\n");
+}
+
+static void print_trace_warning(void *data, char *msg)
+{
+       printk("%s%s\n", (char *)data, msg);
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+       printk("%s <%s> ", (char *)data, name);
+       return 0;
+}
+
+/*
+ * Print one address/symbol entry per line.
+ */
+static void print_trace_address(void *data, unsigned long addr, int reliable)
+{
+       touch_nmi_watchdog();
+       printk(data);
+       printk_address(addr, reliable);
+}
+
+static const struct stacktrace_ops print_trace_ops = {
+       .warning = print_trace_warning,
+       .warning_symbol = print_trace_warning_symbol,
+       .stack = print_trace_stack,
+       .address = print_trace_address,
+};
+
+void
+show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp, char *log_lvl)
+{
+       printk("%sCall Trace:\n", log_lvl);
+       dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
+}
+
+void show_trace(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp)
+{
+       show_trace_log_lvl(task, regs, stack, bp, "");
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+       show_stack_log_lvl(task, NULL, sp, 0, "");
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+       unsigned long bp = 0;
+       unsigned long stack;
+
+#ifdef CONFIG_FRAME_POINTER
+       if (!bp)
+               get_bp(bp);
+#endif
+
+       printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+               current->pid, current->comm, print_tainted(),
+               init_utsname()->release,
+               (int)strcspn(init_utsname()->version, " "),
+               init_utsname()->version);
+       show_trace(NULL, NULL, &stack, bp);
+}
+EXPORT_SYMBOL(dump_stack);
+
+static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
+
+unsigned __kprobes long oops_begin(void)
+{
+       int cpu;
+       unsigned long flags;
+
+       oops_enter();
+
+       /* racy, but better than risking deadlock. */
+       raw_local_irq_save(flags);
+       cpu = smp_processor_id();
+       if (!__raw_spin_trylock(&die_lock)) {
+               if (cpu == die_owner)
+                       /* nested oops. should stop eventually */;
+               else
+                       __raw_spin_lock(&die_lock);
+       }
+       die_nest_count++;
+       die_owner = cpu;
+       console_verbose();
+       bust_spinlocks(1);
+       return flags;
+}
+
+void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+{
+       if (regs && kexec_should_crash(current))
+               crash_kexec(regs);
+
+       bust_spinlocks(0);
+       die_owner = -1;
+       add_taint(TAINT_DIE);
+       die_nest_count--;
+       if (!die_nest_count)
+               /* Nest count reaches zero, release the lock. */
+               __raw_spin_unlock(&die_lock);
+       raw_local_irq_restore(flags);
+       oops_exit();
+
+       if (!signr)
+               return;
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+       if (panic_on_oops)
+               panic("Fatal exception");
+       do_exit(signr);
+}
+
+int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+{
+#ifdef CONFIG_X86_32
+       unsigned short ss;
+       unsigned long sp;
+#endif
+       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+       printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+       printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       printk("DEBUG_PAGEALLOC");
+#endif
+       printk("\n");
+       sysfs_printk_last_file();
+       if (notify_die(DIE_OOPS, str, regs, err,
+                       current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+               return 1;
+
+       show_registers(regs);
+#ifdef CONFIG_X86_32
+       sp = (unsigned long) (&regs->sp);
+       savesegment(ss, ss);
+       if (user_mode(regs)) {
+               sp = regs->sp;
+               ss = regs->ss & 0xffff;
+       }
+       printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
+       print_symbol("%s", regs->ip);
+       printk(" SS:ESP %04x:%08lx\n", ss, sp);
+#else
+       /* Executive summary in case the oops scrolled away */
+       printk(KERN_ALERT "RIP ");
+       printk_address(regs->ip, 1);
+       printk(" RSP <%016lx>\n", regs->sp);
+#endif
+       return 0;
+}
+
+/*
+ * This is gone through when something in the kernel has done something bad
+ * and is about to be terminated:
+ */
+void die(const char *str, struct pt_regs *regs, long err)
+{
+       unsigned long flags = oops_begin();
+       int sig = SIGSEGV;
+
+       if (!user_mode_vm(regs))
+               report_bug(regs->ip, regs);
+
+       if (__die(str, regs, err))
+               sig = 0;
+       oops_end(flags, regs, sig);
+}
+
+void notrace __kprobes
+die_nmi(char *str, struct pt_regs *regs, int do_panic)
+{
+       unsigned long flags;
+
+       if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
+               return;
+
+       /*
+        * We are in trouble anyway, let's at least try
+        * to get a message out.
+        */
+       flags = oops_begin();
+       printk(KERN_EMERG "%s", str);
+       printk(" on CPU%d, ip %08lx, registers:\n",
+               smp_processor_id(), regs->ip);
+       show_registers(regs);
+       oops_end(flags, regs, 0);
+       if (do_panic || panic_on_oops)
+               panic("Non maskable interrupt");
+       nmi_exit();
+       local_irq_enable();
+       do_exit(SIGBUS);
+}
+
+static int __init oops_setup(char *s)
+{
+       if (!s)
+               return -EINVAL;
+       if (!strcmp(s, "panic"))
+               panic_on_oops = 1;
+       return 0;
+}
+early_param("oops", oops_setup);
+
+static int __init kstack_setup(char *s)
+{
+       if (!s)
+               return -EINVAL;
+       kstack_depth_to_print = simple_strtoul(s, NULL, 0);
+       return 0;
+}
+early_param("kstack", kstack_setup);
+
+static int __init code_bytes_setup(char *s)
+{
+       code_bytes = simple_strtoul(s, NULL, 0);
+       if (code_bytes > 8192)
+               code_bytes = 8192;
+
+       return 1;
+}
+__setup("code_bytes=", code_bytes_setup);
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
new file mode 100644 (file)
index 0000000..3119a80
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
+
+#ifndef DUMPSTACK_H
+#define DUMPSTACK_H
+
+#ifdef CONFIG_X86_32
+#define STACKSLOTS_PER_LINE 8
+#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
+#else
+#define STACKSLOTS_PER_LINE 4
+#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
+#endif
+
+extern unsigned long
+print_context_stack(struct thread_info *tinfo,
+               unsigned long *stack, unsigned long bp,
+               const struct stacktrace_ops *ops, void *data,
+               unsigned long *end);
+
+extern void
+show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp, char *log_lvl);
+
+extern void
+show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *sp, unsigned long bp, char *log_lvl);
+
+extern unsigned int code_bytes;
+extern int kstack_depth_to_print;
+
+/* The form of the top of the frame on the stack */
+struct stack_frame {
+       struct stack_frame *next_frame;
+       unsigned long return_address;
+};
+#endif
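
struct stack_frame and get_bp() are enough for a bare frame-pointer walk. A hedged sketch, assuming CONFIG_FRAME_POINTER, a well-formed frame chain, and a visible printk_address() declaration; the real walker, print_context_stack(), additionally validates every pointer against the thread stack bounds:

	/* Sketch only: no bounds checking, unlike print_context_stack(). */
	static void walk_frames(void)
	{
		unsigned long bp;
		struct stack_frame *frame;
		int depth;

		get_bp(bp);			/* current %ebp / %rbp */
		frame = (struct stack_frame *)bp;

		for (depth = 0; depth < kstack_depth_to_print && frame;
		     depth++) {
			printk_address(frame->return_address, 1);
			frame = frame->next_frame;
		}
	}
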
index b3614752197b6c6e3c0cf53c7b718dd0167fbdea..7b031b106ec8f65cb40ee4042eb8341c356e4a09 100644 (file)
 
 #include <asm/stacktrace.h>
 
-#define STACKSLOTS_PER_LINE 8
-#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
-
-int panic_on_unrecovered_nmi;
-int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
-static unsigned int code_bytes = 64;
-static int die_counter;
-
-void printk_address(unsigned long address, int reliable)
-{
-       printk(" [<%p>] %s%pS\n", (void *) address,
-                       reliable ? "" : "? ", (void *) address);
-}
-
-static inline int valid_stack_ptr(struct thread_info *tinfo,
-                       void *p, unsigned int size, void *end)
-{
-       void *t = tinfo;
-       if (end) {
-               if (p < end && p >= (end-THREAD_SIZE))
-                       return 1;
-               else
-                       return 0;
-       }
-       return p > t && p < t + THREAD_SIZE - size;
-}
-
-/* The form of the top of the frame on the stack */
-struct stack_frame {
-       struct stack_frame *next_frame;
-       unsigned long return_address;
-};
-
-static inline unsigned long
-print_context_stack(struct thread_info *tinfo,
-               unsigned long *stack, unsigned long bp,
-               const struct stacktrace_ops *ops, void *data,
-               unsigned long *end)
-{
-       struct stack_frame *frame = (struct stack_frame *)bp;
-
-       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
-               unsigned long addr;
-
-               addr = *stack;
-               if (__kernel_text_address(addr)) {
-                       if ((unsigned long) stack == bp + sizeof(long)) {
-                               ops->address(data, addr, 1);
-                               frame = frame->next_frame;
-                               bp = (unsigned long) frame;
-                       } else {
-                               ops->address(data, addr, bp == 0);
-                       }
-               }
-               stack++;
-       }
-       return bp;
-}
+#include "dumpstack.h"
 
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
@@ -119,57 +62,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 }
 EXPORT_SYMBOL(dump_trace);
 
-static void
-print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-       printk(data);
-       print_symbol(msg, symbol);
-       printk("\n");
-}
-
-static void print_trace_warning(void *data, char *msg)
-{
-       printk("%s%s\n", (char *)data, msg);
-}
-
-static int print_trace_stack(void *data, char *name)
-{
-       printk("%s <%s> ", (char *)data, name);
-       return 0;
-}
-
-/*
- * Print one address/symbol entries per line.
- */
-static void print_trace_address(void *data, unsigned long addr, int reliable)
-{
-       touch_nmi_watchdog();
-       printk(data);
-       printk_address(addr, reliable);
-}
-
-static const struct stacktrace_ops print_trace_ops = {
-       .warning = print_trace_warning,
-       .warning_symbol = print_trace_warning_symbol,
-       .stack = print_trace_stack,
-       .address = print_trace_address,
-};
-
-static void
-show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp, char *log_lvl)
-{
-       printk("%sCall Trace:\n", log_lvl);
-       dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-}
-
-void show_trace(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp)
-{
-       show_trace_log_lvl(task, regs, stack, bp, "");
-}
-
-static void
+void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *sp, unsigned long bp, char *log_lvl)
 {
@@ -196,33 +89,6 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
-       show_stack_log_lvl(task, NULL, sp, 0, "");
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
-       unsigned long bp = 0;
-       unsigned long stack;
-
-#ifdef CONFIG_FRAME_POINTER
-       if (!bp)
-               get_bp(bp);
-#endif
-
-       printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-               current->pid, current->comm, print_tainted(),
-               init_utsname()->release,
-               (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version);
-       show_trace(NULL, NULL, &stack, bp);
-}
-
-EXPORT_SYMBOL(dump_stack);
 
 void show_registers(struct pt_regs *regs)
 {
@@ -283,167 +149,3 @@ int is_valid_bugaddr(unsigned long ip)
        return ud2 == 0x0b0f;
 }
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
-static int die_owner = -1;
-static unsigned int die_nest_count;
-
-unsigned __kprobes long oops_begin(void)
-{
-       unsigned long flags;
-
-       oops_enter();
-
-       if (die_owner != raw_smp_processor_id()) {
-               console_verbose();
-               raw_local_irq_save(flags);
-               __raw_spin_lock(&die_lock);
-               die_owner = smp_processor_id();
-               die_nest_count = 0;
-               bust_spinlocks(1);
-       } else {
-               raw_local_irq_save(flags);
-       }
-       die_nest_count++;
-       return flags;
-}
-
-void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
-{
-       bust_spinlocks(0);
-       die_owner = -1;
-       add_taint(TAINT_DIE);
-       __raw_spin_unlock(&die_lock);
-       raw_local_irq_restore(flags);
-
-       if (!regs)
-               return;
-
-       if (kexec_should_crash(current))
-               crash_kexec(regs);
-       if (in_interrupt())
-               panic("Fatal exception in interrupt");
-       if (panic_on_oops)
-               panic("Fatal exception");
-       oops_exit();
-       do_exit(signr);
-}
-
-int __kprobes __die(const char *str, struct pt_regs *regs, long err)
-{
-       unsigned short ss;
-       unsigned long sp;
-
-       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
-       printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
-       printk("SMP ");
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk("DEBUG_PAGEALLOC");
-#endif
-       printk("\n");
-       sysfs_printk_last_file();
-       if (notify_die(DIE_OOPS, str, regs, err,
-                       current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
-               return 1;
-
-       show_registers(regs);
-       /* Executive summary in case the oops scrolled away */
-       sp = (unsigned long) (&regs->sp);
-       savesegment(ss, ss);
-       if (user_mode(regs)) {
-               sp = regs->sp;
-               ss = regs->ss & 0xffff;
-       }
-       printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
-       print_symbol("%s", regs->ip);
-       printk(" SS:ESP %04x:%08lx\n", ss, sp);
-       return 0;
-}
-
-/*
- * This is gone through when something in the kernel has done something bad
- * and is about to be terminated:
- */
-void die(const char *str, struct pt_regs *regs, long err)
-{
-       unsigned long flags = oops_begin();
-
-       if (die_nest_count < 3) {
-               report_bug(regs->ip, regs);
-
-               if (__die(str, regs, err))
-                       regs = NULL;
-       } else {
-               printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
-       }
-
-       oops_end(flags, regs, SIGSEGV);
-}
-
-static DEFINE_SPINLOCK(nmi_print_lock);
-
-void notrace __kprobes
-die_nmi(char *str, struct pt_regs *regs, int do_panic)
-{
-       if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
-               return;
-
-       spin_lock(&nmi_print_lock);
-       /*
-       * We are in trouble anyway, lets at least try
-       * to get a message out:
-       */
-       bust_spinlocks(1);
-       printk(KERN_EMERG "%s", str);
-       printk(" on CPU%d, ip %08lx, registers:\n",
-               smp_processor_id(), regs->ip);
-       show_registers(regs);
-       if (do_panic)
-               panic("Non maskable interrupt");
-       console_silent();
-       spin_unlock(&nmi_print_lock);
-
-       /*
-        * If we are in kernel we are probably nested up pretty bad
-        * and might aswell get out now while we still can:
-        */
-       if (!user_mode_vm(regs)) {
-               current->thread.trap_no = 2;
-               crash_kexec(regs);
-       }
-
-       bust_spinlocks(0);
-       do_exit(SIGSEGV);
-}
-
-static int __init oops_setup(char *s)
-{
-       if (!s)
-               return -EINVAL;
-       if (!strcmp(s, "panic"))
-               panic_on_oops = 1;
-       return 0;
-}
-early_param("oops", oops_setup);
-
-static int __init kstack_setup(char *s)
-{
-       if (!s)
-               return -EINVAL;
-       kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-       return 0;
-}
-early_param("kstack", kstack_setup);
-
-static int __init code_bytes_setup(char *s)
-{
-       code_bytes = simple_strtoul(s, NULL, 0);
-       if (code_bytes > 8192)
-               code_bytes = 8192;
-
-       return 1;
-}
-__setup("code_bytes=", code_bytes_setup);
index 96a5db7da8a747e5192f410c60fe9494e8d8e8d1..33ff10287a5d7b29162068c4d4f6d2eb141724db 100644 (file)
 
 #include <asm/stacktrace.h>
 
-#define STACKSLOTS_PER_LINE 4
-#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
-
-int panic_on_unrecovered_nmi;
-int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
-static unsigned int code_bytes = 64;
-static int die_counter;
-
-void printk_address(unsigned long address, int reliable)
-{
-       printk(" [<%p>] %s%pS\n", (void *) address,
-                       reliable ? "" : "? ", (void *) address);
-}
+#include "dumpstack.h"
 
 static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
                                        unsigned *usedp, char **idp)
@@ -113,51 +101,6 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
-                       void *p, unsigned int size, void *end)
-{
-       void *t = tinfo;
-       if (end) {
-               if (p < end && p >= (end-THREAD_SIZE))
-                       return 1;
-               else
-                       return 0;
-       }
-       return p > t && p < t + THREAD_SIZE - size;
-}
-
-/* The form of the top of the frame on the stack */
-struct stack_frame {
-       struct stack_frame *next_frame;
-       unsigned long return_address;
-};
-
-static inline unsigned long
-print_context_stack(struct thread_info *tinfo,
-               unsigned long *stack, unsigned long bp,
-               const struct stacktrace_ops *ops, void *data,
-               unsigned long *end)
-{
-       struct stack_frame *frame = (struct stack_frame *)bp;
-
-       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
-               unsigned long addr;
-
-               addr = *stack;
-               if (__kernel_text_address(addr)) {
-                       if ((unsigned long) stack == bp + sizeof(long)) {
-                               ops->address(data, addr, 1);
-                               frame = frame->next_frame;
-                               bp = (unsigned long) frame;
-                       } else {
-                               ops->address(data, addr, bp == 0);
-                       }
-               }
-               stack++;
-       }
-       return bp;
-}
-
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
@@ -248,57 +191,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 }
 EXPORT_SYMBOL(dump_trace);
 
-static void
-print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
-{
-       printk(data);
-       print_symbol(msg, symbol);
-       printk("\n");
-}
-
-static void print_trace_warning(void *data, char *msg)
-{
-       printk("%s%s\n", (char *)data, msg);
-}
-
-static int print_trace_stack(void *data, char *name)
-{
-       printk("%s <%s> ", (char *)data, name);
-       return 0;
-}
-
-/*
- * Print one address/symbol entries per line.
- */
-static void print_trace_address(void *data, unsigned long addr, int reliable)
-{
-       touch_nmi_watchdog();
-       printk(data);
-       printk_address(addr, reliable);
-}
-
-static const struct stacktrace_ops print_trace_ops = {
-       .warning = print_trace_warning,
-       .warning_symbol = print_trace_warning_symbol,
-       .stack = print_trace_stack,
-       .address = print_trace_address,
-};
-
-static void
-show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp, char *log_lvl)
-{
-       printk("%sCall Trace:\n", log_lvl);
-       dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-}
-
-void show_trace(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp)
-{
-       show_trace_log_lvl(task, regs, stack, bp, "");
-}
-
-static void
+void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *sp, unsigned long bp, char *log_lvl)
 {
@@ -342,33 +235,6 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
-       show_stack_log_lvl(task, NULL, sp, 0, "");
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
-       unsigned long bp = 0;
-       unsigned long stack;
-
-#ifdef CONFIG_FRAME_POINTER
-       if (!bp)
-               get_bp(bp);
-#endif
-
-       printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-               current->pid, current->comm, print_tainted(),
-               init_utsname()->release,
-               (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version);
-       show_trace(NULL, NULL, &stack, bp);
-}
-EXPORT_SYMBOL(dump_stack);
-
 void show_registers(struct pt_regs *regs)
 {
        int i;
@@ -429,147 +295,3 @@ int is_valid_bugaddr(unsigned long ip)
        return ud2 == 0x0b0f;
 }
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
-static int die_owner = -1;
-static unsigned int die_nest_count;
-
-unsigned __kprobes long oops_begin(void)
-{
-       int cpu;
-       unsigned long flags;
-
-       oops_enter();
-
-       /* racy, but better than risking deadlock. */
-       raw_local_irq_save(flags);
-       cpu = smp_processor_id();
-       if (!__raw_spin_trylock(&die_lock)) {
-               if (cpu == die_owner)
-                       /* nested oops. should stop eventually */;
-               else
-                       __raw_spin_lock(&die_lock);
-       }
-       die_nest_count++;
-       die_owner = cpu;
-       console_verbose();
-       bust_spinlocks(1);
-       return flags;
-}
-
-void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
-{
-       die_owner = -1;
-       bust_spinlocks(0);
-       die_nest_count--;
-       if (!die_nest_count)
-               /* Nest count reaches zero, release the lock. */
-               __raw_spin_unlock(&die_lock);
-       raw_local_irq_restore(flags);
-       if (!regs) {
-               oops_exit();
-               return;
-       }
-       if (in_interrupt())
-               panic("Fatal exception in interrupt");
-       if (panic_on_oops)
-               panic("Fatal exception");
-       oops_exit();
-       do_exit(signr);
-}
-
-int __kprobes __die(const char *str, struct pt_regs *regs, long err)
-{
-       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
-       printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
-       printk("SMP ");
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk("DEBUG_PAGEALLOC");
-#endif
-       printk("\n");
-       sysfs_printk_last_file();
-       if (notify_die(DIE_OOPS, str, regs, err,
-                       current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
-               return 1;
-
-       show_registers(regs);
-       add_taint(TAINT_DIE);
-       /* Executive summary in case the oops scrolled away */
-       printk(KERN_ALERT "RIP ");
-       printk_address(regs->ip, 1);
-       printk(" RSP <%016lx>\n", regs->sp);
-       if (kexec_should_crash(current))
-               crash_kexec(regs);
-       return 0;
-}
-
-void die(const char *str, struct pt_regs *regs, long err)
-{
-       unsigned long flags = oops_begin();
-
-       if (!user_mode(regs))
-               report_bug(regs->ip, regs);
-
-       if (__die(str, regs, err))
-               regs = NULL;
-       oops_end(flags, regs, SIGSEGV);
-}
-
-notrace __kprobes void
-die_nmi(char *str, struct pt_regs *regs, int do_panic)
-{
-       unsigned long flags;
-
-       if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
-               return;
-
-       flags = oops_begin();
-       /*
-        * We are in trouble anyway, let's at least try
-        * to get a message out.
-        */
-       printk(KERN_EMERG "%s", str);
-       printk(" on CPU%d, ip %08lx, registers:\n",
-               smp_processor_id(), regs->ip);
-       show_registers(regs);
-       if (kexec_should_crash(current))
-               crash_kexec(regs);
-       if (do_panic || panic_on_oops)
-               panic("Non maskable interrupt");
-       oops_end(flags, NULL, SIGBUS);
-       nmi_exit();
-       local_irq_enable();
-       do_exit(SIGBUS);
-}
-
-static int __init oops_setup(char *s)
-{
-       if (!s)
-               return -EINVAL;
-       if (!strcmp(s, "panic"))
-               panic_on_oops = 1;
-       return 0;
-}
-early_param("oops", oops_setup);
-
-static int __init kstack_setup(char *s)
-{
-       if (!s)
-               return -EINVAL;
-       kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-       return 0;
-}
-early_param("kstack", kstack_setup);
-
-static int __init code_bytes_setup(char *s)
-{
-       code_bytes = simple_strtoul(s, NULL, 0);
-       if (code_bytes > 8192)
-               code_bytes = 8192;
-
-       return 1;
-}
-__setup("code_bytes=", code_bytes_setup);
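
The print_context_stack() walk removed from this file (it now lives in the shared dumpstack code) follows the frame-pointer chain: bp points at a struct stack_frame whose next_frame field links to the caller's frame, and a stack word found exactly at bp + sizeof(long) is a frame-pointer-verified ("reliable") return address. A minimal userspace sketch of the same walk -- assuming x86-64, a build with -fno-omit-frame-pointer, and purely illustrative names rather than kernel API -- might look like:

#include <stdio.h>
#include <stdint.h>

/* Same layout as the struct stack_frame in the hunk above. */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

/*
 * Walk the frame-pointer chain starting at bp, staying inside
 * [lo, hi) and insisting that the chain move toward the stack base.
 */
static void walk_frames(unsigned long bp, unsigned long lo, unsigned long hi)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while ((unsigned long)frame >= lo &&
	       (unsigned long)frame + sizeof(*frame) <= hi) {
		printf("  [<%016lx>]\n", frame->return_address);
		if ((unsigned long)frame->next_frame <= (unsigned long)frame)
			break;	/* NULL or not moving up: end of chain */
		frame = frame->next_frame;
	}
}

int main(void)
{
	/* With frame pointers enabled, %rbp heads the chain. */
	unsigned long bp = (unsigned long)__builtin_frame_address(0);

	walk_frames(bp, bp, bp + 8192);
	return 0;
}
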
index 28b597ef9ca16b7992c333f10eae252ea695475c..958af86186c4bbdb22242fc12181bab9ee5dbaf3 100644 (file)
@@ -1157,6 +1157,9 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+       cmpl $0, function_trace_stop
+       jne  ftrace_stub
+
        pushl %eax
        pushl %ecx
        pushl %edx
@@ -1171,6 +1174,11 @@ ftrace_call:
        popl %edx
        popl %ecx
        popl %eax
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+       jmp ftrace_stub
+#endif
 
 .globl ftrace_stub
 ftrace_stub:
@@ -1180,8 +1188,15 @@ END(ftrace_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+       cmpl $0, function_trace_stop
+       jne  ftrace_stub
+
        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       cmpl $ftrace_stub, ftrace_graph_return
+       jnz ftrace_graph_caller
+#endif
 .globl ftrace_stub
 ftrace_stub:
        ret
@@ -1200,12 +1215,42 @@ trace:
        popl %edx
        popl %ecx
        popl %eax
-
        jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       cmpl $0, function_trace_stop
+       jne ftrace_stub
+
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+       movl 0xc(%esp), %edx
+       lea 0x4(%ebp), %eax
+       call prepare_ftrace_return
+       popl %edx
+       popl %ecx
+       popl %eax
+       ret
+END(ftrace_graph_caller)
+
+.globl return_to_handler
+return_to_handler:
+       pushl $0
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+       call ftrace_return_to_handler
+       movl %eax, 0xc(%esp)
+       popl %edx
+       popl %ecx
+       popl %eax
+       ret
+#endif
+
 .section .rodata,"a"
 #include "syscall_table_32.S"
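
On function entry, ftrace_graph_caller hands prepare_ftrace_return() the address of the saved return slot (the lea 0x4(%ebp) above) plus the caller ip; prepare_ftrace_return() records the original return address on a per-task shadow stack and redirects the slot to return_to_handler, which later pops the record and returns to the real caller. A sketch of that shadow-stack bookkeeping in plain C -- simplified, with illustrative names; the real state lives in current->ret_stack:

#include <stdio.h>

#define RET_DEPTH 50	/* stands in for FTRACE_RETFUNC_DEPTH */

/* One shadow-stack slot per hooked call, as in current->ret_stack. */
struct ret_entry {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* traced function */
	unsigned long long calltime;
};

static struct ret_entry ret_stack[RET_DEPTH];
static int curr_ret = -1;

/* Entry hook: save the original return address before the saved
 * slot is redirected to the return trampoline. */
static int push_return(unsigned long ret, unsigned long func,
		       unsigned long long now)
{
	if (curr_ret == RET_DEPTH - 1)
		return -1;		/* stack full: leave call untouched */
	++curr_ret;
	ret_stack[curr_ret] = (struct ret_entry){ ret, func, now };
	return 0;
}

/* Exit hook (the trampoline): report duration, restore original ret. */
static unsigned long pop_return(unsigned long long now)
{
	struct ret_entry *e = &ret_stack[curr_ret--];

	printf("func %#lx ran for %llu ns\n", e->func, now - e->calltime);
	return e->ret;		/* jump target for the real return */
}

int main(void)
{
	/* Simulated call/return of a traced function at 0xc0de. */
	push_return(0xbeef, 0xc0de, 100);
	unsigned long back = pop_return(250);

	printf("returning to %#lx\n", back);
	return 0;
}
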
 
index b86f332c96a66596f0fe076d7c0eda78ea05dbe5..2aa0526ac30e9f8c7936c31f62959aedc39b01ff 100644 (file)
@@ -68,6 +68,8 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
+       cmpl $0, function_trace_stop
+       jne  ftrace_stub
 
        /* taken from glibc */
        subq $0x38, %rsp
@@ -96,6 +98,12 @@ ftrace_call:
        movq (%rsp), %rax
        addq $0x38, %rsp
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+       jmp ftrace_stub
+#endif
+
 .globl ftrace_stub
 ftrace_stub:
        retq
@@ -103,8 +111,17 @@ END(ftrace_caller)
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
+       cmpl $0, function_trace_stop
+       jne  ftrace_stub
+
        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       cmpq $ftrace_stub, ftrace_graph_return
+       jnz ftrace_graph_caller
+#endif
+
 .globl ftrace_stub
 ftrace_stub:
        retq
@@ -140,6 +157,68 @@ END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       cmpl $0, function_trace_stop
+       jne ftrace_stub
+
+       subq $0x38, %rsp
+       movq %rax, (%rsp)
+       movq %rcx, 8(%rsp)
+       movq %rdx, 16(%rsp)
+       movq %rsi, 24(%rsp)
+       movq %rdi, 32(%rsp)
+       movq %r8, 40(%rsp)
+       movq %r9, 48(%rsp)
+
+       leaq 8(%rbp), %rdi
+       movq 0x38(%rsp), %rsi
+
+       call    prepare_ftrace_return
+
+       movq 48(%rsp), %r9
+       movq 40(%rsp), %r8
+       movq 32(%rsp), %rdi
+       movq 24(%rsp), %rsi
+       movq 16(%rsp), %rdx
+       movq 8(%rsp), %rcx
+       movq (%rsp), %rax
+       addq $0x38, %rsp
+       retq
+END(ftrace_graph_caller)
+
+.globl return_to_handler
+return_to_handler:
+       subq  $80, %rsp
+
+       movq %rax, (%rsp)
+       movq %rcx, 8(%rsp)
+       movq %rdx, 16(%rsp)
+       movq %rsi, 24(%rsp)
+       movq %rdi, 32(%rsp)
+       movq %r8, 40(%rsp)
+       movq %r9, 48(%rsp)
+       movq %r10, 56(%rsp)
+       movq %r11, 64(%rsp)
+
+       call ftrace_return_to_handler
+
+       movq %rax, 72(%rsp)
+       movq 64(%rsp), %r11
+       movq 56(%rsp), %r10
+       movq 48(%rsp), %r9
+       movq 40(%rsp), %r8
+       movq 32(%rsp), %rdi
+       movq 24(%rsp), %rsi
+       movq 16(%rsp), %rdx
+       movq 8(%rsp), %rcx
+       movq (%rsp), %rax
+       addq $72, %rsp
+       retq
+#endif
+
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #endif 
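
Both trampoline files now open every hook -- mcount, ftrace_caller, and ftrace_graph_caller alike -- with the same function_trace_stop gate, so all tracing can be switched off globally with a single store. A trivial C analogue of that gate, with a hypothetical hook name for illustration:

#include <stdio.h>

/* Mirrors the function_trace_stop check compiled into each entry above. */
static volatile int function_trace_stop;

static void my_trace_hook(unsigned long ip)
{
	if (function_trace_stop)	/* cmpl $0, function_trace_stop */
		return;			/* jne ftrace_stub */
	printf("hit %#lx\n", ip);
}

int main(void)
{
	my_trace_hook(0x1000);		/* traced */
	function_trace_stop = 1;	/* e.g. set while tracing is unsafe */
	my_trace_hook(0x2000);		/* suppressed */
	return 0;
}
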
index 50ea0ac8c9bf2c27a53323b93b5d473bd7e1d028..58832478b94e53af0437ef527f4f22ea9d54ea40 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
+#include <linux/ftrace.h>
 #include <asm/nops.h>
+#include <asm/nmi.h>
 
 
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+#ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
@@ -31,18 +34,12 @@ union ftrace_code_union {
        } __attribute__((packed));
 };
 
-
 static int ftrace_calc_offset(long ip, long addr)
 {
        return (int)(addr - ip);
 }
 
-unsigned char *ftrace_nop_replace(void)
-{
-       return ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
        static union ftrace_code_union calc;
 
@@ -56,7 +53,142 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
        return calc.code;
 }
 
-int
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU,
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code.
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code.
+ * 5) Clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status;            /* holds return value of text write */
+static int mod_code_write;             /* set when NMI should do the write */
+static void *mod_code_ip;              /* holds the IP to write to */
+static void *mod_code_newcode;         /* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+       int r;
+
+       r = snprintf(buf, size, "%u %u",
+                    nmi_wait_count,
+                    atomic_read(&nmi_update_count));
+       return r;
+}
+
+static void ftrace_mod_code(void)
+{
+       /*
+        * Yes, more than one CPU can be writing to mod_code_status.
+        *    (and the code itself)
+        * But if one were to fail, then they all should, and if one were
+        * to succeed, then they all should.
+        */
+       mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+                                            MCOUNT_INSN_SIZE);
+}
+
+void ftrace_nmi_enter(void)
+{
+       atomic_inc(&in_nmi);
+       /* The in_nmi increment must be visible before we read the write flag */
+       smp_mb();
+       if (mod_code_write) {
+               ftrace_mod_code();
+               atomic_inc(&nmi_update_count);
+       }
+}
+
+void ftrace_nmi_exit(void)
+{
+       /* Finish all executions before clearing in_nmi */
+       smp_wmb();
+       atomic_dec(&in_nmi);
+}
+
+static void wait_for_nmi(void)
+{
+       int waited = 0;
+
+       while (atomic_read(&in_nmi)) {
+               waited = 1;
+               cpu_relax();
+       }
+
+       if (waited)
+               nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+       mod_code_ip = (void *)ip;
+       mod_code_newcode = new_code;
+
+       /* The buffers need to be visible before we let NMIs perform the write */
+       smp_wmb();
+
+       mod_code_write = 1;
+
+       /* Make sure write bit is visible before we wait on NMIs */
+       smp_mb();
+
+       wait_for_nmi();
+
+       /* Make sure all running NMIs have finished before we write the code */
+       smp_mb();
+
+       ftrace_mod_code();
+
+       /* Make sure the write happens before clearing the bit */
+       smp_wmb();
+
+       mod_code_write = 0;
+
+       /* make sure NMIs see the cleared bit */
+       smp_mb();
+
+       wait_for_nmi();
+
+       return mod_code_status;
+}
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+static unsigned char *ftrace_nop_replace(void)
+{
+       return ftrace_nop;
+}
+
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                   unsigned char *new_code)
 {
@@ -81,7 +213,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                return -EINVAL;
 
        /* replace the text with the new text */
-       if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+       if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;
 
        sync_core();
@@ -89,6 +221,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
        return 0;
 }
 
+int ftrace_make_nop(struct module *mod,
+                   struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned char *new, *old;
+       unsigned long ip = rec->ip;
+
+       old = ftrace_call_replace(ip, addr);
+       new = ftrace_nop_replace();
+
+       return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned char *new, *old;
+       unsigned long ip = rec->ip;
+
+       old = ftrace_nop_replace();
+       new = ftrace_call_replace(ip, addr);
+
+       return ftrace_modify_code(rec->ip, old, new);
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
        unsigned long ip = (unsigned long)(&ftrace_call);
@@ -165,3 +320,203 @@ int __init ftrace_dyn_arch_init(void *data)
 
        return 0;
 }
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod_jmp(unsigned long ip,
+                         int old_offset, int new_offset)
+{
+       unsigned char code[MCOUNT_INSN_SIZE];
+
+       if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
+               return -EINVAL;
+
+       *(int *)(&code[1]) = new_offset;
+
+       if (do_ftrace_mod_code(ip, &code))
+               return -EPERM;
+
+       return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       unsigned long ip = (unsigned long)(&ftrace_graph_call);
+       int old_offset, new_offset;
+
+       old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+       new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+
+       return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       unsigned long ip = (unsigned long)(&ftrace_graph_call);
+       int old_offset, new_offset;
+
+       old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+       new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+
+       return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
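
ftrace_mod_jmp() above retargets the 5-byte jmp planted at ftrace_graph_call by verifying the old rel32 displacement and writing a new one, each displacement being target - (ip + MCOUNT_INSN_SIZE). A small self-contained sketch of the same check-and-patch on an ordinary buffer -- illustrative constants only, no kernel text involved:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define JMP_INSN_SIZE 5	/* 0xe9 + rel32; MCOUNT_INSN_SIZE on x86 */

/* Patch the rel32 of an 0xe9 jmp at ip from old_target to new_target,
 * mimicking ftrace_mod_jmp() on a plain buffer instead of kernel text. */
static int mod_jmp(uint8_t *ip, uintptr_t old_target, uintptr_t new_target)
{
	int32_t old_off = (int32_t)(old_target - ((uintptr_t)ip + JMP_INSN_SIZE));
	int32_t new_off = (int32_t)(new_target - ((uintptr_t)ip + JMP_INSN_SIZE));
	int32_t cur;

	if (ip[0] != 0xe9)
		return -1;		/* not a jmp rel32 */
	memcpy(&cur, ip + 1, sizeof(cur));
	if (cur != old_off)
		return -1;		/* unexpected current target */
	memcpy(ip + 1, &new_off, sizeof(new_off));
	return 0;
}

int main(void)
{
	uint8_t text[JMP_INSN_SIZE] = { 0xe9, 0, 0, 0, 0 };
	uintptr_t stub  = (uintptr_t)text + 64;		/* stand-in for ftrace_stub */
	uintptr_t graph = (uintptr_t)text + 128;	/* ...for ftrace_graph_caller */

	/* Initially encode a jump to the stub, then retarget it. */
	int32_t off = (int32_t)(stub - ((uintptr_t)text + JMP_INSN_SIZE));
	memcpy(text + 1, &off, sizeof(off));

	printf("enable: %d\n", mod_jmp(text, stub, graph));	/* 0: patched */
	printf("enable: %d\n", mod_jmp(text, stub, graph));	/* -1: already moved */
	return 0;
}
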
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * These functions are picked from those used earlier in
+ * this file for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+       atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+       atomic_dec(&in_nmi);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the return trace stack of the current task. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+                               unsigned long func, int *depth)
+{
+       int index;
+
+       if (!current->ret_stack)
+               return -EBUSY;
+
+       /* The return trace stack is full */
+       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+               atomic_inc(&current->trace_overrun);
+               return -EBUSY;
+       }
+
+       index = ++current->curr_ret_stack;
+       barrier();
+       current->ret_stack[index].ret = ret;
+       current->ret_stack[index].func = func;
+       current->ret_stack[index].calltime = time;
+       *depth = index;
+
+       return 0;
+}
+
+/* Retrieve a function return address from the return trace stack of the current task. */
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+       int index;
+
+       index = current->curr_ret_stack;
+       *ret = current->ret_stack[index].ret;
+       trace->func = current->ret_stack[index].func;
+       trace->calltime = current->ret_stack[index].calltime;
+       trace->overrun = atomic_read(&current->trace_overrun);
+       trace->depth = index;
+       current->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+       struct ftrace_graph_ret trace;
+       unsigned long ret;
+
+       pop_return_trace(&trace, &ret);
+       trace.rettime = cpu_clock(raw_smp_processor_id());
+       ftrace_graph_return(&trace);
+
+       return ret;
+}
+
+/*
+ * Hook the return address and push it onto the stack of
+ * return addresses of the current task.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+       unsigned long old;
+       unsigned long long calltime;
+       int faulted;
+       struct ftrace_graph_ent trace;
+       unsigned long return_hooker = (unsigned long)
+                               &return_to_handler;
+
+       /* NMIs are currently unsupported */
+       if (atomic_read(&in_nmi))
+               return;
+
+       /*
+        * Protect against a fault, even though one shouldn't
+        * happen. This tool is too intrusive for such
+        * protection to be omitted.
+        */
+       asm volatile(
+#ifdef CONFIG_X86_64
+               "1: movq (%[parent_old]), %[old]\n"
+               "2: movq %[return_hooker], (%[parent_replaced])\n"
+#else
+               "1: movl (%[parent_old]), %[old]\n"
+               "2: movl %[return_hooker], (%[parent_replaced])\n"
+#endif
+               "   movl $0, %[faulted]\n"
+
+               ".section .fixup, \"ax\"\n"
+               "3: movl $1, %[faulted]\n"
+               ".previous\n"
+
+               ".section __ex_table, \"a\"\n"
+#ifdef CONFIG_X86_64
+               "   .quad 1b, 3b\n"
+               "   .quad 2b, 3b\n"
+#else
+               "   .long 1b, 3b\n"
+               "   .long 2b, 3b\n"
+#endif
+               ".previous\n"
+
+               : [parent_replaced] "=r" (parent), [old] "=r" (old),
+                 [faulted] "=r" (faulted)
+               : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+               : "memory"
+       );
+
+       if (WARN_ON(faulted)) {
+               unregister_ftrace_graph();
+               return;
+       }
+
+       if (WARN_ON(!__kernel_text_address(old))) {
+               unregister_ftrace_graph();
+               *parent = old;
+               return;
+       }
+
+       calltime = cpu_clock(raw_smp_processor_id());
+
+       if (push_return_trace(old, calltime,
+                               self_addr, &trace.depth) == -EBUSY) {
+               *parent = old;
+               return;
+       }
+
+       trace.func = self_addr;
+       ftrace_graph_entry(&trace);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
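
The inline assembly in prepare_ftrace_return() open-codes an exception-table fixup so that a fault while reading or redirecting the parent's return slot merely sets "faulted" instead of oopsing. Expressed with the generic probe helpers instead -- a sketch only; the real code presumably avoids the extra call overhead on this hot path -- the same protection would be roughly:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper: fault-tolerant read of the parent return slot
 * followed by a fault-tolerant write of the trampoline address. */
static int swap_return_slot(unsigned long *parent, unsigned long hooker,
			    unsigned long *old)
{
	if (probe_kernel_read(old, parent, sizeof(*old)))
		return -EFAULT;		/* faulted reading the slot */
	if (probe_kernel_write(parent, &hooker, sizeof(hooker)))
		return -EFAULT;		/* faulted writing the hook */
	return 0;
}
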
index c622772744d86fcb0dcd3e6c46a6de5323508c40..c27af49a4ede1b525daece26f937b8bfaa6560b5 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 #include <linux/clockchips.h>
+#include <linux/ftrace.h>
 #include <asm/system.h>
 
 unsigned long idle_halt;
@@ -100,6 +101,9 @@ static inline int hlt_use_halt(void)
 void default_idle(void)
 {
        if (hlt_use_halt()) {
+               struct power_trace it;
+
+               trace_power_start(&it, POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
@@ -112,6 +116,7 @@ void default_idle(void)
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
+               trace_power_end(&it);
        } else {
                local_irq_enable();
                /* loop is done by the caller */
@@ -154,24 +159,31 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
+       struct power_trace it;
+
+       trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
        if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
+       trace_power_end(&it);
 }
 
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
+       struct power_trace it;
        if (!need_resched()) {
+               trace_power_start(&it, POWER_CSTATE, 1);
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
+               trace_power_end(&it);
        } else
                local_irq_enable();
 }
@@ -183,9 +195,13 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
+       struct power_trace it;
+
+       trace_power_start(&it, POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
+       trace_power_end(&it);
 }
 
 /*
index 0a6d8c12e10dc7742c2e33dd29475874cb9f2b50..2c8ec1ba75e663e63b4ce2d1740b6db1fb2954d3 100644 (file)
@@ -668,14 +668,14 @@ static int ptrace_bts_read_record(struct task_struct *child, size_t index,
        size_t bts_index, bts_end;
        int error;
 
-       error = ds_get_bts_end(child, &bts_end);
+       error = ds_get_bts_end(child->bts, &bts_end);
        if (error < 0)
                return error;
 
        if (bts_end <= index)
                return -EINVAL;
 
-       error = ds_get_bts_index(child, &bts_index);
+       error = ds_get_bts_index(child->bts, &bts_index);
        if (error < 0)
                return error;
 
@@ -684,7 +684,7 @@ static int ptrace_bts_read_record(struct task_struct *child, size_t index,
        if (bts_end <= bts_index)
                bts_index -= bts_end;
 
-       error = ds_access_bts(child, bts_index, &bts_record);
+       error = ds_access_bts(child->bts, bts_index, &bts_record);
        if (error < 0)
                return error;
 
@@ -705,14 +705,14 @@ static int ptrace_bts_drain(struct task_struct *child,
        size_t end, i;
        int error;
 
-       error = ds_get_bts_index(child, &end);
+       error = ds_get_bts_index(child->bts, &end);
        if (error < 0)
                return error;
 
        if (size < (end * sizeof(struct bts_struct)))
                return -EIO;
 
-       error = ds_access_bts(child, 0, (const void **)&raw);
+       error = ds_access_bts(child->bts, 0, (const void **)&raw);
        if (error < 0)
                return error;
 
@@ -723,18 +723,13 @@ static int ptrace_bts_drain(struct task_struct *child,
                        return -EFAULT;
        }
 
-       error = ds_clear_bts(child);
+       error = ds_clear_bts(child->bts);
        if (error < 0)
                return error;
 
        return end;
 }
 
-static void ptrace_bts_ovfl(struct task_struct *child)
-{
-       send_sig(child->thread.bts_ovfl_signal, child, 0);
-}
-
 static int ptrace_bts_config(struct task_struct *child,
                             long cfg_size,
                             const struct ptrace_bts_config __user *ucfg)
@@ -760,23 +755,45 @@ static int ptrace_bts_config(struct task_struct *child,
                goto errout;
 
        if (cfg.flags & PTRACE_BTS_O_ALLOC) {
-               ds_ovfl_callback_t ovfl = NULL;
+               bts_ovfl_callback_t ovfl = NULL;
                unsigned int sig = 0;
 
-               /* we ignore the error in case we were not tracing child */
-               (void)ds_release_bts(child);
+               error = -EINVAL;
+               if (cfg.size < (10 * bts_cfg.sizeof_bts))
+                       goto errout;
 
                if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
                        if (!cfg.signal)
                                goto errout;
 
+                       error = -EOPNOTSUPP;
+                       goto errout;
+
                        sig  = cfg.signal;
-                       ovfl = ptrace_bts_ovfl;
                }
 
-               error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl);
-               if (error < 0)
+               if (child->bts) {
+                       (void)ds_release_bts(child->bts);
+                       kfree(child->bts_buffer);
+
+                       child->bts = NULL;
+                       child->bts_buffer = NULL;
+               }
+
+               error = -ENOMEM;
+               child->bts_buffer = kzalloc(cfg.size, GFP_KERNEL);
+               if (!child->bts_buffer)
+                       goto errout;
+
+               child->bts = ds_request_bts(child, child->bts_buffer, cfg.size,
+                                           ovfl, /* th = */ (size_t)-1);
+               if (IS_ERR(child->bts)) {
+                       error = PTR_ERR(child->bts);
+                       kfree(child->bts_buffer);
+                       child->bts = NULL;
+                       child->bts_buffer = NULL;
                        goto errout;
+               }
 
                child->thread.bts_ovfl_signal = sig;
        }
@@ -823,15 +840,15 @@ static int ptrace_bts_status(struct task_struct *child,
        if (cfg_size < sizeof(cfg))
                return -EIO;
 
-       error = ds_get_bts_end(child, &end);
+       error = ds_get_bts_end(child->bts, &end);
        if (error < 0)
                return error;
 
-       error = ds_access_bts(child, /* index = */ 0, &base);
+       error = ds_access_bts(child->bts, /* index = */ 0, &base);
        if (error < 0)
                return error;
 
-       error = ds_access_bts(child, /* index = */ end, &max);
+       error = ds_access_bts(child->bts, /* index = */ end, &max);
        if (error < 0)
                return error;
 
@@ -884,10 +901,7 @@ static int ptrace_bts_write_record(struct task_struct *child,
                return -EINVAL;
        }
 
-       /* The writing task will be the switched-to task on a context
-        * switch. It needs to write into the switched-from task's BTS
-        * buffer. */
-       return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts);
+       return ds_write_bts(child->bts, bts_record, bts_cfg.sizeof_bts);
 }
 
 void ptrace_bts_take_timestamp(struct task_struct *tsk,
@@ -929,17 +943,16 @@ void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
        switch (c->x86) {
        case 0x6:
                switch (c->x86_model) {
+               case 0 ... 0xC:
+                       /* sorry, don't know about them */
+                       break;
                case 0xD:
                case 0xE: /* Pentium M */
                        bts_configure(&bts_cfg_pentium_m);
                        break;
-               case 0xF: /* Core2 */
-        case 0x1C: /* Atom */
+               default: /* Core2, Atom, ... */
                        bts_configure(&bts_cfg_core2);
                        break;
-               default:
-                       /* sorry, don't know about them */
-                       break;
                }
                break;
        case 0xF:
@@ -973,13 +986,17 @@ void ptrace_disable(struct task_struct *child)
        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 #endif
 #ifdef CONFIG_X86_PTRACE_BTS
-       (void)ds_release_bts(child);
+       if (child->bts) {
+               (void)ds_release_bts(child->bts);
+               kfree(child->bts_buffer);
+               child->bts_buffer = NULL;
 
-       child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
-       if (!child->thread.debugctlmsr)
-               clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+               child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
+               if (!child->thread.debugctlmsr)
+                       clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
 
-       clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
+               clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
+       }
 #endif /* CONFIG_X86_PTRACE_BTS */
 }
 
@@ -1111,9 +1128,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        (child, data, (struct ptrace_bts_config __user *)addr);
                break;
 
-       case PTRACE_BTS_SIZE:
-               ret = ds_get_bts_index(child, /* pos = */ NULL);
+       case PTRACE_BTS_SIZE: {
+               size_t size;
+
+               ret = ds_get_bts_index(child->bts, &size);
+               if (ret == 0) {
+                       BUG_ON(size != (int) size);
+                       ret = (int) size;
+               }
                break;
+       }
 
        case PTRACE_BTS_GET:
                ret = ptrace_bts_read_record
@@ -1121,7 +1145,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_BTS_CLEAR:
-               ret = ds_clear_bts(child);
+               ret = ds_clear_bts(child->bts);
                break;
 
        case PTRACE_BTS_DRAIN:
index a03e7f6d90c35af5f4638f6394e4a39f6e2e4020..10786af95545ea37654bcf7125d7a880edc03540 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/stacktrace.h>
 
 static void save_stack_warning(void *data, char *msg)
@@ -83,3 +84,66 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
+
+struct stack_frame {
+       const void __user       *next_fp;
+       unsigned long           ret_addr;
+};
+
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+       int ret;
+
+       if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+               return 0;
+
+       ret = 1;
+       pagefault_disable();
+       if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+               ret = 0;
+       pagefault_enable();
+
+       return ret;
+}
+
+static inline void __save_stack_trace_user(struct stack_trace *trace)
+{
+       const struct pt_regs *regs = task_pt_regs(current);
+       const void __user *fp = (const void __user *)regs->bp;
+
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = regs->ip;
+
+       while (trace->nr_entries < trace->max_entries) {
+               struct stack_frame frame;
+
+               frame.next_fp = NULL;
+               frame.ret_addr = 0;
+               if (!copy_stack_frame(fp, &frame))
+                       break;
+               if ((unsigned long)fp < regs->sp)
+                       break;
+               if (frame.ret_addr) {
+                       trace->entries[trace->nr_entries++] =
+                               frame.ret_addr;
+               }
+               if (fp == frame.next_fp)
+                       break;
+               fp = frame.next_fp;
+       }
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+       /*
+        * Trace user stack if we are not a kernel thread
+        */
+       if (current->mm) {
+               __save_stack_trace_user(trace);
+       }
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
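
A caller supplies the entry buffer and its capacity; save_stack_trace_user() fills in the user-mode return addresses and appends a ULONG_MAX terminator when room remains. A sketch of typical usage -- the surrounding function is hypothetical, not part of this patch:

#include <linux/stacktrace.h>
#include <linux/kernel.h>

#define MAX_USER_ENTRIES 16

static void show_user_backtrace(void)
{
	unsigned long entries[MAX_USER_ENTRIES];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= MAX_USER_ENTRIES,
		.nr_entries	= 0,
		.skip		= 0,
	};
	unsigned int i;

	save_stack_trace_user(&trace);

	for (i = 0; i < trace.nr_entries; i++) {
		if (entries[i] == ULONG_MAX)	/* terminator */
			break;
		printk(KERN_DEBUG " %#lx\n", entries[i]);
	}
}
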
index 0b8b6690a86d184959703177b9cb6011bfd605ca..6f3d3d4cd97338162e6889f4eaae86b744f2a2ab 100644 (file)
@@ -17,6 +17,9 @@
  *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
  */
 
+/* Disable profiling for userspace code: */
+#define DISABLE_BRANCH_PROFILING
+
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
index fea4565ff576b9f52f76d9286ece00ed52a4f141..d8cc96a2738f739a9f5dba76f77ee8c9a0885e6a 100644 (file)
@@ -8,9 +8,8 @@ obj-$(CONFIG_X86_PTDUMP)        += dump_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)          += highmem_32.o
 
-obj-$(CONFIG_MMIOTRACE_HOOKS)  += kmmio.o
 obj-$(CONFIG_MMIOTRACE)                += mmiotrace.o
-mmiotrace-y                    := pf_in.o mmio-mod.o
+mmiotrace-y                    := kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)   += testmmiotrace.o
 
 obj-$(CONFIG_NUMA)             += numa_$(BITS).o
index 31e8730fa2463214f36c2f6b3df9d0f75f6be346..21e996a70d68f9a85cc5abc88bc36f2f0d43b1a1 100644 (file)
@@ -53,7 +53,7 @@
 
 static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
 {
-#ifdef CONFIG_MMIOTRACE_HOOKS
+#ifdef CONFIG_MMIOTRACE
        if (unlikely(is_kmmio_active()))
                if (kmmio_handler(regs, addr) == 1)
                        return -1;
@@ -413,6 +413,7 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
                                 unsigned long error_code)
 {
        unsigned long flags = oops_begin();
+       int sig = SIGKILL;
        struct task_struct *tsk;
 
        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
@@ -423,8 +424,8 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
        tsk->thread.trap_no = 14;
        tsk->thread.error_code = error_code;
        if (__die("Bad pagetable", regs, error_code))
-               regs = NULL;
-       oops_end(flags, regs, SIGKILL);
+               sig = 0;
+       oops_end(flags, regs, sig);
 }
 #endif
 
@@ -590,6 +591,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
        int fault;
 #ifdef CONFIG_X86_64
        unsigned long flags;
+       int sig;
 #endif
 
        tsk = current;
@@ -849,11 +851,12 @@ no_context:
        bust_spinlocks(0);
        do_exit(SIGKILL);
 #else
+       sig = SIGKILL;
        if (__die("Oops", regs, error_code))
-               regs = NULL;
+               sig = 0;
        /* Executive summary in case the body of the oops scrolled away */
        printk(KERN_EMERG "CR2: %016lx\n", address);
-       oops_end(flags, regs, SIGKILL);
+       oops_end(flags, regs, sig);
 #endif
 
 /*
index 1ef0f90813d626ed6be436b93d3d5b6550dbb392..d9d35824c56f30e56266445cdf9ed5877f94bf52 100644 (file)
@@ -9,6 +9,9 @@
  * Also alternative() doesn't work.
  */
 
+/* Disable profiling for userspace code: */
+#define DISABLE_BRANCH_PROFILING
+
 #include <linux/kernel.h>
 #include <linux/posix-timers.h>
 #include <linux/time.h>
index 1ab7c15c8d7a58bc2df639a759a0b719bb2c25da..290b219fad9c5a1b825d495c8ea427bda7b04c5c 100644 (file)
@@ -47,6 +47,7 @@ config BLK_DEV_IO_TRACE
        depends on SYSFS
        select RELAY
        select DEBUG_FS
+       select TRACEPOINTS
        help
          Say Y here if you want to be able to trace the block layer actions
          on a given queue. Tracing allows you to see any traffic happening
index 10e8a64a5a5b1b213cbc2886755b22dedce76313..0c06cf5aaaf83f1e8b30d58599f5eef5bf07c993 100644 (file)
 #include <linux/task_io_accounting_ops.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <trace/block.h>
 
 #include "blk.h"
 
+DEFINE_TRACE(block_plug);
+DEFINE_TRACE(block_unplug_io);
+DEFINE_TRACE(block_unplug_timer);
+DEFINE_TRACE(block_getrq);
+DEFINE_TRACE(block_sleeprq);
+DEFINE_TRACE(block_rq_requeue);
+DEFINE_TRACE(block_bio_backmerge);
+DEFINE_TRACE(block_bio_frontmerge);
+DEFINE_TRACE(block_bio_queue);
+DEFINE_TRACE(block_rq_complete);
+DEFINE_TRACE(block_remap);     /* Also used in drivers/md/dm.c */
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+
 static int __make_request(struct request_queue *q, struct bio *bio);
 
 /*
@@ -205,7 +219,7 @@ void blk_plug_device(struct request_queue *q)
 
        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+               trace_block_plug(q);
        }
 }
 EXPORT_SYMBOL(blk_plug_device);
@@ -292,9 +306,7 @@ void blk_unplug_work(struct work_struct *work)
        struct request_queue *q =
                container_of(work, struct request_queue, unplug_work);
 
-       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                               q->rq.count[READ] + q->rq.count[WRITE]);
-
+       trace_block_unplug_io(q);
        q->unplug_fn(q);
 }
 
@@ -302,9 +314,7 @@ void blk_unplug_timeout(unsigned long data)
 {
        struct request_queue *q = (struct request_queue *)data;
 
-       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-                               q->rq.count[READ] + q->rq.count[WRITE]);
-
+       trace_block_unplug_timer(q);
        kblockd_schedule_work(q, &q->unplug_work);
 }
 
@@ -314,9 +324,7 @@ void blk_unplug(struct request_queue *q)
         * devices don't necessarily have an ->unplug_fn defined
         */
        if (q->unplug_fn) {
-               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                                       q->rq.count[READ] + q->rq.count[WRITE]);
-
+               trace_block_unplug_io(q);
                q->unplug_fn(q);
        }
 }
@@ -822,7 +830,7 @@ rq_starved:
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
 
-       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+       trace_block_getrq(q, bio, rw);
 out:
        return rq;
 }
@@ -848,7 +856,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+               trace_block_sleeprq(q, bio, rw);
 
                __generic_unplug_device(q);
                spin_unlock_irq(q->queue_lock);
@@ -928,7 +936,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
        blk_delete_timer(rq);
        blk_clear_rq_complete(rq);
-       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+       trace_block_rq_requeue(q, rq);
 
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
@@ -1167,7 +1175,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!ll_back_merge_fn(q, req, bio))
                        break;
 
-               blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+               trace_block_bio_backmerge(q, bio);
 
                req->biotail->bi_next = bio;
                req->biotail = bio;
@@ -1186,7 +1194,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                if (!ll_front_merge_fn(q, req, bio))
                        break;
 
-               blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+               trace_block_bio_frontmerge(q, bio);
 
                bio->bi_next = req->bio;
                req->bio = bio;
@@ -1269,7 +1277,7 @@ static inline void blk_partition_remap(struct bio *bio)
                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
 
-               blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+               trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
                                    bdev->bd_dev, bio->bi_sector,
                                    bio->bi_sector - p->start_sect);
        }
@@ -1441,10 +1449,10 @@ end_io:
                        goto end_io;
 
                if (old_sector != -1)
-                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+                       trace_block_remap(q, bio, old_dev, bio->bi_sector,
                                            old_sector);
 
-               blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+               trace_block_bio_queue(q, bio);
 
                old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
@@ -1656,7 +1664,7 @@ static int __end_that_request_first(struct request *req, int error,
        int total_bytes, bio_nbytes, next_idx = 0;
        struct bio *bio;
 
-       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+       trace_block_rq_complete(req->q, req);
 
        /*
         * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
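
The pattern replacing the blk_add_trace_*() calls throughout this file is the tracepoint triple: DECLARE_TRACE() in a shared header, DEFINE_TRACE() in exactly one compilation unit, and a trace_*() call at the instrumentation site that stays nearly free until a probe registers. Sketched for a hypothetical tracepoint, using the 2.6.28-era TPPROTO/TPARGS macros (names are illustrative only):

#include <linux/tracepoint.h>
#include <linux/blkdev.h>

/* In a shared header (cf. include/trace/block.h): */
DECLARE_TRACE(my_event,
	TPPROTO(struct request_queue *q),
	TPARGS(q));

/* In exactly one .c file (cf. the DEFINE_TRACE() block above): */
DEFINE_TRACE(my_event);

/* At the instrumentation site: */
static void instrument(struct request_queue *q)
{
	trace_my_event(q);	/* near-zero cost until a probe attaches */
}

/* In the consumer (cf. blk_register_tracepoints() in block/blktrace.c): */
static void my_probe(struct request_queue *q)
{
	/* runs each time trace_my_event() fires with a probe attached */
}

static int my_attach(void)
{
	return register_trace_my_event(my_probe);
}
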
index 85049a7e7a179a97c283eb4ebe6c1fe7285f80cf..b0a2cae886dbdc4dde9a2efa99345ea2462a54b0 100644 (file)
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
 #include <linux/time.h>
+#include <trace/block.h>
 #include <asm/uaccess.h>
 
 static unsigned int blktrace_seq __read_mostly = 1;
 
+/* Global reference count of probes */
+static DEFINE_MUTEX(blk_probe_mutex);
+static atomic_t blk_probes_ref = ATOMIC_INIT(0);
+
+static int blk_register_tracepoints(void);
+static void blk_unregister_tracepoints(void);
+
 /*
  * Send out a notify message.
  */
@@ -119,7 +127,7 @@ static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK
  * The worker for the various blk_add_trace*() types. Fills out a
  * blk_io_trace structure and places it in a per-cpu subbuffer.
  */
-void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                     int rw, u32 what, int error, int pdu_len, void *pdu_data)
 {
        struct task_struct *tsk = current;
@@ -177,8 +185,6 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
        local_irq_restore(flags);
 }
 
-EXPORT_SYMBOL_GPL(__blk_add_trace);
-
 static struct dentry *blk_tree_root;
 static DEFINE_MUTEX(blk_tree_mutex);
 static unsigned int root_users;
@@ -237,6 +243,10 @@ static void blk_trace_cleanup(struct blk_trace *bt)
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
+       mutex_lock(&blk_probe_mutex);
+       if (atomic_dec_and_test(&blk_probes_ref))
+               blk_unregister_tracepoints();
+       mutex_unlock(&blk_probe_mutex);
 }
 
 int blk_trace_remove(struct request_queue *q)
@@ -428,6 +438,14 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;
 
+       mutex_lock(&blk_probe_mutex);
+       if (atomic_add_return(1, &blk_probes_ref) == 1) {
+               ret = blk_register_tracepoints();
+               if (ret)
+                       goto probe_err;
+       }
+       mutex_unlock(&blk_probe_mutex);
+
        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
@@ -436,6 +454,9 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
        }
 
        return 0;
+probe_err:
+       atomic_dec(&blk_probes_ref);
+       mutex_unlock(&blk_probe_mutex);
 err:
        if (dir)
                blk_remove_tree(dir);
@@ -562,3 +583,308 @@ void blk_trace_shutdown(struct request_queue *q)
                blk_trace_remove(q);
        }
 }
+
+/*
+ * blktrace probes
+ */
+
+/**
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q:         queue the io is for
+ * @rq:                the source request
+ * @what:      the action
+ *
+ * Description:
+ *     Records an action against a request. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+                                   u32 what)
+{
+       struct blk_trace *bt = q->blk_trace;
+       int rw = rq->cmd_flags & 0x03;
+
+       if (likely(!bt))
+               return;
+
+       if (blk_discard_rq(rq))
+               rw |= (1 << BIO_RW_DISCARD);
+
+       if (blk_pc_request(rq)) {
+               what |= BLK_TC_ACT(BLK_TC_PC);
+               __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
+                               sizeof(rq->cmd), rq->cmd);
+       } else  {
+               what |= BLK_TC_ACT(BLK_TC_FS);
+               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+                               rw, what, rq->errors, 0, NULL);
+       }
+}
+
+static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+}
+
+static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+}
+
+static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+}
+
+static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+}
+
+static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
+{
+       blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+}
+
+/**
+ * blk_add_trace_bio - Add a trace for a bio oriented action
+ * @q:         queue the io is for
+ * @bio:       the source bio
+ * @what:      the action
+ *
+ * Description:
+ *     Records an action against a bio. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+                                    u32 what)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (likely(!bt))
+               return;
+
+       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
+                       !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
+}
+
+static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
+}
+
+static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+}
+
+static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+}
+
+static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
+{
+       blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+}
+
+static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
+{
+       if (bio)
+               blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
+       else {
+               struct blk_trace *bt = q->blk_trace;
+
+               if (bt)
+                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+       }
+}
+
+static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
+{
+       if (bio)
+               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
+       else {
+               struct blk_trace *bt = q->blk_trace;
+
+               if (bt)
+                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL);
+       }
+}
+
+static void blk_add_trace_plug(struct request_queue *q)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt)
+               __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+}
+
+static void blk_add_trace_unplug_io(struct request_queue *q)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt) {
+               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+               __be64 rpdu = cpu_to_be64(pdu);
+
+               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
+                               sizeof(rpdu), &rpdu);
+       }
+}
+
+static void blk_add_trace_unplug_timer(struct request_queue *q)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt) {
+               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+               __be64 rpdu = cpu_to_be64(pdu);
+
+               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
+                               sizeof(rpdu), &rpdu);
+       }
+}
+
+static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
+                               unsigned int pdu)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (bt) {
+               __be64 rpdu = cpu_to_be64(pdu);
+
+               __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
+                               BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+                               sizeof(rpdu), &rpdu);
+       }
+}
+
+/**
+ * blk_add_trace_remap - Add a trace for a remap operation
+ * @q:         queue the io is for
+ * @bio:       the source bio
+ * @dev:       target device
+ * @from:      source sector
+ * @to:                target sector
+ *
+ * Description:
+ *     A device mapper or RAID target sometimes needs to split a bio because
+ *     it spans a stripe (or similar). Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+                                      dev_t dev, sector_t from, sector_t to)
+{
+       struct blk_trace *bt = q->blk_trace;
+       struct blk_io_trace_remap r;
+
+       if (likely(!bt))
+               return;
+
+       r.device = cpu_to_be32(dev);
+       r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
+       r.sector = cpu_to_be64(to);
+
+       __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
+                       !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+/**
+ * blk_add_driver_data - Add binary message with driver-specific data
+ * @q:         queue the io is for
+ * @rq:                io request
+ * @data:      driver-specific data
+ * @len:       length of driver-specific data
+ *
+ * Description:
+ *     Some drivers might want to write driver-specific data per request.
+ *
+ **/
+void blk_add_driver_data(struct request_queue *q,
+                        struct request *rq,
+                        void *data, size_t len)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (likely(!bt))
+               return;
+
+       if (blk_pc_request(rq))
+               __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
+                               rq->errors, len, data);
+       else
+               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+                               0, BLK_TA_DRV_DATA, rq->errors, len, data);
+}
+EXPORT_SYMBOL_GPL(blk_add_driver_data);
+
+static int blk_register_tracepoints(void)
+{
+       int ret;
+
+       ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
+       WARN_ON(ret);
+       ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
+       WARN_ON(ret);
+       ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
+       WARN_ON(ret);
+       ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+       WARN_ON(ret);
+       ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+       WARN_ON(ret);
+       ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
+       WARN_ON(ret);
+       ret = register_trace_block_getrq(blk_add_trace_getrq);
+       WARN_ON(ret);
+       ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
+       WARN_ON(ret);
+       ret = register_trace_block_plug(blk_add_trace_plug);
+       WARN_ON(ret);
+       ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+       WARN_ON(ret);
+       ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
+       WARN_ON(ret);
+       ret = register_trace_block_split(blk_add_trace_split);
+       WARN_ON(ret);
+       ret = register_trace_block_remap(blk_add_trace_remap);
+       WARN_ON(ret);
+       return 0;
+}
+
+static void blk_unregister_tracepoints(void)
+{
+       unregister_trace_block_remap(blk_add_trace_remap);
+       unregister_trace_block_split(blk_add_trace_split);
+       unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
+       unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+       unregister_trace_block_plug(blk_add_trace_plug);
+       unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
+       unregister_trace_block_getrq(blk_add_trace_getrq);
+       unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
+       unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+       unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+       unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
+       unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+       unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
+       unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+       unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
+       unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
+       unregister_trace_block_rq_abort(blk_add_trace_rq_abort);
+
+       tracepoint_synchronize_unregister();
+}
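
The new blk_probes_ref/blk_probe_mutex pair means the probes above are attached only while at least one blktrace instance is active: the first setup registers them, the last teardown unregisters them. The same first-in/last-out pattern in a minimal userspace sketch, with a pthread mutex standing in for blk_probe_mutex:

#include <stdio.h>
#include <pthread.h>

/* First user attaches the (costly) hooks, last user detaches them. */
static pthread_mutex_t probe_mutex = PTHREAD_MUTEX_INITIALIZER;
static int probes_ref;

static void hooks_attach(void)  { puts("probes registered"); }
static void hooks_detach(void)  { puts("probes unregistered"); }

static void trace_start(void)
{
	pthread_mutex_lock(&probe_mutex);
	if (probes_ref++ == 0)
		hooks_attach();
	pthread_mutex_unlock(&probe_mutex);
}

static void trace_stop(void)
{
	pthread_mutex_lock(&probe_mutex);
	if (--probes_ref == 0)
		hooks_detach();
	pthread_mutex_unlock(&probe_mutex);
}

int main(void)
{
	trace_start();	/* registers */
	trace_start();	/* refcount only */
	trace_stop();
	trace_stop();	/* unregisters */
	return 0;
}
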
index 9ac82dde99dddcd5434659f65a1e81c2a166b639..e5677fe4f4128a2ca0949eca672404a1b1377a4e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>
 
@@ -41,6 +42,8 @@
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
+DEFINE_TRACE(block_rq_abort);
+
 /*
  * Merge hash stuff.
  */
@@ -52,6 +55,9 @@ static const int elv_hash_shift = 6;
 #define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)                (!hlist_unhashed(&(rq)->hash))
 
+DEFINE_TRACE(block_rq_insert);
+DEFINE_TRACE(block_rq_issue);
+
 /*
  * Query io scheduler to see if the current process issuing bio may be
  * merged with rq.
@@ -586,7 +592,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
        unsigned ordseq;
        int unplug_it = 1;
 
-       blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+       trace_block_rq_insert(q, rq);
 
        rq->q = q;
 
@@ -772,7 +778,7 @@ struct request *elv_next_request(struct request_queue *q)
                         * not be passed by new incoming requests
                         */
                        rq->cmd_flags |= REQ_STARTED;
-                       blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+                       trace_block_rq_issue(q, rq);
                }
 
                if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -921,7 +927,7 @@ void elv_abort_queue(struct request_queue *q)
        while (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                rq->cmd_flags |= REQ_QUIET;
-               blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+               trace_block_rq_abort(q, rq);
                __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
        }
 }
index ce0d9da52a8ab808a24e8d30bff27d631b474713..94966edfb44dedc340d4d81f230c43a0aec9b7bf 100644 (file)
@@ -274,6 +274,22 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
 
+#ifdef CONFIG_TRACING
+#include <linux/ftrace.h>
+
+static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
+{
+       ftrace_dump();
+}
+static struct sysrq_key_op sysrq_ftrace_dump_op = {
+       .handler        = sysrq_ftrace_dump,
+       .help_msg       = "dumpZ-ftrace-buffer",
+       .action_msg     = "Dump ftrace buffer",
+       .enable_mask    = SYSRQ_ENABLE_DUMP,
+};
+#else
+#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0)
+#endif
 
 static void sysrq_handle_showmem(int key, struct tty_struct *tty)
 {
@@ -406,7 +422,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
        NULL,                           /* x */
        /* y: May be registered on sparc64 for global register dump */
        NULL,                           /* y */
-       NULL                            /* z */
+       &sysrq_ftrace_dump_op,          /* z */
 };
 
 /* key2index calculation, -1 on invalid index */
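
With the handler wired into slot 'z', the ftrace buffer dump can be exercised from userspace with `echo z > /proc/sysrq-trigger` (or the Alt-SysRq-z chord), provided the SYSRQ_ENABLE_DUMP class of operations is enabled.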
index c99e4728ff4162ed16c4a7ec99fdc0c27c8cb35f..343094c3feeb834c55f8aa1ecd3baa57890eca1d 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/idr.h>
 #include <linux/hdreg.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -51,6 +52,8 @@ struct dm_target_io {
        union map_info info;
 };
 
+DEFINE_TRACE(block_bio_complete);
+
 union map_info *dm_get_mapinfo(struct bio *bio)
 {
        if (bio && bio->bi_private)
@@ -504,8 +507,7 @@ static void dec_pending(struct dm_io *io, int error)
                end_io_acct(io);
 
                if (io->error != DM_ENDIO_REQUEUE) {
-                       blk_add_trace_bio(io->md->queue, io->bio,
-                                         BLK_TA_COMPLETE);
+                       trace_block_bio_complete(io->md->queue, io->bio);
 
                        bio_endio(io->bio, io->error);
                }
@@ -598,7 +600,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */
 
-               blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+               trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
                                    tio->io->bio->bi_bdev->bd_dev,
                                    clone->bi_sector, sector);
 
index 77a55bcceedbc6afc79f7a081c0f0af5e7c4f46e..df99c882b807549f25c30b13416a3ae8ca43d7e0 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <scsi/sg.h>           /* for struct sg_iovec */
 
+DEFINE_TRACE(block_split);
+
 static struct kmem_cache *bio_slab __read_mostly;
 
 static mempool_t *bio_split_pool __read_mostly;
@@ -1263,7 +1266,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
        if (!bp)
                return bp;
 
-       blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+       trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
                                bi->bi_sector + first_sectors);
 
        BUG_ON(bi->bi_vcnt != 1);
index eba2eabcd2b86a40495845053d9d647e228540f9..16c211558c2253f38b51af2f9eca76a2d52cb19f 100644 (file)
@@ -357,7 +357,18 @@ int seq_printf(struct seq_file *m, const char *f, ...)
 }
 EXPORT_SYMBOL(seq_printf);
 
-static char *mangle_path(char *s, char *p, char *esc)
+/**
+ *     mangle_path -   mangle and copy path to buffer beginning
+ *     @s: buffer start
+ *     @p: beginning of path in above buffer
+ *     @esc: set of characters that need escaping
+ *
+ *      Copy the path from @p to @s, replacing each occurrence of a character
+ *      from @esc with the usual octal escape.
+ *      Returns a pointer past the last written character in @s, or NULL in
+ *      case of failure.
+ */
+char *mangle_path(char *s, char *p, char *esc)
 {
        while (s <= p) {
                char c = *p++;
@@ -376,6 +387,7 @@ static char *mangle_path(char *s, char *p, char *esc)
        }
        return NULL;
 }
+EXPORT_SYMBOL(mangle_path);
 
 /*
  * return the absolute path of 'dentry' residing in mount 'mnt'.
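
Un-staticing mangle_path() lets tracers reuse the escaping logic; a hedged sketch of the calling convention, with hypothetical buffer contents (seq_path() builds the path near the end of the buffer, then mangles it back to the start):

    static void mangle_path_example(void)
    {
            char buf[64];
            char *p = buf + 32;
            char *end;

            strcpy(p, "a\nb");                 /* path containing a newline */
            end = mangle_path(buf, p, "\n");   /* buf becomes "a\012b" */
            if (end)
                    *end = '\0';               /* end points past last byte */
    }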
index 80744606bad172b57d2ab52dc01bb0bf33af293e..eba835a2c2cd5a39c295e700015a9497d69151f9 100644 (file)
 #define MCOUNT_REC()
 #endif
 
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
+#define LIKELY_PROFILE()       VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
+                               *(_ftrace_annotated_branch)                           \
+                               VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+#else
+#define LIKELY_PROFILE()
+#endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+#define BRANCH_PROFILE()       VMLINUX_SYMBOL(__start_branch_profile) = .;   \
+                               *(_ftrace_branch)                             \
+                               VMLINUX_SYMBOL(__stop_branch_profile) = .;
+#else
+#define BRANCH_PROFILE()
+#endif
+
 /* .data section */
 #define DATA_DATA                                                      \
        *(.data)                                                        \
        VMLINUX_SYMBOL(__start___markers) = .;                          \
        *(__markers)                                                    \
        VMLINUX_SYMBOL(__stop___markers) = .;                           \
+       . = ALIGN(32);                                                  \
        VMLINUX_SYMBOL(__start___tracepoints) = .;                      \
        *(__tracepoints)                                                \
-       VMLINUX_SYMBOL(__stop___tracepoints) = .;
+       VMLINUX_SYMBOL(__stop___tracepoints) = .;                       \
+       LIKELY_PROFILE()                                                \
+       BRANCH_PROFILE()
 
 #define RO_DATA(align)                                                 \
        . = ALIGN((align));                                             \
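
The new __start_*/__stop_* symbols bound arrays of struct ftrace_branch_data records (the struct is added to linux/compiler.h further down in this diff); a sketch of how a tracer might walk the annotated-branch table:

    #include <linux/kernel.h>

    extern struct ftrace_branch_data __start_annotated_branch_profile[];
    extern struct ftrace_branch_data __stop_annotated_branch_profile[];

    static void walk_annotated_branches(void)
    {
            struct ftrace_branch_data *p;

            for (p = __start_annotated_branch_profile;
                 p < __stop_annotated_branch_profile; p++)
                    printk(KERN_INFO "%s:%u %lu correct, %lu incorrect\n",
                           p->file, p->line, p->correct, p->incorrect);
    }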
index bdf505d33e77c1ded7d9008b3f0bf6e3421d9890..1dba3493d520b56e4465aa59c37811b784fb80a8 100644 (file)
@@ -160,7 +160,6 @@ struct blk_trace {
 
 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
 extern void blk_trace_shutdown(struct request_queue *);
-extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
 extern int do_blk_trace_setup(struct request_queue *q,
        char *name, dev_t dev, struct blk_user_trace_setup *buts);
 extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -186,168 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
        } while (0)
 #define BLK_TN_MAX_MSG         128
 
-/**
- * blk_add_trace_rq - Add a trace for a request oriented action
- * @q:         queue the io is for
- * @rq:                the source request
- * @what:      the action
- *
- * Description:
- *     Records an action against a request. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-                                   u32 what)
-{
-       struct blk_trace *bt = q->blk_trace;
-       int rw = rq->cmd_flags & 0x03;
-
-       if (likely(!bt))
-               return;
-
-       if (blk_discard_rq(rq))
-               rw |= (1 << BIO_RW_DISCARD);
-
-       if (blk_pc_request(rq)) {
-               what |= BLK_TC_ACT(BLK_TC_PC);
-               __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
-       } else  {
-               what |= BLK_TC_ACT(BLK_TC_FS);
-               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
-       }
-}
-
-/**
- * blk_add_trace_bio - Add a trace for a bio oriented action
- * @q:         queue the io is for
- * @bio:       the source bio
- * @what:      the action
- *
- * Description:
- *     Records an action against a bio. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-                                    u32 what)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (likely(!bt))
-               return;
-
-       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
-}
-
-/**
- * blk_add_trace_generic - Add a trace for a generic action
- * @q:         queue the io is for
- * @bio:       the source bio
- * @rw:                the data direction
- * @what:      the action
- *
- * Description:
- *     Records a simple trace
- *
- **/
-static inline void blk_add_trace_generic(struct request_queue *q,
-                                        struct bio *bio, int rw, u32 what)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (likely(!bt))
-               return;
-
-       if (bio)
-               blk_add_trace_bio(q, bio, what);
-       else
-               __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
-}
-
-/**
- * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
- * @q:         queue the io is for
- * @what:      the action
- * @bio:       the source bio
- * @pdu:       the integer payload
- *
- * Description:
- *     Adds a trace with some integer payload. This might be an unplug
- *     option given as the action, with the depth at unplug time given
- *     as the payload
- *
- **/
-static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
-                                        struct bio *bio, unsigned int pdu)
-{
-       struct blk_trace *bt = q->blk_trace;
-       __be64 rpdu = cpu_to_be64(pdu);
-
-       if (likely(!bt))
-               return;
-
-       if (bio)
-               __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
-       else
-               __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
-}
-
-/**
- * blk_add_trace_remap - Add a trace for a remap operation
- * @q:         queue the io is for
- * @bio:       the source bio
- * @dev:       target device
- * @from:      source sector
- * @to:                target sector
- *
- * Description:
- *     Device mapper or raid target sometimes need to split a bio because
- *     it spans a stripe (or similar). Add a trace for that action.
- *
- **/
-static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
-                                      dev_t dev, sector_t from, sector_t to)
-{
-       struct blk_trace *bt = q->blk_trace;
-       struct blk_io_trace_remap r;
-
-       if (likely(!bt))
-               return;
-
-       r.device = cpu_to_be32(dev);
-       r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
-       r.sector = cpu_to_be64(to);
-
-       __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
-}
-
-/**
- * blk_add_driver_data - Add binary message with driver-specific data
- * @q:         queue the io is for
- * @rq:                io request
- * @data:      driver-specific data
- * @len:       length of driver-specific data
- *
- * Description:
- *     Some drivers might want to write driver-specific data per request.
- *
- **/
-static inline void blk_add_driver_data(struct request_queue *q,
-                                      struct request *rq,
-                                      void *data, size_t len)
-{
-       struct blk_trace *bt = q->blk_trace;
-
-       if (likely(!bt))
-               return;
-
-       if (blk_pc_request(rq))
-               __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
-                               rq->errors, len, data);
-       else
-               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
-                               0, BLK_TA_DRV_DATA, rq->errors, len, data);
-}
-
+extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
+                               void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                           char __user *arg);
 extern int blk_trace_startstop(struct request_queue *q, int start);
@@ -356,13 +195,8 @@ extern int blk_trace_remove(struct request_queue *q);
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)                (-ENOTTY)
 #define blk_trace_shutdown(q)                  do { } while (0)
-#define blk_add_trace_rq(q, rq, what)          do { } while (0)
-#define blk_add_trace_bio(q, rq, what)         do { } while (0)
-#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
-#define blk_add_trace_pdu_int(q, what, bio, pdu)       do { } while (0)
-#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
-#define blk_add_driver_data(q, rq, data, len)  do {} while (0)
 #define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY)
+#define blk_add_driver_data(q, rq, data, len)  do {} while (0)
 #define blk_trace_setup(q, name, dev, arg)     (-ENOTTY)
 #define blk_trace_startstop(q, start)          (-ENOTTY)
 #define blk_trace_remove(q)                    (-ENOTTY)
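
Since blk_add_driver_data() keeps its signature while moving out of line, driver call sites are unaffected; a hypothetical example:

    /* hypothetical completion path attaching per-request hardware status */
    static void mydrv_note_status(struct request_queue *q, struct request *rq,
                                  u32 hw_status)
    {
            blk_add_driver_data(q, rq, &hw_status, sizeof(hw_status));
    }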
index 98115d9d04daa6c8008b528bee3014a8cee11078..ea7c6be354b7d96b636f72e1580a85188b370db6 100644 (file)
@@ -59,8 +59,88 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * specific implementations come from the above header files
  */
 
-#define likely(x)      __builtin_expect(!!(x), 1)
-#define unlikely(x)    __builtin_expect(!!(x), 0)
+struct ftrace_branch_data {
+       const char *func;
+       const char *file;
+       unsigned line;
+       union {
+               struct {
+                       unsigned long correct;
+                       unsigned long incorrect;
+               };
+               struct {
+                       unsigned long miss;
+                       unsigned long hit;
+               };
+       };
+};
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
+ * to disable branch tracing on a per-file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x)      __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x)    __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({                                 \
+                       int ______r;                                    \
+                       static struct ftrace_branch_data                \
+                               __attribute__((__aligned__(4)))         \
+                               __attribute__((section("_ftrace_annotated_branch"))) \
+                               ______f = {                             \
+                               .func = __func__,                       \
+                               .file = __FILE__,                       \
+                               .line = __LINE__,                       \
+                       };                                              \
+                       ______r = likely_notrace(x);                    \
+                       ftrace_likely_update(&______f, ______r, expect); \
+                       ______r;                                        \
+               })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same.  This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+#  define likely(x)    (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# endif
+# ifndef unlikely
+#  define unlikely(x)  (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) :         \
+       ({                                                              \
+               int ______r;                                            \
+               static struct ftrace_branch_data                        \
+                       __attribute__((__aligned__(4)))                 \
+                       __attribute__((section("_ftrace_branch")))      \
+                       ______f = {                                     \
+                               .func = __func__,                       \
+                               .file = __FILE__,                       \
+                               .line = __LINE__,                       \
+                       };                                              \
+               ______r = !!(cond);                                     \
+               if (______r)                                            \
+                       ______f.hit++;                                  \
+               else                                                    \
+                       ______f.miss++;                                 \
+               ______r;                                                \
+       }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+#else
+# define likely(x)     __builtin_expect(!!(x), 1)
+# define unlikely(x)   __builtin_expect(!!(x), 0)
+#endif
 
 /* Optimization barrier */
 #ifndef barrier
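
Per the comment above, DISABLE_BRANCH_PROFILING must be defined before linux/compiler.h is pulled in, i.e. before any kernel header; a minimal sketch of opting a file out:

    /* first lines of a low-level .c file that must not be instrumented */
    #define DISABLE_BRANCH_PROFILING
    #include <linux/kernel.h>

Code such as the vDSO, which cannot call back into ftrace_likely_update(), is the intended user of this escape hatch.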
index 703eb53cfa2b2a1512b7ce97d9c1da218ad9ac0f..afba918c623c5a2ab86e4ac8363e1b24e9c95c3f 100644 (file)
@@ -23,6 +23,45 @@ struct ftrace_ops {
        struct ftrace_ops *next;
 };
 
+extern int function_trace_stop;
+
+/*
+ * Type of the current tracing.
+ */
+enum ftrace_tracing_type_t {
+       FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
+       FTRACE_TYPE_RETURN,     /* Hook the return of the function */
+};
+
+/* Current tracing type, default is FTRACE_TYPE_ENTER */
+extern enum ftrace_tracing_type_t ftrace_tracing_type;
+
+/**
+ * ftrace_stop - stop function tracer.
+ *
+ * A quick way to stop the function tracer. Note this an on off switch,
+ * it is not something that is recursive like preempt_disable.
+ * This does not disable the calling of mcount, it only stops the
+ * calling of functions from mcount.
+ */
+static inline void ftrace_stop(void)
+{
+       function_trace_stop = 1;
+}
+
+/**
+ * ftrace_start - start the function tracer.
+ *
+ * This function is the inverse of ftrace_stop. This does not enable
+ * the function tracing if the function tracer is disabled. This only
+ * sets the function tracer flag to continue calling the functions
+ * from mcount.
+ */
+static inline void ftrace_start(void)
+{
+       function_trace_stop = 0;
+}
+
 /*
  * The ftrace_ops must be a static and should also
  * be read_mostly.  These functions do modify read_mostly variables
@@ -41,9 +80,13 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
 # define unregister_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
 static inline void ftrace_kill(void) { }
+static inline void ftrace_stop(void) { }
+static inline void ftrace_start(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+/* asm/ftrace.h must be provided by archs supporting dynamic ftrace */
+#include <asm/ftrace.h>
 
 enum {
        FTRACE_FL_FREE          = (1 << 0),
@@ -59,6 +102,7 @@ struct dyn_ftrace {
        struct list_head        list;
        unsigned long           ip; /* address of mcount call-site */
        unsigned long           flags;
+       struct dyn_arch_ftrace  arch;
 };
 
 int ftrace_force_update(void);
@@ -66,19 +110,48 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset);
 
 /* defined in arch */
 extern int ftrace_ip_converted(unsigned long ip);
-extern unsigned char *ftrace_nop_replace(void);
-extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
 extern int ftrace_dyn_arch_init(void *data);
 extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern void ftrace_graph_caller(void);
+extern int ftrace_enable_ftrace_graph_caller(void);
+extern int ftrace_disable_ftrace_graph_caller(void);
+#else
+static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
+static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
+#endif
+
+/**
+ * ftrace_make_nop - convert code into nop
+ * @mod: module structure if called by module load initialization
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should be calling
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch.  The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a caller to @addr
+ *
+ * Return must be:
+ *  0 on success
+ *  -EFAULT on error reading the location
+ *  -EINVAL on a failed compare of the contents
+ *  -EPERM  on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_make_nop(struct module *mod,
+                          struct dyn_ftrace *rec, unsigned long addr);
 
 /**
- * ftrace_modify_code - modify code segment
- * @ip: the address of the code segment
- * @old_code: the contents of what is expected to be there
- * @new_code: the code to patch in
+ * ftrace_make_call - convert a nop call site into a call to addr
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should call
  *
  * This is a very sensitive operation and great care needs
  * to be taken by the arch.  The operation should carefully
@@ -86,6 +159,8 @@ extern void mcount_call(void);
  * what we expect it to be, and then on success of the compare,
  * it should write to the location.
  *
+ * The code segment at @rec->ip should be a nop
+ *
  * Return must be:
  *  0 on success
  *  -EFAULT on error reading the location
@@ -93,8 +168,11 @@ extern void mcount_call(void);
  *  -EPERM  on error writing to the location
  * Any other value will be considered a failure.
  */
-extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-                             unsigned char *new_code);
+extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+
+
+/* May be defined in arch */
+extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
 extern int skip_trace(unsigned long ip);
 
@@ -102,7 +180,6 @@ extern void ftrace_release(void *start, unsigned long size);
 
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
-
 #else
 # define skip_trace(ip)                                ({ 0; })
 # define ftrace_force_update()                 ({ 0; })
@@ -181,6 +258,12 @@ static inline void __ftrace_enabled_restore(int enabled)
 #endif
 
 #ifdef CONFIG_TRACING
+extern int ftrace_dump_on_oops;
+
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
 extern void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
 
@@ -211,6 +294,9 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
 static inline int
 ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
 
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
 static inline int
 ftrace_printk(const char *fmt, ...)
 {
@@ -221,33 +307,86 @@ static inline void ftrace_dump(void) { }
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 extern void ftrace_init(void);
-extern void ftrace_init_module(unsigned long *start, unsigned long *end);
+extern void ftrace_init_module(struct module *mod,
+                              unsigned long *start, unsigned long *end);
 #else
 static inline void ftrace_init(void) { }
 static inline void
-ftrace_init_module(unsigned long *start, unsigned long *end) { }
+ftrace_init_module(struct module *mod,
+                  unsigned long *start, unsigned long *end) { }
 #endif
 
+enum {
+       POWER_NONE = 0,
+       POWER_CSTATE = 1,
+       POWER_PSTATE = 2,
+};
 
-struct boot_trace {
-       pid_t                   caller;
-       char                    func[KSYM_NAME_LEN];
-       int                     result;
-       unsigned long long      duration;               /* usecs */
-       ktime_t                 calltime;
-       ktime_t                 rettime;
+struct power_trace {
+#ifdef CONFIG_POWER_TRACER
+       ktime_t                 stamp;
+       ktime_t                 end;
+       int                     type;
+       int                     state;
+#endif
 };
 
-#ifdef CONFIG_BOOT_TRACER
-extern void trace_boot(struct boot_trace *it, initcall_t fn);
-extern void start_boot_trace(void);
-extern void stop_boot_trace(void);
+#ifdef CONFIG_POWER_TRACER
+extern void trace_power_start(struct power_trace *it, unsigned int type,
+                                       unsigned int state);
+extern void trace_power_mark(struct power_trace *it, unsigned int type,
+                                       unsigned int state);
+extern void trace_power_end(struct power_trace *it);
 #else
-static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
-static inline void start_boot_trace(void) { }
-static inline void stop_boot_trace(void) { }
+static inline void trace_power_start(struct power_trace *it, unsigned int type,
+                                       unsigned int state) { }
+static inline void trace_power_mark(struct power_trace *it, unsigned int type,
+                                       unsigned int state) { }
+static inline void trace_power_end(struct power_trace *it) { }
 #endif
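
A sketch of the intended call pattern for the power-trace hooks, using a hypothetical idle-entry path (the state argument follows ACPI C-state numbering):

    static void my_enter_c1(void)
    {
            struct power_trace it;

            trace_power_start(&it, POWER_CSTATE, 1);   /* entering C1 */
            /* ... architecture-specific halt ... */
            trace_power_end(&it);
    }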
 
 
+/*
+ * Structure that defines an entry function trace.
+ */
+struct ftrace_graph_ent {
+       unsigned long func; /* Current function */
+       int depth;
+};
+
+/*
+ * Structure that defines a return function trace.
+ */
+struct ftrace_graph_ret {
+       unsigned long func; /* Current function */
+       unsigned long long calltime;
+       unsigned long long rettime;
+       /* Number of functions that overran the depth limit for current task */
+       unsigned long overrun;
+       int depth;
+};
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define FTRACE_RETFUNC_DEPTH 50
+#define FTRACE_RETSTACK_ALLOC_SIZE 32
+/* Type of the callback handlers for tracing function graph */
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef void (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+                               trace_func_graph_ent_t entryfunc);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
+
+extern void unregister_ftrace_graph(void);
+
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
+#else
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+#endif
 
 #endif /* _LINUX_FTRACE_H */
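
Registering with the graph tracer takes one callback per direction, per the typedefs above; a minimal sketch with hypothetical handlers:

    #include <linux/ftrace.h>

    static void my_graph_entry(struct ftrace_graph_ent *ent)
    {
            /* ent->func is the address of the function being entered */
    }

    static void my_graph_return(struct ftrace_graph_ret *ret)
    {
            /* ret->rettime - ret->calltime is the time spent inside */
    }

    static int __init my_graph_init(void)
    {
            return register_ftrace_graph(my_graph_return, my_graph_entry);
    }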
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
new file mode 100644 (file)
index 0000000..366a054
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _LINUX_FTRACE_IRQ_H
+#define _LINUX_FTRACE_IRQ_H
+
+
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+extern void ftrace_nmi_enter(void);
+extern void ftrace_nmi_exit(void);
+#else
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
+#endif /* _LINUX_FTRACE_IRQ_H */
index 181006cc94a03ecd29630d80d91762589fc1c316..89a56d79e4c6c4987531a10ad8fed17f3d597bf7 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/preempt.h>
 #include <linux/smp_lock.h>
 #include <linux/lockdep.h>
+#include <linux/ftrace_irq.h>
 #include <asm/hardirq.h>
 #include <asm/system.h>
 
@@ -161,7 +162,17 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
-#define nmi_enter()            do { lockdep_off(); __irq_enter(); } while (0)
-#define nmi_exit()             do { __irq_exit(); lockdep_on(); } while (0)
+#define nmi_enter()                            \
+       do {                                    \
+               ftrace_nmi_enter();             \
+               lockdep_off();                  \
+               __irq_enter();                  \
+       } while (0)
+#define nmi_exit()                             \
+       do {                                    \
+               __irq_exit();                   \
+               lockdep_on();                   \
+               ftrace_nmi_exit();              \
+       } while (0)
 
 #endif /* LINUX_HARDIRQ_H */
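
The ordering in the reworked macros is deliberate: the ftrace hooks bracket everything else, so the tracer knows an NMI is in flight before lockdep is switched off and until after it is switched back on. A hypothetical arch NMI path:

    void do_nmi(struct pt_regs *regs)
    {
            nmi_enter();    /* ftrace_nmi_enter() runs before lockdep_off() */
            /* ... handle the NMI; no sleeping, no locks ... */
            nmi_exit();     /* ftrace_nmi_exit() runs after lockdep_on() */
    }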
index 889196c7fbb1e77cc5b4561e2b0b7f937b864434..b85e74ca782ff1fb026732717ffa8db5d688a1d9 100644 (file)
@@ -12,6 +12,7 @@
  * See the file COPYING for more details.
  */
 
+#include <stdarg.h>
 #include <linux/types.h>
 
 struct module;
@@ -48,10 +49,28 @@ struct marker {
        void (*call)(const struct marker *mdata, void *call_private, ...);
        struct marker_probe_closure single;
        struct marker_probe_closure *multi;
+       const char *tp_name;    /* Optional tracepoint name */
+       void *tp_cb;            /* Optional tracepoint callback */
 } __attribute__((aligned(8)));
 
 #ifdef CONFIG_MARKERS
 
+#define _DEFINE_MARKER(name, tp_name_str, tp_cb, format)               \
+               static const char __mstrtab_##name[]                    \
+               __attribute__((section("__markers_strings")))           \
+               = #name "\0" format;                                    \
+               static struct marker __mark_##name                      \
+               __attribute__((section("__markers"), aligned(8))) =     \
+               { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)],   \
+                 0, 0, marker_probe_cb, { __mark_empty_function, NULL},\
+                 NULL, tp_name_str, tp_cb }
+
+#define DEFINE_MARKER(name, format)                                    \
+               _DEFINE_MARKER(name, NULL, NULL, format)
+
+#define DEFINE_MARKER_TP(name, tp_name, tp_cb, format)                 \
+               _DEFINE_MARKER(name, #tp_name, tp_cb, format)
+
 /*
  * Note : the empty asm volatile with read constraint is used here instead of a
  * "used" attribute to fix a gcc 4.1.x bug.
@@ -65,14 +84,7 @@ struct marker {
  */
 #define __trace_mark(generic, name, call_private, format, args...)     \
        do {                                                            \
-               static const char __mstrtab_##name[]                    \
-               __attribute__((section("__markers_strings")))           \
-               = #name "\0" format;                                    \
-               static struct marker __mark_##name                      \
-               __attribute__((section("__markers"), aligned(8))) =     \
-               { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)],   \
-               0, 0, marker_probe_cb,                                  \
-               { __mark_empty_function, NULL}, NULL };                 \
+               DEFINE_MARKER(name, format);                            \
                __mark_check_format(format, ## args);                   \
                if (unlikely(__mark_##name.state)) {                    \
                        (*__mark_##name.call)                           \
@@ -80,14 +92,39 @@ struct marker {
                }                                                       \
        } while (0)
 
+#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
+       do {                                                            \
+               void __check_tp_type(void)                              \
+               {                                                       \
+                       register_trace_##tp_name(tp_cb);                \
+               }                                                       \
+               DEFINE_MARKER_TP(name, tp_name, tp_cb, format);         \
+               __mark_check_format(format, ## args);                   \
+               (*__mark_##name.call)(&__mark_##name, call_private,     \
+                                       ## args);                       \
+       } while (0)
+
 extern void marker_update_probe_range(struct marker *begin,
        struct marker *end);
+
+#define GET_MARKER(name)       (__mark_##name)
+
 #else /* !CONFIG_MARKERS */
+#define DEFINE_MARKER(name, tp_name, tp_cb, format)
 #define __trace_mark(generic, name, call_private, format, args...) \
                __mark_check_format(format, ## args)
+#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
+       do {                                                            \
+               void __check_tp_type(void)                              \
+               {                                                       \
+                       register_trace_##tp_name(tp_cb);                \
+               }                                                       \
+               __mark_check_format(format, ## args);                   \
+       } while (0)
 static inline void marker_update_probe_range(struct marker *begin,
        struct marker *end)
 { }
+#define GET_MARKER(name)
 #endif /* CONFIG_MARKERS */
 
 /**
@@ -116,6 +153,20 @@ static inline void marker_update_probe_range(struct marker *begin,
 #define _trace_mark(name, format, args...) \
        __trace_mark(1, name, NULL, format, ## args)
 
+/**
+ * trace_mark_tp - Marker in a tracepoint callback
+ * @name: marker name, not quoted.
+ * @tp_name: tracepoint name, not quoted.
+ * @tp_cb: tracepoint callback. Should have an associated global symbol so it
+ *         is not optimized away by the compiler (should not be static).
+ * @format: format string
+ * @args...: variable argument list
+ *
+ * Places a marker in a tracepoint callback.
+ */
+#define trace_mark_tp(name, tp_name, tp_cb, format, args...)   \
+       __trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args)
+
 /**
  * MARK_NOARGS - Format string for a marker with no argument.
  */
@@ -136,8 +187,6 @@ extern marker_probe_func __mark_empty_function;
 
 extern void marker_probe_cb(const struct marker *mdata,
        void *call_private, ...);
-extern void marker_probe_cb_noarg(const struct marker *mdata,
-       void *call_private, ...);
 
 /*
  * Connect a probe to a marker.
@@ -162,8 +211,10 @@ extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
 
 /*
  * marker_synchronize_unregister must be called between the last marker probe
- * unregistration and the end of module exit to make sure there is no caller
- * executing a probe when it is freed.
+ * unregistration and the first one of
+ * - the end of module exit function
+ * - the free of any resource used by the probes
+ * to ensure the code and data are valid for any possibly running probes.
  */
 #define marker_synchronize_unregister() synchronize_sched()
 
index 86f1f5e43e333766ec6a9fe5276875046c2f2526..895dc9c1088c767ce4706814b806c20248423241 100644 (file)
@@ -142,6 +142,7 @@ struct rcu_head {
  * on the write-side to insure proper synchronization.
  */
 #define rcu_read_lock_sched() preempt_disable()
+#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
 
 /*
  * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
@@ -149,6 +150,7 @@ struct rcu_head {
  * See rcu_read_lock_sched for more information.
  */
 #define rcu_read_unlock_sched() preempt_enable()
+#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
 
 
 
index e097c2e6b6dcaa212f26bf6b08b66e4c9021cb21..3bb87a753fa387aa0d105c6cdef45975bd99c9a1 100644 (file)
@@ -122,6 +122,7 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
 
 void tracing_on(void);
 void tracing_off(void);
+void tracing_off_permanent(void);
 
 enum ring_buffer_flags {
        RB_FL_OVERWRITE         = 1 << 0,
index 55e30d11447790dd433d0e874285ced3a5499328..2d0a93c3122837ea55d8a51dd4113313a0181629 100644 (file)
@@ -96,6 +96,7 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
+struct bts_tracer;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -1165,6 +1166,18 @@ struct task_struct {
        struct list_head ptraced;
        struct list_head ptrace_entry;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+       /*
+        * This is the tracer handle for the ptrace BTS extension.
+        * This field actually belongs to the ptracer task.
+        */
+       struct bts_tracer *bts;
+       /*
+        * The buffer to hold the BTS data.
+        */
+       void *bts_buffer;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
@@ -1356,6 +1369,17 @@ struct task_struct {
        unsigned long default_timer_slack_ns;
 
        struct list_head        *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* Index of current stored address in ret_stack */
+       int curr_ret_stack;
+       /* Stack of return addresses for return function tracing */
+       struct ftrace_ret_stack *ret_stack;
+       /*
+        * Number of functions that haven't been traced
+        * because of depth overrun.
+        */
+       atomic_t trace_overrun;
+#endif
 };
 
 /*
index dc50bcc282a888ba1611a5485c53467d4a2ab953..b3dfa72f13b902bd48275ac923b551f0375d7e0f 100644 (file)
@@ -34,6 +34,7 @@ struct seq_operations {
 
 #define SEQ_SKIP 1
 
+char *mangle_path(char *s, char *p, char *esc);
 int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
 loff_t seq_lseek(struct file *, loff_t, int);
index b106fd8e0d5c4298d6495e1d48cefdd81d21910d..1a8cecc4f38cd2b91a90f267a54fd39bb4de196b 100644 (file)
@@ -15,9 +15,17 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
                                struct stack_trace *trace);
 
 extern void print_stack_trace(struct stack_trace *trace, int spaces);
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+extern void save_stack_trace_user(struct stack_trace *trace);
+#else
+# define save_stack_trace_user(trace)              do { } while (0)
+#endif
+
 #else
 # define save_stack_trace(trace)                       do { } while (0)
 # define save_stack_trace_tsk(tsk, trace)              do { } while (0)
+# define save_stack_trace_user(trace)                  do { } while (0)
 # define print_stack_trace(trace, spaces)              do { } while (0)
 #endif
 
index c5bb39c7a7703cdb7db04faf0e2b0ce2086aa19d..757005458366edc2ae54939f2798d35709f48602 100644 (file)
@@ -24,8 +24,12 @@ struct tracepoint {
        const char *name;               /* Tracepoint name */
        int state;                      /* State. */
        void **funcs;
-} __attribute__((aligned(8)));
-
+} __attribute__((aligned(32)));                /*
+                                        * Aligned on 32 bytes because it is
+                                        * globally visible and gcc happily
+                                        * aligns these on the structure size.
+                                        * Keep in sync with vmlinux.lds.h.
+                                        */
 
 #define TPPROTO(args...)       args
 #define TPARGS(args...)                args
@@ -40,14 +44,14 @@ struct tracepoint {
        do {                                                            \
                void **it_func;                                         \
                                                                        \
-               rcu_read_lock_sched();                                  \
+               rcu_read_lock_sched_notrace();                          \
                it_func = rcu_dereference((tp)->funcs);                 \
                if (it_func) {                                          \
                        do {                                            \
                                ((void(*)(proto))(*it_func))(args);     \
                        } while (*(++it_func));                         \
                }                                                       \
-               rcu_read_unlock_sched();                                \
+               rcu_read_unlock_sched_notrace();                        \
        } while (0)
 
 /*
@@ -55,35 +59,40 @@ struct tracepoint {
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  */
-#define DEFINE_TRACE(name, proto, args)                                        \
+#define DECLARE_TRACE(name, proto, args)                               \
+       extern struct tracepoint __tracepoint_##name;                   \
        static inline void trace_##name(proto)                          \
        {                                                               \
-               static const char __tpstrtab_##name[]                   \
-               __attribute__((section("__tracepoints_strings")))       \
-               = #name ":" #proto;                                     \
-               static struct tracepoint __tracepoint_##name            \
-               __attribute__((section("__tracepoints"), aligned(8))) = \
-               { __tpstrtab_##name, 0, NULL };                         \
                if (unlikely(__tracepoint_##name.state))                \
                        __DO_TRACE(&__tracepoint_##name,                \
                                TPPROTO(proto), TPARGS(args));          \
        }                                                               \
        static inline int register_trace_##name(void (*probe)(proto))   \
        {                                                               \
-               return tracepoint_probe_register(#name ":" #proto,      \
-                       (void *)probe);                                 \
+               return tracepoint_probe_register(#name, (void *)probe); \
        }                                                               \
-       static inline void unregister_trace_##name(void (*probe)(proto))\
+       static inline int unregister_trace_##name(void (*probe)(proto)) \
        {                                                               \
-               tracepoint_probe_unregister(#name ":" #proto,           \
-                       (void *)probe);                                 \
+               return tracepoint_probe_unregister(#name, (void *)probe);\
        }
 
+#define DEFINE_TRACE(name)                                             \
+       static const char __tpstrtab_##name[]                           \
+       __attribute__((section("__tracepoints_strings"))) = #name;      \
+       struct tracepoint __tracepoint_##name                           \
+       __attribute__((section("__tracepoints"), aligned(32))) =        \
+               { __tpstrtab_##name, 0, NULL }
+
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)                             \
+       EXPORT_SYMBOL_GPL(__tracepoint_##name)
+#define EXPORT_TRACEPOINT_SYMBOL(name)                                 \
+       EXPORT_SYMBOL(__tracepoint_##name)
+
 extern void tracepoint_update_probe_range(struct tracepoint *begin,
        struct tracepoint *end);
 
 #else /* !CONFIG_TRACEPOINTS */
-#define DEFINE_TRACE(name, proto, args)                        \
+#define DECLARE_TRACE(name, proto, args)                               \
        static inline void _do_trace_##name(struct tracepoint *tp, proto) \
        { }                                                             \
        static inline void trace_##name(proto)                          \
@@ -92,8 +101,14 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
        {                                                               \
                return -ENOSYS;                                         \
        }                                                               \
-       static inline void unregister_trace_##name(void (*probe)(proto))\
-       { }
+       static inline int unregister_trace_##name(void (*probe)(proto)) \
+       {                                                               \
+               return -ENOSYS;                                         \
+       }
+
+#define DEFINE_TRACE(name)
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
+#define EXPORT_TRACEPOINT_SYMBOL(name)
 
 static inline void tracepoint_update_probe_range(struct tracepoint *begin,
        struct tracepoint *end)
@@ -112,6 +127,10 @@ extern int tracepoint_probe_register(const char *name, void *probe);
  */
 extern int tracepoint_probe_unregister(const char *name, void *probe);
 
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
 struct tracepoint_iter {
        struct module *module;
        struct tracepoint *tracepoint;
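
With the DECLARE/DEFINE split, instrumenting an event now takes three pieces instead of one; a minimal sketch for a hypothetical tracepoint:

    /* in a shared header */
    DECLARE_TRACE(subsys_event,
            TPPROTO(int firstarg, struct task_struct *p),
                    TPARGS(firstarg, p));

    /* in exactly one .c file, which owns the tracepoint */
    DEFINE_TRACE(subsys_event);
    EXPORT_TRACEPOINT_SYMBOL_GPL(subsys_event);

    /* at the instrumentation site */
    trace_subsys_event(someint, sometask);

The trace/sched.h conversion and the DEFINE_TRACE() additions in kernel/exit.c, fork.c and kthread.c below are this same pattern applied tree-wide.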
index 3b8121d4e36ff90aff5c8f7fa9fe3865be0baacc..eaec37c9d83d0a8dbbb87f7f46f1e0557ce00394 100644 (file)
@@ -325,7 +325,7 @@ extern struct class *tty_class;
  *     go away
  */
 
-extern inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
+static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
 {
        if (tty)
                kref_get(&tty->kref);
diff --git a/include/trace/block.h b/include/trace/block.h
new file mode 100644 (file)
index 0000000..25c6a1f
--- /dev/null
@@ -0,0 +1,76 @@
+#ifndef _TRACE_BLOCK_H
+#define _TRACE_BLOCK_H
+
+#include <linux/blkdev.h>
+#include <linux/tracepoint.h>
+
+DECLARE_TRACE(block_rq_abort,
+       TPPROTO(struct request_queue *q, struct request *rq),
+               TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_insert,
+       TPPROTO(struct request_queue *q, struct request *rq),
+               TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_issue,
+       TPPROTO(struct request_queue *q, struct request *rq),
+               TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_requeue,
+       TPPROTO(struct request_queue *q, struct request *rq),
+               TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_complete,
+       TPPROTO(struct request_queue *q, struct request *rq),
+               TPARGS(q, rq));
+
+DECLARE_TRACE(block_bio_bounce,
+       TPPROTO(struct request_queue *q, struct bio *bio),
+               TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_complete,
+       TPPROTO(struct request_queue *q, struct bio *bio),
+               TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_backmerge,
+       TPPROTO(struct request_queue *q, struct bio *bio),
+               TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_frontmerge,
+       TPPROTO(struct request_queue *q, struct bio *bio),
+               TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_queue,
+       TPPROTO(struct request_queue *q, struct bio *bio),
+               TPARGS(q, bio));
+
+DECLARE_TRACE(block_getrq,
+       TPPROTO(struct request_queue *q, struct bio *bio, int rw),
+               TPARGS(q, bio, rw));
+
+DECLARE_TRACE(block_sleeprq,
+       TPPROTO(struct request_queue *q, struct bio *bio, int rw),
+               TPARGS(q, bio, rw));
+
+DECLARE_TRACE(block_plug,
+       TPPROTO(struct request_queue *q),
+               TPARGS(q));
+
+DECLARE_TRACE(block_unplug_timer,
+       TPPROTO(struct request_queue *q),
+               TPARGS(q));
+
+DECLARE_TRACE(block_unplug_io,
+       TPPROTO(struct request_queue *q),
+               TPARGS(q));
+
+DECLARE_TRACE(block_split,
+       TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
+               TPARGS(q, bio, pdu));
+
+DECLARE_TRACE(block_remap,
+       TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev,
+               sector_t from, sector_t to),
+               TPARGS(q, bio, dev, from, to));
+
+#endif
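
Each DECLARE_TRACE() above pairs with one DEFINE_TRACE() in the .c file that owns the event: the elevator.c, dm.c and fs/bio.c hunks earlier in this diff define block_rq_abort/insert/issue, block_bio_complete and block_split respectively, with the remainder presumably defined in blk-core.c and mm/bounce.c, which this merge also touches.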
diff --git a/include/trace/boot.h b/include/trace/boot.h
new file mode 100644 (file)
index 0000000..6b54537
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef _LINUX_TRACE_BOOT_H
+#define _LINUX_TRACE_BOOT_H
+
+/*
+ * Structure which defines the trace of an initcall
+ * when it is called.
+ * You don't have to fill the func field since it is
+ * only used internally by the tracer.
+ */
+struct boot_trace_call {
+       pid_t                   caller;
+       char                    func[KSYM_NAME_LEN];
+};
+
+/*
+ * Structure which defines the trace of an initcall
+ * when it returns.
+ */
+struct boot_trace_ret {
+       char                    func[KSYM_NAME_LEN];
+       int                     result;
+       unsigned long long      duration;               /* usecs (ns >> 10) */
+};
+
+#ifdef CONFIG_BOOT_TRACER
+/* Append the traces to the ring buffer */
+extern void trace_boot_call(struct boot_trace_call *bt, initcall_t fn);
+extern void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn);
+
+/* Tells the tracer that smp_pre_initcall is finished,
+ * so the tracing can be started.
+ */
+extern void start_boot_trace(void);
+
+/* Resume the tracing of other necessary events
+ * such as sched switches.
+ */
+extern void enable_boot_trace(void);
+
+/* Suspend this tracing. Actually, only the sched_switch tracing has
+ * to be suspended; initcalls don't need it.
+ */
+extern void disable_boot_trace(void);
+#else
+static inline
+void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) { }
+
+static inline
+void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) { }
+
+static inline void start_boot_trace(void) { }
+static inline void enable_boot_trace(void) { }
+static inline void disable_boot_trace(void) { }
+#endif /* CONFIG_BOOT_TRACER */
+
+#endif /* __LINUX_TRACE_BOOT_H */
index ad47369d01b5957fbc22322482c530ef28e25a33..9b2854abf7e2fcb732452cfdc03261aa5edfe478 100644 (file)
@@ -4,52 +4,52 @@
 #include <linux/sched.h>
 #include <linux/tracepoint.h>
 
-DEFINE_TRACE(sched_kthread_stop,
+DECLARE_TRACE(sched_kthread_stop,
        TPPROTO(struct task_struct *t),
                TPARGS(t));
 
-DEFINE_TRACE(sched_kthread_stop_ret,
+DECLARE_TRACE(sched_kthread_stop_ret,
        TPPROTO(int ret),
                TPARGS(ret));
 
-DEFINE_TRACE(sched_wait_task,
+DECLARE_TRACE(sched_wait_task,
        TPPROTO(struct rq *rq, struct task_struct *p),
                TPARGS(rq, p));
 
-DEFINE_TRACE(sched_wakeup,
+DECLARE_TRACE(sched_wakeup,
        TPPROTO(struct rq *rq, struct task_struct *p),
                TPARGS(rq, p));
 
-DEFINE_TRACE(sched_wakeup_new,
+DECLARE_TRACE(sched_wakeup_new,
        TPPROTO(struct rq *rq, struct task_struct *p),
                TPARGS(rq, p));
 
-DEFINE_TRACE(sched_switch,
+DECLARE_TRACE(sched_switch,
        TPPROTO(struct rq *rq, struct task_struct *prev,
                struct task_struct *next),
                TPARGS(rq, prev, next));
 
-DEFINE_TRACE(sched_migrate_task,
+DECLARE_TRACE(sched_migrate_task,
        TPPROTO(struct rq *rq, struct task_struct *p, int dest_cpu),
                TPARGS(rq, p, dest_cpu));
 
-DEFINE_TRACE(sched_process_free,
+DECLARE_TRACE(sched_process_free,
        TPPROTO(struct task_struct *p),
                TPARGS(p));
 
-DEFINE_TRACE(sched_process_exit,
+DECLARE_TRACE(sched_process_exit,
        TPPROTO(struct task_struct *p),
                TPARGS(p));
 
-DEFINE_TRACE(sched_process_wait,
+DECLARE_TRACE(sched_process_wait,
        TPPROTO(struct pid *pid),
                TPARGS(pid));
 
-DEFINE_TRACE(sched_process_fork,
+DECLARE_TRACE(sched_process_fork,
        TPPROTO(struct task_struct *parent, struct task_struct *child),
                TPARGS(parent, child));
 
-DEFINE_TRACE(sched_signal_send,
+DECLARE_TRACE(sched_signal_send,
        TPPROTO(int sig, struct task_struct *p),
                TPARGS(sig, p));
 
index f763762d544a135a0a06f67f36701b9f0d331140..f291f086caa1c2e89909ffe5f910786a331ff588 100644 (file)
@@ -808,6 +808,7 @@ config TRACEPOINTS
 
 config MARKERS
        bool "Activate markers"
+       depends on TRACEPOINTS
        help
          Place an empty function call at each marker site. Can be
          dynamically changed for a probe function.
index 7e117a231af10313f1b9bd963bf404eecaf94c9e..79213c0785d293adc69165ec37899c2517793046 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/signal.h>
 #include <linux/idr.h>
 #include <linux/ftrace.h>
+#include <trace/boot.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -703,31 +704,35 @@ core_param(initcall_debug, initcall_debug, bool, 0644);
 int do_one_initcall(initcall_t fn)
 {
        int count = preempt_count();
-       ktime_t delta;
+       ktime_t calltime, delta, rettime;
        char msgbuf[64];
-       struct boot_trace it;
+       struct boot_trace_call call;
+       struct boot_trace_ret ret;
 
        if (initcall_debug) {
-               it.caller = task_pid_nr(current);
-               printk("calling  %pF @ %i\n", fn, it.caller);
-               it.calltime = ktime_get();
+               call.caller = task_pid_nr(current);
+               printk("calling  %pF @ %i\n", fn, call.caller);
+               calltime = ktime_get();
+               trace_boot_call(&call, fn);
+               enable_boot_trace();
        }
 
-       it.result = fn();
+       ret.result = fn();
 
        if (initcall_debug) {
-               it.rettime = ktime_get();
-               delta = ktime_sub(it.rettime, it.calltime);
-               it.duration = (unsigned long long) delta.tv64 >> 10;
+               disable_boot_trace();
+               rettime = ktime_get();
+               delta = ktime_sub(rettime, calltime);
+               ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+               trace_boot_ret(&ret, fn);
                printk("initcall %pF returned %d after %Ld usecs\n", fn,
-                       it.result, it.duration);
-               trace_boot(&it, fn);
+                       ret.result, ret.duration);
        }
 
        msgbuf[0] = 0;
 
-       if (it.result && it.result != -ENODEV && initcall_debug)
-               sprintf(msgbuf, "error code %d ", it.result);
+       if (ret.result && ret.result != -ENODEV && initcall_debug)
+               sprintf(msgbuf, "error code %d ", ret.result);
 
        if (preempt_count() != count) {
                strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -741,7 +746,7 @@ int do_one_initcall(initcall_t fn)
                printk("initcall %pF returned with %s\n", fn, msgbuf);
        }
 
-       return it.result;
+       return ret.result;
 }
 
 
@@ -882,7 +887,7 @@ static int __init kernel_init(void * unused)
         * we're essentially up and running. Get rid of the
         * initmem segments and start the user-mode stuff..
         */
-       stop_boot_trace();
+
        init_post();
        return 0;
 }
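
One subtlety in the do_one_initcall() hunk above: ret.duration is computed as ktime_to_ns(delta) >> 10, i.e. nanoseconds divided by 1024 as a cheap approximation of microseconds (about 2.4% low), which is adequate for the human-readable initcall log.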
index 19fad003b19d6ac0752597f5a23e18341d1d579a..703cf3b7389cefa72d7dbb311d069a5a90cb5e17 100644 (file)
@@ -21,6 +21,10 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -pg
 endif
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
+CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
+endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
index 2d8be7ebb0f73499f894a1828fd827f0217290f1..e5ae36ebe8afda0efe91c14ad4eec30d313492f0 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 
+DEFINE_TRACE(sched_process_free);
+DEFINE_TRACE(sched_process_exit);
+DEFINE_TRACE(sched_process_wait);
+
 static void exit_mm(struct task_struct * tsk);
 
 static inline int task_detached(struct task_struct *p)
@@ -1123,7 +1127,6 @@ NORET_TYPE void do_exit(long code)
        preempt_disable();
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
-
        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
index 2a372a0e206fa2de99dbfdd594f86f6eb927bf40..5f82a999c032105639f8b62850c30b42552e3837 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/mount.h>
 #include <linux/audit.h>
 #include <linux/memcontrol.h>
+#include <linux/ftrace.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/acct.h>
@@ -80,6 +81,8 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
+DEFINE_TRACE(sched_process_fork);
+
 int nr_processes(void)
 {
        int cpu;
@@ -137,6 +140,7 @@ void free_task(struct task_struct *tsk)
        prop_local_destroy_single(&tsk->dirties);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
+       ftrace_graph_exit_task(tsk);
        free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -1267,6 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        total_forks++;
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
+       ftrace_graph_init_task(p);
        proc_fork_connector(p);
        cgroup_post_fork(p);
        return p;
index 8e7a7ce3ed0a642f99dc7f73c220722c936e88ac..4fbc456f393d0b1fb328667d9e93cc214c39da06 100644 (file)
@@ -21,6 +21,9 @@ static DEFINE_SPINLOCK(kthread_create_lock);
 static LIST_HEAD(kthread_create_list);
 struct task_struct *kthreadd_task;
 
+DEFINE_TRACE(sched_kthread_stop);
+DEFINE_TRACE(sched_kthread_stop_ret);
+
 struct kthread_create_info
 {
        /* Information passed to kthread() from kthreadd. */
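
The DEFINE_TRACE() lines added to exit.c, fork.c and kthread.c above follow
the new declare/define split for tracepoints: a header declares the tracepoint
once, and exactly one .c file instantiates it. A sketch of the pattern,
assuming the TPPROTO/TPARGS helpers of this tree:

/* in a header (e.g. include/trace/sched.h): declared once */
DECLARE_TRACE(sched_kthread_stop,
        TPPROTO(struct task_struct *t),
        TPARGS(t));

/* in exactly one compilation unit (here kernel/kthread.c): */
DEFINE_TRACE(sched_kthread_stop);

/* call sites are unchanged: */
trace_sched_kthread_stop(k);
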
index e9c6b2bc9400627cf183382ee55933333f0ee83b..ea54f2647868726428faa630114d6fcd673c4735 100644 (file)
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(markers_mutex);
  */
 #define MARKER_HASH_BITS 6
 #define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
+static struct hlist_head marker_table[MARKER_TABLE_SIZE];
 
 /*
  * Note about RCU :
@@ -64,11 +65,10 @@ struct marker_entry {
        void *oldptr;
        int rcu_pending;
        unsigned char ptype:1;
+       unsigned char format_allocated:1;
        char name[0];   /* Contains name'\0'format'\0' */
 };
 
-static struct hlist_head marker_table[MARKER_TABLE_SIZE];
-
 /**
  * __mark_empty_function - Empty probe callback
  * @probe_private: probe private data
@@ -81,7 +81,7 @@ static struct hlist_head marker_table[MARKER_TABLE_SIZE];
  * though the function pointer change and the marker enabling are two distinct
  * operations that modifies the execution flow of preemptible code.
  */
-void __mark_empty_function(void *probe_private, void *call_private,
+notrace void __mark_empty_function(void *probe_private, void *call_private,
        const char *fmt, va_list *args)
 {
 }
@@ -97,7 +97,8 @@ EXPORT_SYMBOL_GPL(__mark_empty_function);
  * need to put a full smp_rmb() in this branch. This is why we do not use
  * rcu_dereference() for the pointer read.
  */
-void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
+notrace void marker_probe_cb(const struct marker *mdata,
+               void *call_private, ...)
 {
        va_list args;
        char ptype;
@@ -107,7 +108,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
         * sure the teardown of the callbacks can be done correctly when they
         * are in modules and they insure RCU read coherency.
         */
-       rcu_read_lock_sched();
+       rcu_read_lock_sched_notrace();
        ptype = mdata->ptype;
        if (likely(!ptype)) {
                marker_probe_func *func;
@@ -145,7 +146,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
                        va_end(args);
                }
        }
-       rcu_read_unlock_sched();
+       rcu_read_unlock_sched_notrace();
 }
 EXPORT_SYMBOL_GPL(marker_probe_cb);
 
@@ -157,12 +158,13 @@ EXPORT_SYMBOL_GPL(marker_probe_cb);
  *
  * Should be connected to markers "MARK_NOARGS".
  */
-void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
+static notrace void marker_probe_cb_noarg(const struct marker *mdata,
+               void *call_private, ...)
 {
        va_list args;   /* not initialized */
        char ptype;
 
-       rcu_read_lock_sched();
+       rcu_read_lock_sched_notrace();
        ptype = mdata->ptype;
        if (likely(!ptype)) {
                marker_probe_func *func;
@@ -195,9 +197,8 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
                        multi[i].func(multi[i].probe_private, call_private,
                                mdata->format, &args);
        }
-       rcu_read_unlock_sched();
+       rcu_read_unlock_sched_notrace();
 }
-EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
 
 static void free_old_closure(struct rcu_head *head)
 {
@@ -416,6 +417,7 @@ static struct marker_entry *add_marker(const char *name, const char *format)
        e->single.probe_private = NULL;
        e->multi = NULL;
        e->ptype = 0;
+       e->format_allocated = 0;
        e->refcount = 0;
        e->rcu_pending = 0;
        hlist_add_head(&e->hlist, head);
@@ -447,6 +449,8 @@ static int remove_marker(const char *name)
        if (e->single.func != __mark_empty_function)
                return -EBUSY;
        hlist_del(&e->hlist);
+       if (e->format_allocated)
+               kfree(e->format);
        /* Make sure the call_rcu has been executed */
        if (e->rcu_pending)
                rcu_barrier_sched();
@@ -457,57 +461,34 @@ static int remove_marker(const char *name)
 /*
  * Set the mark_entry format to the format found in the element.
  */
-static int marker_set_format(struct marker_entry **entry, const char *format)
+static int marker_set_format(struct marker_entry *entry, const char *format)
 {
-       struct marker_entry *e;
-       size_t name_len = strlen((*entry)->name) + 1;
-       size_t format_len = strlen(format) + 1;
-
-
-       e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
-                       GFP_KERNEL);
-       if (!e)
+       entry->format = kstrdup(format, GFP_KERNEL);
+       if (!entry->format)
                return -ENOMEM;
-       memcpy(&e->name[0], (*entry)->name, name_len);
-       e->format = &e->name[name_len];
-       memcpy(e->format, format, format_len);
-       if (strcmp(e->format, MARK_NOARGS) == 0)
-               e->call = marker_probe_cb_noarg;
-       else
-               e->call = marker_probe_cb;
-       e->single = (*entry)->single;
-       e->multi = (*entry)->multi;
-       e->ptype = (*entry)->ptype;
-       e->refcount = (*entry)->refcount;
-       e->rcu_pending = 0;
-       hlist_add_before(&e->hlist, &(*entry)->hlist);
-       hlist_del(&(*entry)->hlist);
-       /* Make sure the call_rcu has been executed */
-       if ((*entry)->rcu_pending)
-               rcu_barrier_sched();
-       kfree(*entry);
-       *entry = e;
+       entry->format_allocated = 1;
+
        trace_mark(core_marker_format, "name %s format %s",
-                       e->name, e->format);
+                       entry->name, entry->format);
        return 0;
 }
 
 /*
  * Sets the probe callback corresponding to one marker.
  */
-static int set_marker(struct marker_entry **entry, struct marker *elem,
+static int set_marker(struct marker_entry *entry, struct marker *elem,
                int active)
 {
-       int ret;
-       WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+       int ret = 0;
+       WARN_ON(strcmp(entry->name, elem->name) != 0);
 
-       if ((*entry)->format) {
-               if (strcmp((*entry)->format, elem->format) != 0) {
+       if (entry->format) {
+               if (strcmp(entry->format, elem->format) != 0) {
                        printk(KERN_NOTICE
                                "Format mismatch for probe %s "
                                "(%s), marker (%s)\n",
-                               (*entry)->name,
-                               (*entry)->format,
+                               entry->name,
+                               entry->format,
                                elem->format);
                        return -EPERM;
                }
@@ -523,37 +504,67 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
         * pass from a "safe" callback (with argument) to an "unsafe"
         * callback (does not set arguments).
         */
-       elem->call = (*entry)->call;
+       elem->call = entry->call;
        /*
         * Sanity check :
         * We only update the single probe private data when the ptr is
         * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
         */
        WARN_ON(elem->single.func != __mark_empty_function
-               && elem->single.probe_private
-               != (*entry)->single.probe_private &&
-               !elem->ptype);
-       elem->single.probe_private = (*entry)->single.probe_private;
+               && elem->single.probe_private != entry->single.probe_private
+               && !elem->ptype);
+       elem->single.probe_private = entry->single.probe_private;
        /*
         * Make sure the private data is valid when we update the
         * single probe ptr.
         */
        smp_wmb();
-       elem->single.func = (*entry)->single.func;
+       elem->single.func = entry->single.func;
        /*
         * We also make sure that the new probe callbacks array is consistent
         * before setting a pointer to it.
         */
-       rcu_assign_pointer(elem->multi, (*entry)->multi);
+       rcu_assign_pointer(elem->multi, entry->multi);
        /*
         * Update the function or multi probe array pointer before setting the
         * ptype.
         */
        smp_wmb();
-       elem->ptype = (*entry)->ptype;
+       elem->ptype = entry->ptype;
+
+       if (elem->tp_name && (active ^ elem->state)) {
+               WARN_ON(!elem->tp_cb);
+               /*
+                * It is ok to directly call the probe registration because type
+                * checking has been done in the __trace_mark_tp() macro.
+                */
+
+               if (active) {
+                       /*
+                        * try_module_get should always succeed because we hold
+                        * lock_module() to get the tp_cb address.
+                        */
+                       ret = try_module_get(__module_text_address(
+                               (unsigned long)elem->tp_cb));
+                       BUG_ON(!ret);
+                       ret = tracepoint_probe_register_noupdate(
+                               elem->tp_name,
+                               elem->tp_cb);
+               } else {
+                       ret = tracepoint_probe_unregister_noupdate(
+                               elem->tp_name,
+                               elem->tp_cb);
+                       /*
+                        * tracepoint_probe_update_all() must be called
+                        * before the module containing tp_cb is unloaded.
+                        */
+                       module_put(__module_text_address(
+                               (unsigned long)elem->tp_cb));
+               }
+       }
        elem->state = active;
 
-       return 0;
+       return ret;
 }
 
 /*
@@ -564,7 +575,24 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
  */
 static void disable_marker(struct marker *elem)
 {
+       int ret;
+
        /* leave "call" as is. It is known statically. */
+       if (elem->tp_name && elem->state) {
+               WARN_ON(!elem->tp_cb);
+               /*
+                * It is ok to directly call the probe registration because type
+                * checking has been done in the __trace_mark_tp() macro.
+                */
+               ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+                       elem->tp_cb);
+               WARN_ON(ret);
+               /*
+                * tracepoint_probe_update_all() must be called
+                * before the module containing tp_cb is unloaded.
+                */
+               module_put(__module_text_address((unsigned long)elem->tp_cb));
+       }
        elem->state = 0;
        elem->single.func = __mark_empty_function;
        /* Update the function before setting the ptype */
@@ -594,8 +622,7 @@ void marker_update_probe_range(struct marker *begin,
        for (iter = begin; iter < end; iter++) {
                mark_entry = get_marker(iter->name);
                if (mark_entry) {
-                       set_marker(&mark_entry, iter,
-                                       !!mark_entry->refcount);
+                       set_marker(mark_entry, iter, !!mark_entry->refcount);
                        /*
                         * ignore error, continue
                         */
@@ -629,6 +656,7 @@ static void marker_update_probes(void)
        marker_update_probe_range(__start___markers, __stop___markers);
        /* Markers in modules. */
        module_update_markers();
+       tracepoint_probe_update_all();
 }
 
 /**
@@ -657,7 +685,7 @@ int marker_probe_register(const char *name, const char *format,
                        ret = PTR_ERR(entry);
        } else if (format) {
                if (!entry->format)
-                       ret = marker_set_format(&entry, format);
+                       ret = marker_set_format(entry, format);
                else if (strcmp(entry->format, format))
                        ret = -EPERM;
        }
@@ -676,10 +704,11 @@ int marker_probe_register(const char *name, const char *format,
                goto end;
        }
        mutex_unlock(&markers_mutex);
-       marker_update_probes();         /* may update entry */
+       marker_update_probes();
        mutex_lock(&markers_mutex);
        entry = get_marker(name);
-       WARN_ON(!entry);
+       if (!entry)
+               goto end;
        if (entry->rcu_pending)
                rcu_barrier_sched();
        entry->oldptr = old;
@@ -720,7 +749,7 @@ int marker_probe_unregister(const char *name,
                rcu_barrier_sched();
        old = marker_entry_remove_probe(entry, probe, probe_private);
        mutex_unlock(&markers_mutex);
-       marker_update_probes();         /* may update entry */
+       marker_update_probes();
        mutex_lock(&markers_mutex);
        entry = get_marker(name);
        if (!entry)
@@ -801,10 +830,11 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
                rcu_barrier_sched();
        old = marker_entry_remove_probe(entry, NULL, probe_private);
        mutex_unlock(&markers_mutex);
-       marker_update_probes();         /* may update entry */
+       marker_update_probes();
        mutex_lock(&markers_mutex);
        entry = get_marker_from_private_data(probe, probe_private);
-       WARN_ON(!entry);
+       if (!entry)
+               goto end;
        if (entry->rcu_pending)
                rcu_barrier_sched();
        entry->oldptr = old;
@@ -848,8 +878,6 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
                        if (!e->ptype) {
                                if (num == 0 && e->single.func == probe)
                                        return e->single.probe_private;
-                               else
-                                       break;
                        } else {
                                struct marker_probe_closure *closure;
                                int match = 0;
@@ -861,8 +889,42 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
                                                return closure[i].probe_private;
                                }
                        }
+                       break;
                }
        }
        return ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL_GPL(marker_get_private_data);
+
+#ifdef CONFIG_MODULES
+
+int marker_module_notify(struct notifier_block *self,
+                        unsigned long val, void *data)
+{
+       struct module *mod = data;
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               marker_update_probe_range(mod->markers,
+                       mod->markers + mod->num_markers);
+               break;
+       case MODULE_STATE_GOING:
+               marker_update_probe_range(mod->markers,
+                       mod->markers + mod->num_markers);
+               break;
+       }
+       return 0;
+}
+
+struct notifier_block marker_module_nb = {
+       .notifier_call = marker_module_notify,
+       .priority = 0,
+};
+
+static int init_markers(void)
+{
+       return register_module_notifier(&marker_module_nb);
+}
+__initcall(init_markers);
+
+#endif /* CONFIG_MODULES */
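
For orientation, a hedged sketch of the registration path that the marker.c
changes above simplify; my_probe and my_init are hypothetical names, while the
API is the one exported by marker.c:

static void my_probe(void *probe_private, void *call_private,
                     const char *fmt, va_list *args)
{
        /* runs on each hit of the "my_event" marker */
}

static int __init my_init(void)
{
        /* The first registration now kstrdup()s the format string and
         * sets format_allocated, so remove_marker() can kfree() it. */
        return marker_probe_register("my_event", "value %d",
                                     my_probe, NULL);
}
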
index 1f4cc00e0c200b7c69272694d121558fd6a10d06..89bcf7c1327d7dc0ffd0f9fbb365bd67f9109767 100644 (file)
@@ -2184,24 +2184,15 @@ static noinline struct module *load_module(void __user *umod,
                struct mod_debug *debug;
                unsigned int num_debug;
 
-#ifdef CONFIG_MARKERS
-               marker_update_probe_range(mod->markers,
-                       mod->markers + mod->num_markers);
-#endif
                debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
                                     sizeof(*debug), &num_debug);
                dynamic_printk_setup(debug, num_debug);
-
-#ifdef CONFIG_TRACEPOINTS
-               tracepoint_update_probe_range(mod->tracepoints,
-                       mod->tracepoints + mod->num_tracepoints);
-#endif
        }
 
        /* sechdrs[0].sh_size is always zero */
        mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc",
                            sizeof(*mseg), &num_mcount);
-       ftrace_init_module(mseg, mseg + num_mcount);
+       ftrace_init_module(mod, mseg, mseg + num_mcount);
 
        err = module_finalize(hdr, sechdrs, mod);
        if (err < 0)
index c9d74083746f8839f96a5a37888302c41245797e..f77d3819ef57b2407012fbbf431f27add286fe91 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
-#include <linux/ftrace.h>
 
 #include "power.h"
 
@@ -257,7 +256,7 @@ static int create_image(int platform_mode)
 
 int hibernation_snapshot(int platform_mode)
 {
-       int error, ftrace_save;
+       int error;
 
        /* Free memory before shutting down devices. */
        error = swsusp_shrink_memory();
@@ -269,7 +268,6 @@ int hibernation_snapshot(int platform_mode)
                goto Close;
 
        suspend_console();
-       ftrace_save = __ftrace_enabled_save();
        error = device_suspend(PMSG_FREEZE);
        if (error)
                goto Recover_platform;
@@ -299,7 +297,6 @@ int hibernation_snapshot(int platform_mode)
  Resume_devices:
        device_resume(in_suspend ?
                (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
-       __ftrace_enabled_restore(ftrace_save);
        resume_console();
  Close:
        platform_end(platform_mode);
@@ -370,11 +367,10 @@ static int resume_target_kernel(void)
 
 int hibernation_restore(int platform_mode)
 {
-       int error, ftrace_save;
+       int error;
 
        pm_prepare_console();
        suspend_console();
-       ftrace_save = __ftrace_enabled_save();
        error = device_suspend(PMSG_QUIESCE);
        if (error)
                goto Finish;
@@ -389,7 +385,6 @@ int hibernation_restore(int platform_mode)
        platform_restore_cleanup(platform_mode);
        device_resume(PMSG_RECOVER);
  Finish:
-       __ftrace_enabled_restore(ftrace_save);
        resume_console();
        pm_restore_console();
        return error;
@@ -402,7 +397,7 @@ int hibernation_restore(int platform_mode)
 
 int hibernation_platform_enter(void)
 {
-       int error, ftrace_save;
+       int error;
 
        if (!hibernation_ops)
                return -ENOSYS;
@@ -417,7 +412,6 @@ int hibernation_platform_enter(void)
                goto Close;
 
        suspend_console();
-       ftrace_save = __ftrace_enabled_save();
        error = device_suspend(PMSG_HIBERNATE);
        if (error) {
                if (hibernation_ops->recover)
@@ -452,7 +446,6 @@ int hibernation_platform_enter(void)
        hibernation_ops->finish();
  Resume_devices:
        device_resume(PMSG_RESTORE);
-       __ftrace_enabled_restore(ftrace_save);
        resume_console();
  Close:
        hibernation_ops->end();
index b8f7ce9473e8412099e476dd54a27a1a21bf8ea6..613f16941b853a4ef949eadc2bc9e54668162e70 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/freezer.h>
 #include <linux/vmstat.h>
 #include <linux/syscalls.h>
-#include <linux/ftrace.h>
 
 #include "power.h"
 
@@ -317,7 +316,7 @@ static int suspend_enter(suspend_state_t state)
  */
 int suspend_devices_and_enter(suspend_state_t state)
 {
-       int error, ftrace_save;
+       int error;
 
        if (!suspend_ops)
                return -ENOSYS;
@@ -328,7 +327,6 @@ int suspend_devices_and_enter(suspend_state_t state)
                        goto Close;
        }
        suspend_console();
-       ftrace_save = __ftrace_enabled_save();
        suspend_test_start();
        error = device_suspend(PMSG_SUSPEND);
        if (error) {
@@ -360,7 +358,6 @@ int suspend_devices_and_enter(suspend_state_t state)
        suspend_test_start();
        device_resume(PMSG_RESUME);
        suspend_test_finish("resume devices");
-       __ftrace_enabled_restore(ftrace_save);
        resume_console();
  Close:
        if (suspend_ops->end)
index dc41827fbfeea474c809ebc3adb6e54edcedb861..60adefb59b5e24f4f3d916df1caab686f91bc9f4 100644 (file)
@@ -544,7 +544,7 @@ static const struct file_operations proc_profile_operations = {
 };
 
 #ifdef CONFIG_SMP
-static inline void profile_nop(void *unused)
+static void profile_nop(void *unused)
 {
 }
 
index b7480fb5c3dc21a7bf6513a978cf0ed2e8c19a8f..7729c4bbc8baec10e47529142b788c43b71fa501 100644 (file)
  */
 #define RUNTIME_INF    ((u64)~0ULL)
 
+DEFINE_TRACE(sched_wait_task);
+DEFINE_TRACE(sched_wakeup);
+DEFINE_TRACE(sched_wakeup_new);
+DEFINE_TRACE(sched_switch);
+DEFINE_TRACE(sched_migrate_task);
+
 #ifdef CONFIG_SMP
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
@@ -5896,6 +5902,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         * The idle tasks have their own, simple scheduling class:
         */
        idle->sched_class = &idle_sched_class;
+       ftrace_graph_init_task(idle);
 }
 
 /*
index 4530fc65445518272ae851fa44e90378cfd908e1..e9afe63da24b524cbd036692945a8dfe634809d2 100644 (file)
@@ -41,6 +41,8 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
+DEFINE_TRACE(sched_signal_send);
+
 static void __user *sig_handler(struct task_struct *t, int sig)
 {
        return t->sighand->action[sig - 1].sa.sa_handler;
index 3d56fe7570daede300fe01f2e6571416107b081b..c83f566e940abaac4cdef8b7744746104aaabd38 100644 (file)
@@ -487,6 +487,16 @@ static struct ctl_table kern_table[] = {
                .proc_handler   = &ftrace_enable_sysctl,
        },
 #endif
+#ifdef CONFIG_TRACING
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "ftrace_dump_on_oops",
+               .data           = &ftrace_dump_on_oops,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+#endif
 #ifdef CONFIG_MODULES
        {
                .ctl_name       = KERN_MODPROBE,
index 33dbefd471e88f9571f299f92b433188dd6697de..8b6b673b4d6cedd923a8105cfa95a0bd3b2c31c8 100644 (file)
@@ -3,18 +3,34 @@
 #  select HAVE_FUNCTION_TRACER:
 #
 
+config USER_STACKTRACE_SUPPORT
+       bool
+
 config NOP_TRACER
        bool
 
 config HAVE_FUNCTION_TRACER
        bool
 
+config HAVE_FUNCTION_GRAPH_TRACER
+       bool
+
+config HAVE_FUNCTION_TRACE_MCOUNT_TEST
+       bool
+       help
+        This gets selected when the arch tests the function_trace_stop
+        variable at the mcount call site. Otherwise, this variable
+        is tested by the called function.
+
 config HAVE_DYNAMIC_FTRACE
        bool
 
 config HAVE_FTRACE_MCOUNT_RECORD
        bool
 
+config HAVE_HW_BRANCH_TRACER
+       bool
+
 config TRACER_MAX_TRACE
        bool
 
@@ -47,6 +63,19 @@ config FUNCTION_TRACER
          (the bootup default), then the overhead of the instructions is very
          small and not measurable even in micro-benchmarks.
 
+config FUNCTION_GRAPH_TRACER
+       bool "Kernel Function Graph Tracer"
+       depends on HAVE_FUNCTION_GRAPH_TRACER
+       depends on FUNCTION_TRACER
+       help
+         Enable the kernel to trace a function at both its entry
+         and its return.
+         Its first purpose is to trace the duration of functions and
+         draw a call graph for each thread, with some information such
+         as the return value.
+         This is done by saving the current return address in a stack
+         of calls within the task structure.
+
 config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
        default n
@@ -138,6 +167,70 @@ config BOOT_TRACER
            selected, because the self-tests are an initcall as well and that
            would invalidate the boot trace. )
 
+config TRACE_BRANCH_PROFILING
+       bool "Trace likely/unlikely profiler"
+       depends on DEBUG_KERNEL
+       select TRACING
+       help
+         This tracer profiles all the likely and unlikely macros
+         in the kernel. It will display the results in:
+
+         /debugfs/tracing/profile_annotated_branch
+
+         Note: this will add a significant overhead; only turn this
+         on if you need to profile the system's use of these macros.
+
+         Say N if unsure.
+
+config PROFILE_ALL_BRANCHES
+       bool "Profile all if conditionals"
+       depends on TRACE_BRANCH_PROFILING
+       help
+         This tracer profiles all branch conditions. Every if ()
+         executed in the kernel is recorded, whether it hit or missed.
+         The results will be displayed in:
+
+         /debugfs/tracing/profile_branch
+
+         This configuration, when enabled, will impose a great overhead
+         on the system. This should only be enabled when the system
+         is to be analyzed.
+
+         Say N if unsure.
+
+config TRACING_BRANCHES
+       bool
+       help
+         Selected by tracers that will trace the likely and unlikely
+         conditions. This prevents the tracers themselves from being
+         profiled. Profiling the tracing infrastructure can only happen
+         when the likelys and unlikelys are not being traced.
+
+config BRANCH_TRACER
+       bool "Trace likely/unlikely instances"
+       depends on TRACE_BRANCH_PROFILING
+       select TRACING_BRANCHES
+       help
+         This traces the events of likely and unlikely condition
+         calls in the kernel.  The difference between this and the
+         "Trace likely/unlikely profiler" is that this is not a
+         histogram of the callers, but actually places the calling
+         events into a running trace buffer to see when and where the
+         events happened, as well as their results.
+
+         Say N if unsure.
+
+config POWER_TRACER
+       bool "Trace power consumption behavior"
+       depends on DEBUG_KERNEL
+       depends on X86
+       select TRACING
+       help
+         This tracer helps developers analyze and optimize the kernel's
+         power management decisions, specifically the C-state and P-state
+         behavior.
+
+
 config STACK_TRACER
        bool "Trace max stack"
        depends on HAVE_FUNCTION_TRACER
@@ -157,6 +250,14 @@ config STACK_TRACER
 
          Say N if unsure.
 
+config BTS_TRACER
+       depends on HAVE_HW_BRANCH_TRACER
+       bool "Trace branches"
+       select TRACING
+       help
+         This tracer records all branches on the system in a circular
+         buffer giving access to the last N branches for each cpu.
+
 config DYNAMIC_FTRACE
        bool "enable/disable ftrace tracepoints dynamically"
        depends on FUNCTION_TRACER
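
A toy example of what the branch profiler described earlier in this Kconfig
hunk records; validate() is hypothetical. With TRACE_BRANCH_PROFILING enabled,
each annotated branch below gets its own hit/miss row in
/debugfs/tracing/profile_annotated_branch:

static int validate(const void *p, unsigned int len)
{
        if (unlikely(p == NULL))        /* counted: hit vs. miss */
                return -EINVAL;
        if (likely(len > 0))            /* counted under its own entry */
                return 0;
        return -EINVAL;
}
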
index c8228b1a49e924386d3b8af6c02186938edf3571..62dc561b6676ab57ced3b65f5804c0e50f31730a 100644 (file)
@@ -10,6 +10,11 @@ CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
 
+# If unlikely tracing is enabled, do not trace these files
+ifdef CONFIG_TRACING_BRANCHES
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+endif
+
 obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
@@ -24,5 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
+obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
+obj-$(CONFIG_BTS_TRACER) += trace_bts.o
+obj-$(CONFIG_POWER_TRACER) += trace_power.o
 
 libftrace-y := ftrace.o
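
The -DDISABLE_BRANCH_PROFILING define added above works because the
likely()/unlikely() macros are conditional; a condensed sketch of the pattern
from include/linux/compiler.h in this series (details elided, recalled rather
than quoted):

#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
        && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
# define likely(x)      (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
#else
# define likely(x)      __builtin_expect(!!(x), 1)  /* tracer code gets this */
#endif
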
index 78db083390f07baa7993c48d9fc0f7a635c769d0..2e78628443e8509311200484b8ddff4644044e99 100644 (file)
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* ftrace_pid_trace >= 0 will only trace threads with this pid */
+static int ftrace_pid_trace = -1;
+
+/* Quick disabling of function tracer. */
+int function_trace_stop;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -55,6 +61,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -63,6 +70,8 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -79,6 +88,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
        };
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+       if (current->pid != ftrace_pid_trace)
+               return;
+
+       ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+       /* do not set ftrace_pid_function to itself! */
+       if (func != ftrace_pid_func)
+               ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
@@ -88,8 +112,24 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 void clear_ftrace_function(void)
 {
        ftrace_trace_function = ftrace_stub;
+       __ftrace_trace_function = ftrace_stub;
+       ftrace_pid_function = ftrace_stub;
 }
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+/*
+ * For those archs that do not test function_trace_stop in their
+ * mcount call site, we need to do it from C.
+ */
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+{
+       if (function_trace_stop)
+               return;
+
+       __ftrace_trace_function(ip, parent_ip);
+}
+#endif
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
        /* should not be called from interrupt context */
@@ -106,14 +146,28 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        ftrace_list = ops;
 
        if (ftrace_enabled) {
+               ftrace_func_t func;
+
+               if (ops->next == &ftrace_list_end)
+                       func = ops->func;
+               else
+                       func = ftrace_list_func;
+
+               if (ftrace_pid_trace >= 0) {
+                       set_ftrace_pid_function(func);
+                       func = ftrace_pid_func;
+               }
+
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
-               if (ops->next == &ftrace_list_end)
-                       ftrace_trace_function = ops->func;
-               else
-                       ftrace_trace_function = ftrace_list_func;
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+               ftrace_trace_function = func;
+#else
+               __ftrace_trace_function = func;
+               ftrace_trace_function = ftrace_test_stop_func;
+#endif
        }
 
        spin_unlock(&ftrace_lock);
@@ -152,9 +206,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
-               if (ftrace_list == &ftrace_list_end ||
-                   ftrace_list->next == &ftrace_list_end)
-                       ftrace_trace_function = ftrace_list->func;
+               if (ftrace_list->next == &ftrace_list_end) {
+                       ftrace_func_t func = ftrace_list->func;
+
+                       if (ftrace_pid_trace >= 0) {
+                               set_ftrace_pid_function(func);
+                               func = ftrace_pid_func;
+                       }
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+                       ftrace_trace_function = func;
+#else
+                       __ftrace_trace_function = func;
+#endif
+               }
        }
 
  out:
@@ -163,6 +227,36 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+       ftrace_func_t func;
+
+       /* should not be called from interrupt context */
+       spin_lock(&ftrace_lock);
+
+       if (ftrace_trace_function == ftrace_stub)
+               goto out;
+
+       func = ftrace_trace_function;
+
+       if (ftrace_pid_trace >= 0) {
+               set_ftrace_pid_function(func);
+               func = ftrace_pid_func;
+       } else {
+               if (func == ftrace_pid_func)
+                       func = ftrace_pid_function;
+       }
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+       ftrace_trace_function = func;
+#else
+       __ftrace_trace_function = func;
+#endif
+
+ out:
+       spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -182,6 +276,8 @@ enum {
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
+       FTRACE_START_FUNC_RET           = (1 << 5),
+       FTRACE_STOP_FUNC_RET            = (1 << 6),
 };
 
 static int ftrace_filtered;
@@ -308,7 +404,7 @@ ftrace_record_ip(unsigned long ip)
 {
        struct dyn_ftrace *rec;
 
-       if (!ftrace_enabled || ftrace_disabled)
+       if (ftrace_disabled)
                return NULL;
 
        rec = ftrace_alloc_dyn_node(ip);
@@ -322,14 +418,51 @@ ftrace_record_ip(unsigned long ip)
        return rec;
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+       int i;
+
+       printk(KERN_CONT "%s", fmt);
+
+       for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
+static void ftrace_bug(int failed, unsigned long ip)
+{
+       switch (failed) {
+       case -EFAULT:
+               FTRACE_WARN_ON_ONCE(1);
+               pr_info("ftrace faulted on modifying ");
+               print_ip_sym(ip);
+               break;
+       case -EINVAL:
+               FTRACE_WARN_ON_ONCE(1);
+               pr_info("ftrace failed to modify ");
+               print_ip_sym(ip);
+               print_ip_ins(" actual: ", (unsigned char *)ip);
+               printk(KERN_CONT "\n");
+               break;
+       case -EPERM:
+               FTRACE_WARN_ON_ONCE(1);
+               pr_info("ftrace faulted on writing ");
+               print_ip_sym(ip);
+               break;
+       default:
+               FTRACE_WARN_ON_ONCE(1);
+               pr_info("ftrace faulted on unknown error ");
+               print_ip_sym(ip);
+       }
+}
+
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec,
-                     unsigned char *nop, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
        unsigned long ip, fl;
-       unsigned char *call, *old, *new;
+       unsigned long ftrace_addr;
+
+       ftrace_addr = (unsigned long)ftrace_caller;
 
        ip = rec->ip;
 
@@ -388,34 +521,28 @@ __ftrace_replace_code(struct dyn_ftrace *rec,
                }
        }
 
-       call = ftrace_call_replace(ip, FTRACE_ADDR);
-
-       if (rec->flags & FTRACE_FL_ENABLED) {
-               old = nop;
-               new = call;
-       } else {
-               old = call;
-               new = nop;
-       }
-
-       return ftrace_modify_code(ip, old, new);
+       if (rec->flags & FTRACE_FL_ENABLED)
+               return ftrace_make_call(rec, ftrace_addr);
+       else
+               return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
 {
        int i, failed;
-       unsigned char *nop = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
 
-       nop = ftrace_nop_replace();
-
        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
 
-                       /* don't modify code that has already faulted */
-                       if (rec->flags & FTRACE_FL_FAILED)
+                       /*
+                        * Skip over free records and records that have
+                        * failed.
+                        */
+                       if (rec->flags & FTRACE_FL_FREE ||
+                           rec->flags & FTRACE_FL_FAILED)
                                continue;
 
                        /* ignore updates to this record's mcount site */
@@ -426,68 +553,30 @@ static void ftrace_replace_code(int enable)
                                unfreeze_record(rec);
                        }
 
-                       failed = __ftrace_replace_code(rec, nop, enable);
+                       failed = __ftrace_replace_code(rec, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_free_rec(rec);
-                               }
+                               } else
+                                       ftrace_bug(failed, rec->ip);
                        }
                }
        }
 }
 
-static void print_ip_ins(const char *fmt, unsigned char *p)
-{
-       int i;
-
-       printk(KERN_CONT "%s", fmt);
-
-       for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
-}
-
 static int
-ftrace_code_disable(struct dyn_ftrace *rec)
+ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
        unsigned long ip;
-       unsigned char *nop, *call;
        int ret;
 
        ip = rec->ip;
 
-       nop = ftrace_nop_replace();
-       call = ftrace_call_replace(ip, mcount_addr);
-
-       ret = ftrace_modify_code(ip, call, nop);
+       ret = ftrace_make_nop(mod, rec, mcount_addr);
        if (ret) {
-               switch (ret) {
-               case -EFAULT:
-                       FTRACE_WARN_ON_ONCE(1);
-                       pr_info("ftrace faulted on modifying ");
-                       print_ip_sym(ip);
-                       break;
-               case -EINVAL:
-                       FTRACE_WARN_ON_ONCE(1);
-                       pr_info("ftrace failed to modify ");
-                       print_ip_sym(ip);
-                       print_ip_ins(" expected: ", call);
-                       print_ip_ins(" actual: ", (unsigned char *)ip);
-                       print_ip_ins(" replace: ", nop);
-                       printk(KERN_CONT "\n");
-                       break;
-               case -EPERM:
-                       FTRACE_WARN_ON_ONCE(1);
-                       pr_info("ftrace faulted on writing ");
-                       print_ip_sym(ip);
-                       break;
-               default:
-                       FTRACE_WARN_ON_ONCE(1);
-                       pr_info("ftrace faulted on unknown error ");
-                       print_ip_sym(ip);
-               }
-
+               ftrace_bug(ret, ip);
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
@@ -506,6 +595,11 @@ static int __ftrace_modify_code(void *data)
        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);
 
+       if (*command & FTRACE_START_FUNC_RET)
+               ftrace_enable_ftrace_graph_caller();
+       else if (*command & FTRACE_STOP_FUNC_RET)
+               ftrace_disable_ftrace_graph_caller();
+
        return 0;
 }
 
@@ -515,43 +609,43 @@ static void ftrace_run_update_code(int command)
 }
 
 static ftrace_func_t saved_ftrace_func;
-static int ftrace_start;
-static DEFINE_MUTEX(ftrace_start_lock);
+static int ftrace_start_up;
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-       int command = 0;
-
-       if (unlikely(ftrace_disabled))
-               return;
-
-       mutex_lock(&ftrace_start_lock);
-       ftrace_start++;
-       command |= FTRACE_ENABLE_CALLS;
-
        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }
 
        if (!command || !ftrace_enabled)
-               goto out;
+               return;
 
        ftrace_run_update_code(command);
- out:
-       mutex_unlock(&ftrace_start_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_startup(int command)
 {
-       int command = 0;
+       if (unlikely(ftrace_disabled))
+               return;
+
+       mutex_lock(&ftrace_start_lock);
+       ftrace_start_up++;
+       command |= FTRACE_ENABLE_CALLS;
+
+       ftrace_startup_enable(command);
+
+       mutex_unlock(&ftrace_start_lock);
+}
 
+static void ftrace_shutdown(int command)
+{
        if (unlikely(ftrace_disabled))
                return;
 
        mutex_lock(&ftrace_start_lock);
-       ftrace_start--;
-       if (!ftrace_start)
+       ftrace_start_up--;
+       if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;
 
        if (saved_ftrace_func != ftrace_trace_function) {
@@ -577,8 +671,8 @@ static void ftrace_startup_sysctl(void)
        mutex_lock(&ftrace_start_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
-       /* ftrace_start is true if we want ftrace running */
-       if (ftrace_start)
+       /* ftrace_start_up is true if we want ftrace running */
+       if (ftrace_start_up)
                command |= FTRACE_ENABLE_CALLS;
 
        ftrace_run_update_code(command);
@@ -593,8 +687,8 @@ static void ftrace_shutdown_sysctl(void)
                return;
 
        mutex_lock(&ftrace_start_lock);
-       /* ftrace_start is true if ftrace is running */
-       if (ftrace_start)
+       /* ftrace_start_up is true if ftrace is running */
+       if (ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;
 
        ftrace_run_update_code(command);
@@ -605,7 +699,7 @@ static cycle_t              ftrace_update_time;
 static unsigned long   ftrace_update_cnt;
 unsigned long          ftrace_update_tot_cnt;
 
-static int ftrace_update_code(void)
+static int ftrace_update_code(struct module *mod)
 {
        struct dyn_ftrace *p, *t;
        cycle_t start, stop;
@@ -622,7 +716,7 @@ static int ftrace_update_code(void)
                list_del_init(&p->list);
 
                /* convert record (i.e, patch mcount-call with NOP) */
-               if (ftrace_code_disable(p)) {
+               if (ftrace_code_disable(mod, p)) {
                        p->flags |= FTRACE_FL_CONVERTED;
                        ftrace_update_cnt++;
                } else
@@ -690,7 +784,6 @@ enum {
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
-       loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
@@ -715,6 +808,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
+               } else {
+                       iter->idx = -1;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
@@ -737,8 +832,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
        }
        spin_unlock(&ftrace_lock);
 
-       iter->pos = *pos;
-
        return rec;
 }
 
@@ -746,13 +839,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
-       loff_t l = -1;
 
-       if (*pos > iter->pos)
-               *pos = iter->pos;
+       if (*pos > 0) {
+               if (iter->idx < 0)
+                       return p;
+               (*pos)--;
+               iter->idx--;
+       }
 
-       l = *pos;
-       p = t_next(m, p, &l);
+       p = t_next(m, p, pos);
 
        return p;
 }
@@ -763,21 +858,15 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
-       struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];
-       int ret = 0;
 
        if (!rec)
                return 0;
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-       ret = seq_printf(m, "%s\n", str);
-       if (ret < 0) {
-               iter->pos--;
-               iter->idx--;
-       }
+       seq_printf(m, "%s\n", str);
 
        return 0;
 }
@@ -803,7 +892,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
                return -ENOMEM;
 
        iter->pg = ftrace_pages_start;
-       iter->pos = 0;
 
        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
@@ -890,7 +978,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
-               iter->pos = 0;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;
 
@@ -1181,7 +1268,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftrace_start_lock);
-       if (ftrace_start && ftrace_enabled)
+       if (ftrace_start_up && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftrace_start_lock);
        mutex_unlock(&ftrace_sysctl_lock);
@@ -1233,13 +1320,10 @@ static struct file_operations ftrace_notrace_fops = {
        .release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-       struct dentry *d_tracer;
        struct dentry *entry;
 
-       d_tracer = tracing_init_dentry();
-
        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
@@ -1266,9 +1350,8 @@ static __init int ftrace_init_debugfs(void)
        return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
-static int ftrace_convert_nops(unsigned long *start,
+static int ftrace_convert_nops(struct module *mod,
+                              unsigned long *start,
                               unsigned long *end)
 {
        unsigned long *p;
@@ -1279,23 +1362,32 @@ static int ftrace_convert_nops(unsigned long *start,
        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
+               /*
+                * Some architecture linkers will pad between
+                * the different mcount_loc sections of different
+                * object files to satisfy alignments.
+                * Skip any NULL pointers.
+                */
+               if (!addr)
+                       continue;
                ftrace_record_ip(addr);
        }
 
        /* disable interrupts to prevent kstop machine */
        local_irq_save(flags);
-       ftrace_update_code();
+       ftrace_update_code(mod);
        local_irq_restore(flags);
        mutex_unlock(&ftrace_start_lock);
 
        return 0;
 }
 
-void ftrace_init_module(unsigned long *start, unsigned long *end)
+void ftrace_init_module(struct module *mod,
+                       unsigned long *start, unsigned long *end)
 {
        if (ftrace_disabled || start == end)
                return;
-       ftrace_convert_nops(start, end);
+       ftrace_convert_nops(mod, start, end);
 }
 
 extern unsigned long __start_mcount_loc[];
@@ -1325,7 +1417,8 @@ void __init ftrace_init(void)
 
        last_ftrace_enabled = ftrace_enabled = 1;
 
-       ret = ftrace_convert_nops(__start_mcount_loc,
+       ret = ftrace_convert_nops(NULL,
+                                 __start_mcount_loc,
                                  __stop_mcount_loc);
 
        return;
@@ -1342,12 +1435,101 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
-# define ftrace_startup()              do { } while (0)
-# define ftrace_shutdown()             do { } while (0)
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command)       do { } while (0)
+# define ftrace_shutdown(command)      do { } while (0)
 # define ftrace_startup_sysctl()       do { } while (0)
 # define ftrace_shutdown_sysctl()      do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       int r;
+
+       if (ftrace_pid_trace >= 0)
+               r = sprintf(buf, "%u\n", ftrace_pid_trace);
+       else
+               r = sprintf(buf, "no pid\n");
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+                  size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtol(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&ftrace_start_lock);
+       if (val < 0) {
+               /* disable pid tracing */
+               if (ftrace_pid_trace < 0)
+                       goto out;
+               ftrace_pid_trace = -1;
+
+       } else {
+
+               if (ftrace_pid_trace == val)
+                       goto out;
+
+               ftrace_pid_trace = val;
+       }
+
+       /* update the function call */
+       ftrace_update_pid_func();
+       ftrace_startup_enable(0);
+
+ out:
+       mutex_unlock(&ftrace_start_lock);
+
+       return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+       .read = ftrace_pid_read,
+       .write = ftrace_pid_write,
+};
+
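
A hedged userspace sketch for the new file parsed by ftrace_pid_write() above;
the helper name is ours, and the path assumes debugfs mounted on /debug as
elsewhere in this series:

#include <stdio.h>

static int set_trace_pid(int pid)
{
        FILE *f = fopen("/debug/tracing/set_ftrace_pid", "w");

        if (!f)
                return -1;
        fprintf(f, "%d\n", pid);        /* a pid to trace, or -1 to disable */
        return fclose(f);
}
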
+static __init int ftrace_init_debugfs(void)
+{
+       struct dentry *d_tracer;
+       struct dentry *entry;
+
+       d_tracer = tracing_init_dentry();
+       if (!d_tracer)
+               return 0;
+
+       ftrace_init_dyn_debugfs(d_tracer);
+
+       entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+                                   NULL, &ftrace_pid_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'set_ftrace_pid' entry\n");
+       return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *
@@ -1381,10 +1563,11 @@ int register_ftrace_function(struct ftrace_ops *ops)
                return -1;
 
        mutex_lock(&ftrace_sysctl_lock);
+
        ret = __register_ftrace_function(ops);
-       ftrace_startup();
-       mutex_unlock(&ftrace_sysctl_lock);
+       ftrace_startup(0);
 
+       mutex_unlock(&ftrace_sysctl_lock);
        return ret;
 }
 
@@ -1400,7 +1583,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
-       ftrace_shutdown();
+       ftrace_shutdown(0);
        mutex_unlock(&ftrace_sysctl_lock);
 
        return ret;
@@ -1449,3 +1632,142 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
        return ret;
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static atomic_t ftrace_graph_active;
+
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+                       (trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+                       (trace_func_graph_ent_t)ftrace_stub;
+
+/* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+{
+       int i;
+       int ret = 0;
+       unsigned long flags;
+       int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
+       struct task_struct *g, *t;
+
+       for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
+               ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
+                                       * sizeof(struct ftrace_ret_stack),
+                                       GFP_KERNEL);
+               if (!ret_stack_list[i]) {
+                       start = 0;
+                       end = i;
+                       ret = -ENOMEM;
+                       goto free;
+               }
+       }
+
+       read_lock_irqsave(&tasklist_lock, flags);
+       do_each_thread(g, t) {
+               if (start == end) {
+                       ret = -EAGAIN;
+                       goto unlock;
+               }
+
+               if (t->ret_stack == NULL) {
+                       t->curr_ret_stack = -1;
+                       /* Make sure IRQs see the -1 first: */
+                       barrier();
+                       t->ret_stack = ret_stack_list[start++];
+                       atomic_set(&t->trace_overrun, 0);
+               }
+       } while_each_thread(g, t);
+
+unlock:
+       read_unlock_irqrestore(&tasklist_lock, flags);
+free:
+       for (i = start; i < end; i++)
+               kfree(ret_stack_list[i]);
+       return ret;
+}
+
+/* Allocate a return stack for each task */
+static int start_graph_tracing(void)
+{
+       struct ftrace_ret_stack **ret_stack_list;
+       int ret;
+
+       ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
+                               sizeof(struct ftrace_ret_stack *),
+                               GFP_KERNEL);
+
+       if (!ret_stack_list)
+               return -ENOMEM;
+
+       do {
+               ret = alloc_retstack_tasklist(ret_stack_list);
+       } while (ret == -EAGAIN);
+
+       kfree(ret_stack_list);
+       return ret;
+}
+
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+                       trace_func_graph_ent_t entryfunc)
+{
+       int ret = 0;
+
+       mutex_lock(&ftrace_sysctl_lock);
+
+       atomic_inc(&ftrace_graph_active);
+       ret = start_graph_tracing();
+       if (ret) {
+               atomic_dec(&ftrace_graph_active);
+               goto out;
+       }
+
+       ftrace_graph_return = retfunc;
+       ftrace_graph_entry = entryfunc;
+
+       ftrace_startup(FTRACE_START_FUNC_RET);
+
+out:
+       mutex_unlock(&ftrace_sysctl_lock);
+       return ret;
+}
+
+void unregister_ftrace_graph(void)
+{
+       mutex_lock(&ftrace_sysctl_lock);
+
+       atomic_dec(&ftrace_graph_active);
+       ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+       ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+       ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+
+       mutex_unlock(&ftrace_sysctl_lock);
+}
+
+/* Allocate a return stack for newly created task */
+void ftrace_graph_init_task(struct task_struct *t)
+{
+       if (atomic_read(&ftrace_graph_active)) {
+               t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+                               * sizeof(struct ftrace_ret_stack),
+                               GFP_KERNEL);
+               if (!t->ret_stack)
+                       return;
+               t->curr_ret_stack = -1;
+               atomic_set(&t->trace_overrun, 0);
+       } else
+               t->ret_stack = NULL;
+}
+
+void ftrace_graph_exit_task(struct task_struct *t)
+{
+       struct ftrace_ret_stack *ret_stack = t->ret_stack;
+
+       t->ret_stack = NULL;
+       /* NULL must become visible to IRQs before we free it: */
+       barrier();
+
+       kfree(ret_stack);
+}
+#endif
+
index 668bbb5ef2bd154dc8084fe4480bfd251e484f41..e206951603c1e642bd8b0e516a7bade1f18ed7b7 100644 (file)
 
 #include "trace.h"
 
-/* Global flag to disable all recording to ring buffers */
-static int ring_buffers_off __read_mostly;
+/*
+ * A fast way to enable or disable all ring buffers is to
+ * call tracing_on or tracing_off. Turning the ring buffers off
+ * prevents any recording to them.
+ * Turning this switch on makes it OK to write to the
+ * ring buffer, if the ring buffer itself is enabled.
+ *
+ * There are three layers that must be on in order to write
+ * to the ring buffer.
+ *
+ * 1) This global flag must be set.
+ * 2) The ring buffer must be enabled for recording.
+ * 3) The per cpu buffer must be enabled for recording.
+ *
+ * In case of an anomaly, this global flag has a bit set that
+ * will permanently disable all ring buffers.
+ */
+
+/*
+ * Global flag to disable all recording to ring buffers
+ *  This has two bits: ON, DISABLED
+ *
+ *  ON   DISABLED
+ * ---- ----------
+ *   0      0        : ring buffers are off
+ *   1      0        : ring buffers are on
+ *   X      1        : ring buffers are permanently disabled
+ */
+
+enum {
+       RB_BUFFERS_ON_BIT       = 0,
+       RB_BUFFERS_DISABLED_BIT = 1,
+};
+
+enum {
+       RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
+       RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
+};
+
+static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
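/*
 * Editor's note: a sketch of why the writers below get away with a single
 * compare instead of two bit tests (rb_recording_allowed is a hypothetical
 * helper, not in the patch): the variable is only ever 0, RB_BUFFERS_ON,
 * or a value with RB_BUFFERS_DISABLED set, so exactly one value means
 * "writable".
 */
static inline int rb_recording_allowed(void)
{
        return ring_buffer_flags == RB_BUFFERS_ON;
}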
 
 /**
  * tracing_on - enable all tracing buffers
@@ -29,7 +67,7 @@ static int ring_buffers_off __read_mostly;
  */
 void tracing_on(void)
 {
-       ring_buffers_off = 0;
+       set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
 
 /**
@@ -42,9 +80,22 @@ void tracing_on(void)
  */
 void tracing_off(void)
 {
-       ring_buffers_off = 1;
+       clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
+}
+
+/**
+ * tracing_off_permanent - permanently disable ring buffers
+ *
+ * This function, once called, will disable all ring buffers
+ * permanently.
+ */
+void tracing_off_permanent(void)
+{
+       set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 }
 
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
@@ -187,7 +238,8 @@ static inline int test_time_stamp(u64 delta)
 struct ring_buffer_per_cpu {
        int                             cpu;
        struct ring_buffer              *buffer;
-       spinlock_t                      lock;
+       spinlock_t                      reader_lock; /* serialize readers */
+       raw_spinlock_t                  lock;
        struct lock_class_key           lock_key;
        struct list_head                pages;
        struct buffer_page              *head_page;     /* read from head */
@@ -221,32 +273,16 @@ struct ring_buffer_iter {
        u64                             read_stamp;
 };
 
+/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 #define RB_WARN_ON(buffer, cond)                               \
-       do {                                                    \
-               if (unlikely(cond)) {                           \
-                       atomic_inc(&buffer->record_disabled);   \
-                       WARN_ON(1);                             \
-               }                                               \
-       } while (0)
-
-#define RB_WARN_ON_RET(buffer, cond)                           \
-       do {                                                    \
-               if (unlikely(cond)) {                           \
-                       atomic_inc(&buffer->record_disabled);   \
-                       WARN_ON(1);                             \
-                       return -1;                              \
-               }                                               \
-       } while (0)
-
-#define RB_WARN_ON_ONCE(buffer, cond)                          \
-       do {                                                    \
-               static int once;                                \
-               if (unlikely(cond) && !once) {                  \
-                       once++;                                 \
+       ({                                                      \
+               int _____ret = unlikely(cond);                  \
+               if (_____ret) {                                 \
                        atomic_inc(&buffer->record_disabled);   \
                        WARN_ON(1);                             \
                }                                               \
-       } while (0)
+               _____ret;                                       \
+       })
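/*
 * Editor's note: the macro is now a GCC statement expression that
 * evaluates to the condition, so callers can both warn and bail out
 * in one line, as the conversions below do:
 *
 *        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 *                return -1;
 */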
 
 /**
  * check_pages - integrity check of buffer pages
@@ -260,14 +296,18 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
        struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *page, *tmp;
 
-       RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
-       RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
+       if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
+               return -1;
+       if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
+               return -1;
 
        list_for_each_entry_safe(page, tmp, head, list) {
-               RB_WARN_ON_RET(cpu_buffer,
-                              page->list.next->prev != &page->list);
-               RB_WARN_ON_RET(cpu_buffer,
-                              page->list.prev->next != &page->list);
+               if (RB_WARN_ON(cpu_buffer,
+                              page->list.next->prev != &page->list))
+                       return -1;
+               if (RB_WARN_ON(cpu_buffer,
+                              page->list.prev->next != &page->list))
+                       return -1;
        }
 
        return 0;
@@ -324,7 +364,8 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
        cpu_buffer->cpu = cpu;
        cpu_buffer->buffer = buffer;
-       spin_lock_init(&cpu_buffer->lock);
+       spin_lock_init(&cpu_buffer->reader_lock);
+       cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&cpu_buffer->pages);
 
        page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
@@ -473,13 +514,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
        synchronize_sched();
 
        for (i = 0; i < nr_pages; i++) {
-               BUG_ON(list_empty(&cpu_buffer->pages));
+               if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+                       return;
                p = cpu_buffer->pages.next;
                page = list_entry(p, struct buffer_page, list);
                list_del_init(&page->list);
                free_buffer_page(page);
        }
-       BUG_ON(list_empty(&cpu_buffer->pages));
+       if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+               return;
 
        rb_reset_cpu(cpu_buffer);
 
@@ -501,7 +544,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
        synchronize_sched();
 
        for (i = 0; i < nr_pages; i++) {
-               BUG_ON(list_empty(pages));
+               if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
+                       return;
                p = pages->next;
                page = list_entry(p, struct buffer_page, list);
                list_del_init(&page->list);
@@ -562,7 +606,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        if (size < buffer_size) {
 
                /* easy case, just free pages */
-               BUG_ON(nr_pages >= buffer->pages);
+               if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
+                       mutex_unlock(&buffer->mutex);
+                       return -1;
+               }
 
                rm_pages = buffer->pages - nr_pages;
 
@@ -581,7 +628,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
         * add these pages to the cpu_buffers. Otherwise we just free
         * them all and return -ENOMEM;
         */
-       BUG_ON(nr_pages <= buffer->pages);
+       if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
+               mutex_unlock(&buffer->mutex);
+               return -1;
+       }
+
        new_pages = nr_pages - buffer->pages;
 
        for_each_buffer_cpu(buffer, cpu) {
@@ -604,7 +655,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
                rb_insert_pages(cpu_buffer, &pages, new_pages);
        }
 
-       BUG_ON(!list_empty(&pages));
+       if (RB_WARN_ON(buffer, !list_empty(&pages))) {
+               mutex_unlock(&buffer->mutex);
+               return -1;
+       }
 
  out:
        buffer->pages = nr_pages;
@@ -693,7 +747,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
             head += rb_event_length(event)) {
 
                event = __rb_page_index(cpu_buffer->head_page, head);
-               BUG_ON(rb_null_event(event));
+               if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+                       return;
                /* Only count data entries */
                if (event->type != RINGBUF_TYPE_DATA)
                        continue;
@@ -746,8 +801,9 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
        addr &= PAGE_MASK;
 
        while (cpu_buffer->commit_page->page != (void *)addr) {
-               RB_WARN_ON(cpu_buffer,
-                          cpu_buffer->commit_page == cpu_buffer->tail_page);
+               if (RB_WARN_ON(cpu_buffer,
+                         cpu_buffer->commit_page == cpu_buffer->tail_page))
+                       return;
                cpu_buffer->commit_page->commit =
                        cpu_buffer->commit_page->write;
                rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
@@ -894,7 +950,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        if (write > BUF_PAGE_SIZE) {
                struct buffer_page *next_page = tail_page;
 
-               spin_lock_irqsave(&cpu_buffer->lock, flags);
+               local_irq_save(flags);
+               __raw_spin_lock(&cpu_buffer->lock);
 
                rb_inc_page(cpu_buffer, &next_page);
 
@@ -902,7 +959,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                reader_page = cpu_buffer->reader_page;
 
                /* we grabbed the lock before incrementing */
-               RB_WARN_ON(cpu_buffer, next_page == reader_page);
+               if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
+                       goto out_unlock;
 
                /*
                 * If for some reason, we had an interrupt storm that made
@@ -970,7 +1028,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                        rb_set_commit_to_write(cpu_buffer);
                }
 
-               spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+               __raw_spin_unlock(&cpu_buffer->lock);
+               local_irq_restore(flags);
 
                /* fail and let the caller try again */
                return ERR_PTR(-EAGAIN);
@@ -978,7 +1037,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
        /* We reserved something on the buffer */
 
-       BUG_ON(write > BUF_PAGE_SIZE);
+       if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+               return NULL;
 
        event = __rb_page_index(tail_page, tail);
        rb_update_event(event, type, length);
@@ -993,7 +1053,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        return event;
 
  out_unlock:
-       spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+       __raw_spin_unlock(&cpu_buffer->lock);
+       local_irq_restore(flags);
        return NULL;
 }
 
@@ -1076,10 +1137,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
         * storm or we have something buggy.
         * Bail!
         */
-       if (unlikely(++nr_loops > 1000)) {
-               RB_WARN_ON(cpu_buffer, 1);
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
                return NULL;
-       }
 
        ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
@@ -1175,15 +1234,14 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
        struct ring_buffer_event *event;
        int cpu, resched;
 
-       if (ring_buffers_off)
+       if (ring_buffer_flags != RB_BUFFERS_ON)
                return NULL;
 
        if (atomic_read(&buffer->record_disabled))
                return NULL;
 
        /* If we are tracing schedule, we don't want to recurse */
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
 
@@ -1214,10 +1272,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
        return event;
 
  out:
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
        return NULL;
 }
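/*
 * Editor's note: ftrace_preempt_disable()/ftrace_preempt_enable() come
 * from trace.h in this series; presumably they just wrap the open-coded
 * sequence being deleted here, roughly:
 */
static inline int ftrace_preempt_disable(void)
{
        int resched = need_resched();

        preempt_disable_notrace();
        return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}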
 
@@ -1259,12 +1314,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
        /*
         * Only the last preempt count needs to restore preemption.
         */
-       if (preempt_count() == 1) {
-               if (per_cpu(rb_need_resched, cpu))
-                       preempt_enable_no_resched_notrace();
-               else
-                       preempt_enable_notrace();
-       } else
+       if (preempt_count() == 1)
+               ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+       else
                preempt_enable_no_resched_notrace();
 
        return 0;
@@ -1294,14 +1346,13 @@ int ring_buffer_write(struct ring_buffer *buffer,
        int ret = -EBUSY;
        int cpu, resched;
 
-       if (ring_buffers_off)
+       if (ring_buffer_flags != RB_BUFFERS_ON)
                return -EBUSY;
 
        if (atomic_read(&buffer->record_disabled))
                return -EBUSY;
 
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
 
@@ -1327,10 +1378,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
        ret = 0;
  out:
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
 
        return ret;
 }
@@ -1489,14 +1537,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
        return overruns;
 }
 
-/**
- * ring_buffer_iter_reset - reset an iterator
- * @iter: The iterator to reset
- *
- * Resets the iterator, so that it will start from the beginning
- * again.
- */
-void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
+static void rb_iter_reset(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 
@@ -1514,6 +1555,23 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
                iter->read_stamp = iter->head_page->time_stamp;
 }
 
+/**
+ * ring_buffer_iter_reset - reset an iterator
+ * @iter: The iterator to reset
+ *
+ * Resets the iterator, so that it will start from the beginning
+ * again.
+ */
+void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
+{
+       struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+       rb_iter_reset(iter);
+       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+}
+
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
  * @iter: The iterator to check
@@ -1597,7 +1655,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        unsigned long flags;
        int nr_loops = 0;
 
-       spin_lock_irqsave(&cpu_buffer->lock, flags);
+       local_irq_save(flags);
+       __raw_spin_lock(&cpu_buffer->lock);
 
  again:
        /*
@@ -1606,8 +1665,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         * a case where we will loop three times. There should be no
         * reason to loop four times (that I know of).
         */
-       if (unlikely(++nr_loops > 3)) {
-               RB_WARN_ON(cpu_buffer, 1);
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
                reader = NULL;
                goto out;
        }
@@ -1619,8 +1677,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
                goto out;
 
        /* Never should we have an index greater than the size */
-       RB_WARN_ON(cpu_buffer,
-                  cpu_buffer->reader_page->read > rb_page_size(reader));
+       if (RB_WARN_ON(cpu_buffer,
+                      cpu_buffer->reader_page->read > rb_page_size(reader)))
+               goto out;
 
        /* check if we caught up to the tail */
        reader = NULL;
@@ -1659,7 +1718,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        goto again;
 
  out:
-       spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+       __raw_spin_unlock(&cpu_buffer->lock);
+       local_irq_restore(flags);
 
        return reader;
 }
@@ -1673,7 +1733,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
        reader = rb_get_reader_page(cpu_buffer);
 
        /* This function should not be called when buffer is empty */
-       BUG_ON(!reader);
+       if (RB_WARN_ON(cpu_buffer, !reader))
+               return;
 
        event = rb_reader_event(cpu_buffer);
 
@@ -1700,7 +1761,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
         * Check if we are at the end of the buffer.
         */
        if (iter->head >= rb_page_size(iter->head_page)) {
-               BUG_ON(iter->head_page == cpu_buffer->commit_page);
+               if (RB_WARN_ON(buffer,
+                              iter->head_page == cpu_buffer->commit_page))
+                       return;
                rb_inc_iter(iter);
                return;
        }
@@ -1713,8 +1776,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
         * This should not be called to advance the header if we are
         * at the tail of the buffer.
         */
-       BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
-              (iter->head + length > rb_commit_index(cpu_buffer)));
+       if (RB_WARN_ON(cpu_buffer,
+                      (iter->head_page == cpu_buffer->commit_page) &&
+                      (iter->head + length > rb_commit_index(cpu_buffer))))
+               return;
 
        rb_update_iter_read_stamp(iter, event);
 
@@ -1726,17 +1791,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
                rb_advance_iter(iter);
 }
 
-/**
- * ring_buffer_peek - peek at the next event to be read
- * @buffer: The ring buffer to read
- * @cpu: The cpu to peak at
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not consume the data.
- */
-struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+static struct ring_buffer_event *
+rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
@@ -1757,10 +1813,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         * can have.  Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
-       if (unlikely(++nr_loops > 10)) {
-               RB_WARN_ON(cpu_buffer, 1);
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
                return NULL;
-       }
 
        reader = rb_get_reader_page(cpu_buffer);
        if (!reader)
@@ -1798,16 +1852,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
        return NULL;
 }
 
-/**
- * ring_buffer_iter_peek - peek at the next event to be read
- * @iter: The ring buffer iterator
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not increment the iterator.
- */
-struct ring_buffer_event *
-ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+static struct ring_buffer_event *
+rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 {
        struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
@@ -1829,10 +1875,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
         * can have. Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
-       if (unlikely(++nr_loops > 10)) {
-               RB_WARN_ON(cpu_buffer, 1);
+       if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
                return NULL;
-       }
 
        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;
@@ -1868,6 +1912,51 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
        return NULL;
 }
 
+/**
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+ * @cpu: The cpu to peek at
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not consume the data.
+ */
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+       struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+       struct ring_buffer_event *event;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+       event = rb_buffer_peek(buffer, cpu, ts);
+       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+       return event;
+}
+
+/**
+ * ring_buffer_iter_peek - peek at the next event to be read
+ * @iter: The ring buffer iterator
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not increment the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+{
+       struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+       struct ring_buffer_event *event;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+       event = rb_iter_peek(iter, ts);
+       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+       return event;
+}
+
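/*
 * Editor's note: the pattern here is that the public entry points take
 * reader_lock while the static rb_*_peek() helpers assume it is already
 * held. That lets ring_buffer_consume() below peek and advance under one
 * lock acquisition, roughly:
 *
 *        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 *        event = rb_buffer_peek(buffer, cpu, ts);
 *        if (event)
 *                rb_advance_reader(cpu_buffer);
 *        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 */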
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
@@ -1879,19 +1968,24 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 struct ring_buffer_event *
 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
-       struct ring_buffer_per_cpu *cpu_buffer;
+       struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
+       unsigned long flags;
 
        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;
 
-       event = ring_buffer_peek(buffer, cpu, ts);
+       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+       event = rb_buffer_peek(buffer, cpu, ts);
        if (!event)
-               return NULL;
+               goto out;
 
-       cpu_buffer = buffer->buffers[cpu];
        rb_advance_reader(cpu_buffer);
 
+ out:
+       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
        return event;
 }
 
@@ -1928,9 +2022,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();
 
-       spin_lock_irqsave(&cpu_buffer->lock, flags);
-       ring_buffer_iter_reset(iter);
-       spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+       __raw_spin_lock(&cpu_buffer->lock);
+       rb_iter_reset(iter);
+       __raw_spin_unlock(&cpu_buffer->lock);
+       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
        return iter;
 }
@@ -1962,12 +2058,17 @@ struct ring_buffer_event *
 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 {
        struct ring_buffer_event *event;
+       struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+       unsigned long flags;
 
-       event = ring_buffer_iter_peek(iter, ts);
+       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+       event = rb_iter_peek(iter, ts);
        if (!event)
-               return NULL;
+               goto out;
 
        rb_advance_iter(iter);
+ out:
+       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
        return event;
 }
@@ -2016,11 +2117,15 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
        if (!cpu_isset(cpu, buffer->cpumask))
                return;
 
-       spin_lock_irqsave(&cpu_buffer->lock, flags);
+       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+       __raw_spin_lock(&cpu_buffer->lock);
 
        rb_reset_cpu(cpu_buffer);
 
-       spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+       __raw_spin_unlock(&cpu_buffer->lock);
+
+       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 
 /**
@@ -2122,12 +2227,14 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
 {
-       int *p = filp->private_data;
+       long *p = filp->private_data;
        char buf[64];
        int r;
 
-       /* !ring_buffers_off == tracing_on */
-       r = sprintf(buf, "%d\n", !*p);
+       if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
+               r = sprintf(buf, "permanently disabled\n");
+       else
+               r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
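/*
 * Editor's note: sample reads of the debugfs "tracing_on" file after this
 * change (illustrative output, not from the patch):
 *
 *        "1\n"                      buffers on
 *        "0\n"                      buffers off
 *        "permanently disabled\n"   tracing_off_permanent() was called
 */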
@@ -2136,7 +2243,7 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
 {
-       int *p = filp->private_data;
+       long *p = filp->private_data;
        char buf[64];
        long val;
        int ret;
@@ -2153,8 +2260,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
        if (ret < 0)
                return ret;
 
-       /* !ring_buffers_off == tracing_on */
-       *p = !val;
+       if (val)
+               set_bit(RB_BUFFERS_ON_BIT, p);
+       else
+               clear_bit(RB_BUFFERS_ON_BIT, p);
 
        (*ppos)++;
 
@@ -2176,7 +2285,7 @@ static __init int rb_init_debugfs(void)
        d_tracer = tracing_init_dentry();
 
        entry = debugfs_create_file("tracing_on", 0644, d_tracer,
-                                   &ring_buffers_off, &rb_simple_fops);
+                                   &ring_buffer_flags, &rb_simple_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_on' entry\n");
 
index d86e3252f3000024cfaf31c63ffbee765dafac53..91887a280ab964e837ef1bb4a2562c0bd97cb21a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/fs.h>
 #include <linux/kprobes.h>
+#include <linux/seq_file.h>
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
 unsigned long __read_mostly    tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly    tracing_thresh;
 
+/* For tracers that don't implement custom flags */
+static struct tracer_opt dummy_tracer_opt[] = {
+       { }
+};
+
+static struct tracer_flags dummy_tracer_flags = {
+       .val = 0,
+       .opts = dummy_tracer_opt
+};
+
+static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+{
+       return 0;
+}
+
+/*
+ * Kill all tracing for good (never come back).
+ * It is initialized to 1 but will be set to zero if the tracer
+ * initializes successfully; that is the only place that clears it.
+ */
+int tracing_disabled = 1;
+
 static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
@@ -62,7 +86,36 @@ static cpumask_t __read_mostly               tracing_buffer_mask;
 #define for_each_tracing_cpu(cpu)      \
        for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-static int tracing_disabled = 1;
+/*
+ * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
+ *
+ * If there is an oops (or kernel panic) and ftrace_dump_on_oops
+ * is set, then ftrace_dump() is called. This will output the contents
+ * of the ftrace buffers to the console.  This is very useful for
+ * capturing traces that lead to crashes and outputting them to a
+ * serial console.
+ *
+ * It is off by default, but you can enable it either by specifying
+ * "ftrace_dump_on_oops" on the kernel command line, or by setting
+ * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ */
+int ftrace_dump_on_oops;
+
+static int tracing_set_tracer(char *buf);
+
+static int __init set_ftrace(char *str)
+{
+       tracing_set_tracer(str);
+       return 1;
+}
+__setup("ftrace", set_ftrace);
+
+static int __init set_ftrace_dump_on_oops(char *str)
+{
+       ftrace_dump_on_oops = 1;
+       return 1;
+}
+__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
 long
 ns2usecs(cycle_t nsec)
@@ -112,6 +165,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
 /* tracer_enabled is used to toggle activation of a tracer */
 static int                     tracer_enabled = 1;
 
+/**
+ * tracing_is_enabled - return tracer_enabled status
+ *
+ * This function is used by other tracers to know the status
+ * of the tracer_enabled flag.  Tracers may use this function
+ * to know if it should enable their features when starting
+ * up. See irqsoff tracer for an example (start_irqsoff_tracer).
+ */
+int tracing_is_enabled(void)
+{
+       return tracer_enabled;
+}
+
 /* function tracing enabled */
 int                            ftrace_function_enabled;
 
@@ -153,8 +219,9 @@ static DEFINE_MUTEX(trace_types_lock);
 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
-/* trace_flags holds iter_ctrl options */
-unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+/* trace_flags holds trace_options default values */
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
+       TRACE_ITER_ANNOTATE;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -193,13 +260,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
        return nsecs / 1000;
 }
 
-/*
- * TRACE_ITER_SYM_MASK masks the options in trace_flags that
- * control the output of kernel symbols.
- */
-#define TRACE_ITER_SYM_MASK \
-       (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
-
 /* These must match the bit positions in trace_iterator_flags */
 static const char *trace_options[] = {
        "print-parent",
@@ -213,6 +273,11 @@ static const char *trace_options[] = {
        "stacktrace",
        "sched-tree",
        "ftrace_printk",
+       "ftrace_preempt",
+       "branch",
+       "annotate",
+       "userstacktrace",
+       "sym-userobj",
        NULL
 };
 
@@ -359,6 +424,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
        return trace_seq_putmem(s, hex, j);
 }
 
+static int
+trace_seq_path(struct trace_seq *s, struct path *path)
+{
+       unsigned char *p;
+
+       if (s->len >= (PAGE_SIZE - 1))
+               return 0;
+       p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+       if (!IS_ERR(p)) {
+               p = mangle_path(s->buffer + s->len, p, "\n");
+               if (p) {
+                       s->len = p - s->buffer;
+                       return 1;
+               }
+       } else {
+               s->buffer[s->len++] = '?';
+               return 1;
+       }
+
+       return 0;
+}
+
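/*
 * Editor's note: trace_seq_path() leans on the seq_file helpers (hence
 * the new #include <linux/seq_file.h> above): d_path() renders the path
 * into the tail of the buffer, and mangle_path() copies it forward,
 * escaping any '\n' so a printed path cannot split a trace line. On
 * d_path() failure a literal '?' is emitted instead.
 */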
 static void
 trace_seq_reset(struct trace_seq *s)
 {
@@ -470,7 +557,15 @@ int register_tracer(struct tracer *type)
                return -1;
        }
 
+       /*
+        * When this gets called we hold the BKL which means that
+        * preemption is disabled. Various trace selftests however
+        * need to disable and enable preemption for successful tests.
+        * So we drop the BKL here and grab it after the tests again.
+        */
+       unlock_kernel();
        mutex_lock(&trace_types_lock);
+
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
@@ -481,11 +576,18 @@ int register_tracer(struct tracer *type)
                }
        }
 
+       if (!type->set_flag)
+               type->set_flag = &dummy_set_flag;
+       if (!type->flags)
+               type->flags = &dummy_tracer_flags;
+       else
+               if (!type->flags->opts)
+                       type->flags->opts = dummy_tracer_opt;
+
 #ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array *tr = &global_trace;
-               int saved_ctrl = tr->ctrl;
                int i;
                /*
                 * Run a selftest on this tracer.
@@ -494,25 +596,23 @@ int register_tracer(struct tracer *type)
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
-               for_each_tracing_cpu(i) {
+               for_each_tracing_cpu(i)
                        tracing_reset(tr, i);
-               }
+
                current_trace = type;
-               tr->ctrl = 0;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
-               tr->ctrl = saved_ctrl;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
-               for_each_tracing_cpu(i) {
+               for_each_tracing_cpu(i)
                        tracing_reset(tr, i);
-               }
+
                printk(KERN_CONT "PASSED\n");
        }
 #endif
@@ -525,6 +625,7 @@ int register_tracer(struct tracer *type)
 
  out:
        mutex_unlock(&trace_types_lock);
+       lock_kernel();
 
        return ret;
 }
@@ -581,6 +682,91 @@ static void trace_init_cmdlines(void)
        cmdline_idx = 0;
 }
 
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
+/**
+ * ftrace_off_permanent - disable all ftrace code permanently
+ *
+ * This should only be called when a serious anomaly has
+ * been detected.  This will turn off function tracing,
+ * ring buffers, and other tracing utilities. It takes no
+ * locks and can be called from any context.
+ */
+void ftrace_off_permanent(void)
+{
+       tracing_disabled = 1;
+       ftrace_stop();
+       tracing_off_permanent();
+}
+
+/**
+ * tracing_start - quick start of the tracer
+ *
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
+ */
+void tracing_start(void)
+{
+       struct ring_buffer *buffer;
+       unsigned long flags;
+
+       if (tracing_disabled)
+               return;
+
+       spin_lock_irqsave(&tracing_start_lock, flags);
+       if (--trace_stop_count)
+               goto out;
+
+       if (trace_stop_count < 0) {
+               /* Someone screwed up their debugging */
+               WARN_ON_ONCE(1);
+               trace_stop_count = 0;
+               goto out;
+       }
+
+       buffer = global_trace.buffer;
+       if (buffer)
+               ring_buffer_record_enable(buffer);
+
+       buffer = max_tr.buffer;
+       if (buffer)
+               ring_buffer_record_enable(buffer);
+
+       ftrace_start();
+ out:
+       spin_unlock_irqrestore(&tracing_start_lock, flags);
+}
+
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Lightweight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
+{
+       struct ring_buffer *buffer;
+       unsigned long flags;
+
+       ftrace_stop();
+       spin_lock_irqsave(&tracing_start_lock, flags);
+       if (trace_stop_count++)
+               goto out;
+
+       buffer = global_trace.buffer;
+       if (buffer)
+               ring_buffer_record_disable(buffer);
+
+       buffer = max_tr.buffer;
+       if (buffer)
+               ring_buffer_record_disable(buffer);
+
+ out:
+       spin_unlock_irqrestore(&tracing_start_lock, flags);
+}
+
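/*
 * Editor's note: the pair nests via trace_stop_count, e.g.:
 *
 *        tracing_stop();         count 0 -> 1: recording disabled
 *        tracing_stop();         count 1 -> 2: stays disabled
 *        tracing_start();        count 2 -> 1: stays disabled
 *        tracing_start();        count 1 -> 0: recording re-enabled
 */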
 void trace_stop_cmdline_recording(void);
 
 static void trace_save_cmdline(struct task_struct *tsk)
@@ -618,7 +804,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
        spin_unlock(&trace_cmdline_lock);
 }
 
-static char *trace_find_cmdline(int pid)
+char *trace_find_cmdline(int pid)
 {
        char *cmdline = "<...>";
        unsigned map;
@@ -655,6 +841,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
+       entry->tgid                     = (tsk) ? tsk->tgid : 0;
        entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -691,6 +878,56 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_graph_entry(struct trace_array *tr,
+                               struct trace_array_cpu *data,
+                               struct ftrace_graph_ent *trace,
+                               unsigned long flags,
+                               int pc)
+{
+       struct ring_buffer_event *event;
+       struct ftrace_graph_ent_entry *entry;
+       unsigned long irq_flags;
+
+       if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+               return;
+
+       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type                 = TRACE_GRAPH_ENT;
+       entry->graph_ent                        = *trace;
+       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+                               struct trace_array_cpu *data,
+                               struct ftrace_graph_ret *trace,
+                               unsigned long flags,
+                               int pc)
+{
+       struct ring_buffer_event *event;
+       struct ftrace_graph_ret_entry *entry;
+       unsigned long irq_flags;
+
+       if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+               return;
+
+       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type                 = TRACE_GRAPH_RET;
+       entry->ret                              = *trace;
+       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+#endif
+
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -742,6 +979,46 @@ void __trace_stack(struct trace_array *tr,
        ftrace_trace_stack(tr, data, flags, skip, preempt_count());
 }
 
+static void ftrace_trace_userstack(struct trace_array *tr,
+                  struct trace_array_cpu *data,
+                  unsigned long flags, int pc)
+{
+#ifdef CONFIG_STACKTRACE
+       struct ring_buffer_event *event;
+       struct userstack_entry *entry;
+       struct stack_trace trace;
+       unsigned long irq_flags;
+
+       if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
+               return;
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type         = TRACE_USER_STACK;
+
+       memset(&entry->caller, 0, sizeof(entry->caller));
+
+       trace.nr_entries        = 0;
+       trace.max_entries       = FTRACE_STACK_ENTRIES;
+       trace.skip              = 0;
+       trace.entries           = entry->caller;
+
+       save_stack_trace_user(&trace);
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
+}
+
+void __trace_userstack(struct trace_array *tr,
+                  struct trace_array_cpu *data,
+                  unsigned long flags)
+{
+       ftrace_trace_userstack(tr, data, flags, preempt_count());
+}
+
 static void
 ftrace_trace_special(void *__tr, void *__data,
                     unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -765,6 +1042,7 @@ ftrace_trace_special(void *__tr, void *__data,
        entry->arg3                     = arg3;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, irq_flags, 4, pc);
+       ftrace_trace_userstack(tr, data, irq_flags, pc);
 
        trace_wake_up();
 }
@@ -803,6 +1081,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->next_cpu = task_cpu(next);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, flags, 5, pc);
+       ftrace_trace_userstack(tr, data, flags, pc);
 }
 
 void
@@ -832,6 +1111,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->next_cpu                 = task_cpu(wakee);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, flags, 6, pc);
+       ftrace_trace_userstack(tr, data, flags, pc);
 
        trace_wake_up();
 }
@@ -841,26 +1121,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
+       unsigned long flags;
        int cpu;
        int pc;
 
-       if (tracing_disabled || !tr->ctrl)
+       if (tracing_disabled)
                return;
 
        pc = preempt_count();
-       preempt_disable_notrace();
+       local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
 
-       if (likely(!atomic_read(&data->disabled)))
+       if (likely(atomic_inc_return(&data->disabled) == 1))
                ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-       preempt_enable_notrace();
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
 }
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 {
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
@@ -873,8 +1155,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
                return;
 
        pc = preempt_count();
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
@@ -884,12 +1165,84 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
                trace_function(tr, data, ip, parent_ip, flags, pc);
 
        atomic_dec(&data->disabled);
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
 }
 
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       if (unlikely(!ftrace_function_enabled))
+               return;
+
+       /*
+        * Need to use raw, since this must be called before the
+        * recursive protection is performed.
+        */
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               trace_function(tr, data, ip, parent_ip, flags, pc);
+       }
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       raw_local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               __trace_graph_entry(tr, data, trace, flags, pc);
+       }
+       atomic_dec(&data->disabled);
+       raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       raw_local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               __trace_graph_return(tr, data, trace, flags, pc);
+       }
+       atomic_dec(&data->disabled);
+       raw_local_irq_restore(flags);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 static struct ftrace_ops trace_ops __read_mostly =
 {
        .func = function_trace_call,
@@ -898,9 +1251,14 @@ static struct ftrace_ops trace_ops __read_mostly =
 void tracing_start_function_trace(void)
 {
        ftrace_function_enabled = 0;
+
+       if (trace_flags & TRACE_ITER_PREEMPTONLY)
+               trace_ops.func = function_trace_call_preempt_only;
+       else
+               trace_ops.func = function_trace_call;
+
        register_ftrace_function(&trace_ops);
-       if (tracer_enabled)
-               ftrace_function_enabled = 1;
+       ftrace_function_enabled = 1;
 }
 
 void tracing_stop_function_trace(void)
@@ -912,6 +1270,7 @@ void tracing_stop_function_trace(void)
 
 enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
+       TRACE_FILE_ANNOTATE     = 2,
 };
 
 static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
@@ -1047,10 +1406,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
        atomic_inc(&trace_record_cmdline_disabled);
 
-       /* let the tracer grab locks here if needed */
-       if (current_trace->start)
-               current_trace->start(iter);
-
        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
@@ -1077,14 +1432,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-       struct trace_iterator *iter = m->private;
-
        atomic_dec(&trace_record_cmdline_disabled);
-
-       /* let the tracer release locks here if needed */
-       if (current_trace && current_trace == iter->trace && iter->trace->stop)
-               iter->trace->stop(iter);
-
        mutex_unlock(&trace_types_lock);
 }
 
@@ -1143,7 +1491,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 # define IP_FMT "%016lx"
 #endif
 
-static int
+int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 {
        int ret;
@@ -1164,6 +1512,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
        return ret;
 }
 
+static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+                                   unsigned long ip, unsigned long sym_flags)
+{
+       struct file *file = NULL;
+       unsigned long vmstart = 0;
+       int ret = 1;
+
+       if (mm) {
+               const struct vm_area_struct *vma;
+
+               down_read(&mm->mmap_sem);
+               vma = find_vma(mm, ip);
+               if (vma) {
+                       file = vma->vm_file;
+                       vmstart = vma->vm_start;
+               }
+               if (file) {
+                       ret = trace_seq_path(s, &file->f_path);
+                       if (ret)
+                               ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
+               }
+               up_read(&mm->mmap_sem);
+       }
+       if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
+               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+       return ret;
+}
+
+static int
+seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
+                     unsigned long sym_flags)
+{
+       struct mm_struct *mm = NULL;
+       int ret = 1;
+       unsigned int i;
+
+       if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
+               struct task_struct *task;
+               /*
+                * we do the lookup on the thread group leader,
+                * since individual threads might have already quit!
+                */
+               rcu_read_lock();
+               task = find_task_by_vpid(entry->ent.tgid);
+               if (task)
+                       mm = get_task_mm(task);
+               rcu_read_unlock();
+       }
+
+       for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+               unsigned long ip = entry->caller[i];
+
+               if (ip == ULONG_MAX || !ret)
+                       break;
+               if (i && ret)
+                       ret = trace_seq_puts(s, " <- ");
+               if (!ip) {
+                       if (ret)
+                               ret = trace_seq_puts(s, "??");
+                       continue;
+               }
+               if (!ret)
+                       break;
+               if (ret)
+                       ret = seq_print_user_ip(s, mm, ip, sym_flags);
+       }
+
+       if (mm)
+               mmput(mm);
+       return ret;
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
        seq_puts(m, "#                  _------=> CPU#            \n");
@@ -1338,6 +1758,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
                trace_seq_putc(s, '\n');
 }
 
+static void test_cpu_buff_start(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+
+       if (!(trace_flags & TRACE_ITER_ANNOTATE))
+               return;
+
+       if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
+               return;
+
+       if (cpu_isset(iter->cpu, iter->started))
+               return;
+
+       cpu_set(iter->cpu, iter->started);
+       trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
+}
+
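/*
 * Editor's note: when the buffers overran, __tracing_open() below sets
 * TRACE_FILE_ANNOTATE, and the first event printed from each CPU is then
 * preceded by a marker line, e.g. (sample output):
 *
 *        ##### CPU 1 buffer started ####
 */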
 static enum print_line_t
 print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 {
@@ -1357,6 +1794,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
        if (entry->type == TRACE_CONT)
                return TRACE_TYPE_HANDLED;
 
+       test_cpu_buff_start(iter);
+
        next_entry = find_next_entry(iter, NULL, &next_ts);
        if (!next_entry)
                next_ts = iter->ts;
@@ -1448,6 +1887,27 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                        trace_seq_print_cont(s, iter);
                break;
        }
+       case TRACE_BRANCH: {
+               struct trace_branch *field;
+
+               trace_assign_type(field, entry);
+
+               trace_seq_printf(s, "[%s] %s:%s:%d\n",
+                                field->correct ? "  ok  " : " MISS ",
+                                field->func,
+                                field->file,
+                                field->line);
+               break;
+       }
+       case TRACE_USER_STACK: {
+               struct userstack_entry *field;
+
+               trace_assign_type(field, entry);
+
+               seq_print_userip_objs(field, s, sym_flags);
+               trace_seq_putc(s, '\n');
+               break;
+       }
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
@@ -1472,6 +1932,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
        if (entry->type == TRACE_CONT)
                return TRACE_TYPE_HANDLED;
 
+       test_cpu_buff_start(iter);
+
        comm = trace_find_cmdline(iter->ent->pid);
 
        t = ns2usecs(iter->ts);
@@ -1581,6 +2043,37 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                        trace_seq_print_cont(s, iter);
                break;
        }
+       case TRACE_GRAPH_RET: {
+               return print_graph_function(iter);
+       }
+       case TRACE_GRAPH_ENT: {
+               return print_graph_function(iter);
+       }
+       case TRACE_BRANCH: {
+               struct trace_branch *field;
+
+               trace_assign_type(field, entry);
+
+               trace_seq_printf(s, "[%s] %s:%s:%d\n",
+                                field->correct ? "  ok  " : " MISS ",
+                                field->func,
+                                field->file,
+                                field->line);
+               break;
+       }
+       case TRACE_USER_STACK: {
+               struct userstack_entry *field;
+
+               trace_assign_type(field, entry);
+
+               ret = seq_print_userip_objs(field, s, sym_flags);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+               ret = trace_seq_putc(s, '\n');
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+               break;
+       }
        }
        return TRACE_TYPE_HANDLED;
 }
@@ -1640,6 +2133,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_SPECIAL:
+       case TRACE_USER_STACK:
        case TRACE_STACK: {
                struct special_entry *field;
 
@@ -1728,6 +2222,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_SPECIAL:
+       case TRACE_USER_STACK:
        case TRACE_STACK: {
                struct special_entry *field;
 
@@ -1782,6 +2277,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_SPECIAL:
+       case TRACE_USER_STACK:
        case TRACE_STACK: {
                struct special_entry *field;
 
@@ -1847,7 +2343,9 @@ static int s_show(struct seq_file *m, void *v)
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                        seq_puts(m, "#\n");
                }
-               if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+               if (iter->trace && iter->trace->print_header)
+                       iter->trace->print_header(m);
+               else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                        /* print nothing if the buffers are empty */
                        if (trace_empty(iter))
                                return 0;
@@ -1899,6 +2397,15 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
        iter->trace = current_trace;
        iter->pos = -1;
 
+       /* Notify the tracer early; before we stop tracing. */
+       if (iter->trace && iter->trace->open)
+                       iter->trace->open(iter);
+
+       /* Annotate start of buffers if we had overruns */
+       if (ring_buffer_overruns(iter->tr->buffer))
+               iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
        for_each_tracing_cpu(cpu) {
 
                iter->buffer_iter[cpu] =
@@ -1917,13 +2424,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
        m->private = iter;
 
        /* stop the trace while dumping */
-       if (iter->tr->ctrl) {
-               tracer_enabled = 0;
-               ftrace_function_enabled = 0;
-       }
-
-       if (iter->trace && iter->trace->open)
-                       iter->trace->open(iter);
+       tracing_stop();
 
        mutex_unlock(&trace_types_lock);
 
@@ -1966,14 +2467,7 @@ int tracing_release(struct inode *inode, struct file *file)
                iter->trace->close(iter);
 
        /* reenable tracing if it was previously enabled */
-       if (iter->tr->ctrl) {
-               tracer_enabled = 1;
-               /*
-                * It is safe to enable function tracing even if it
-                * isn't used
-                */
-               ftrace_function_enabled = 1;
-       }
+       tracing_start();
        mutex_unlock(&trace_types_lock);
 
        seq_release(inode, file);
@@ -2189,13 +2683,16 @@ static struct file_operations tracing_cpumask_fops = {
 };
 
 static ssize_t
-tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
+tracing_trace_options_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
 {
+       int i;
        char *buf;
        int r = 0;
        int len = 0;
-       int i;
+       u32 tracer_flags = current_trace->flags->val;
+       struct tracer_opt *trace_opts = current_trace->flags->opts;
 
        /* calculate max size */
        for (i = 0; trace_options[i]; i++) {
@@ -2203,6 +2700,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
                len += 3; /* "no" and space */
        }
 
+       /*
+        * Increase the size with the names of the options
+        * specific to the current tracer.
+        */
+       for (i = 0; trace_opts[i].name; i++) {
+               len += strlen(trace_opts[i].name);
+               len += 3; /* "no" and space */
+       }
+
        /* +2 for \n and \0 */
        buf = kmalloc(len + 2, GFP_KERNEL);
        if (!buf)
@@ -2215,6 +2721,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
                        r += sprintf(buf + r, "no%s ", trace_options[i]);
        }
 
+       for (i = 0; trace_opts[i].name; i++) {
+               if (tracer_flags & trace_opts[i].bit)
+                       r += sprintf(buf + r, "%s ",
+                               trace_opts[i].name);
+               else
+                       r += sprintf(buf + r, "no%s ",
+                               trace_opts[i].name);
+       }
+
        r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);
 
@@ -2225,13 +2740,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
        return r;
 }
 
+/* Try to assign a tracer specific option */
+static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+{
+       struct tracer_flags *trace_flags = trace->flags;
+       struct tracer_opt *opts = NULL;
+       int ret = 0, i = 0;
+       int len;
+
+       for (i = 0; trace_flags->opts[i].name; i++) {
+               opts = &trace_flags->opts[i];
+               len = strlen(opts->name);
+
+               if (strncmp(cmp, opts->name, len) == 0) {
+                       ret = trace->set_flag(trace_flags->val,
+                               opts->bit, !neg);
+                       break;
+               }
+       }
+       /* Not found */
+       if (!trace_flags->opts[i].name)
+               return -EINVAL;
+
+       /* Refused to handle */
+       if (ret)
+               return ret;
+
+       if (neg)
+               trace_flags->val &= ~opts->bit;
+       else
+               trace_flags->val |= opts->bit;
+
+       return 0;
+}
+
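
A hedged usage sketch of the combined interface: tracer-specific flags are written through the same trace_options file as the global ones, so with a current tracer that defines, say, the funcgraph-cpu option added later in this patch:

  # echo funcgraph-cpu > /debug/tracing/trace_options
  # echo nofuncgraph-cpu > /debug/tracing/trace_options

A name that matches neither the global table nor the current tracer's options makes set_tracer_option() return -EINVAL, which the write handler propagates.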
 static ssize_t
-tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
 {
        char buf[64];
        char *cmp = buf;
        int neg = 0;
+       int ret;
        int i;
 
        if (cnt >= sizeof(buf))
@@ -2258,11 +2808,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
                        break;
                }
        }
-       /*
-        * If no option could be set, return an error:
-        */
-       if (!trace_options[i])
-               return -EINVAL;
+
+       /* If no option could be set, test the specific tracer options */
+       if (!trace_options[i]) {
+               ret = set_tracer_option(current_trace, cmp, neg);
+               if (ret)
+                       return ret;
+       }
 
        filp->f_pos += cnt;
 
@@ -2271,8 +2823,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
 
 static struct file_operations tracing_iter_fops = {
        .open           = tracing_open_generic,
-       .read           = tracing_iter_ctrl_read,
-       .write          = tracing_iter_ctrl_write,
+       .read           = tracing_trace_options_read,
+       .write          = tracing_trace_options_write,
 };
 
 static const char readme_msg[] =
@@ -2286,9 +2838,9 @@ static const char readme_msg[] =
        "# echo sched_switch > /debug/tracing/current_tracer\n"
        "# cat /debug/tracing/current_tracer\n"
        "sched_switch\n"
-       "# cat /debug/tracing/iter_ctrl\n"
+       "# cat /debug/tracing/trace_options\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
-       "# echo print-parent > /debug/tracing/iter_ctrl\n"
+       "# echo print-parent > /debug/tracing/trace_options\n"
        "# echo 1 > /debug/tracing/tracing_enabled\n"
        "# cat /debug/tracing/trace > /tmp/trace.txt\n"
        "echo 0 > /debug/tracing/tracing_enabled\n"
@@ -2311,11 +2863,10 @@ static ssize_t
 tracing_ctrl_read(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
 {
-       struct trace_array *tr = filp->private_data;
        char buf[64];
        int r;
 
-       r = sprintf(buf, "%ld\n", tr->ctrl);
+       r = sprintf(buf, "%u\n", tracer_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -2343,16 +2894,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
        val = !!val;
 
        mutex_lock(&trace_types_lock);
-       if (tr->ctrl ^ val) {
-               if (val)
+       if (tracer_enabled ^ val) {
+               if (val) {
                        tracer_enabled = 1;
-               else
+                       if (current_trace->start)
+                               current_trace->start(tr);
+                       tracing_start();
+               } else {
                        tracer_enabled = 0;
-
-               tr->ctrl = val;
-
-               if (current_trace && current_trace->ctrl_update)
-                       current_trace->ctrl_update(tr);
+                       tracing_stop();
+                       if (current_trace->stop)
+                               current_trace->stop(tr);
+               }
        }
        mutex_unlock(&trace_types_lock);
 
@@ -2378,29 +2931,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
-static ssize_t
-tracing_set_trace_write(struct file *filp, const char __user *ubuf,
-                       size_t cnt, loff_t *ppos)
+static int tracing_set_tracer(char *buf)
 {
        struct trace_array *tr = &global_trace;
        struct tracer *t;
-       char buf[max_tracer_type_len+1];
-       int i;
-       size_t ret;
-
-       ret = cnt;
-
-       if (cnt > max_tracer_type_len)
-               cnt = max_tracer_type_len;
-
-       if (copy_from_user(&buf, ubuf, cnt))
-               return -EFAULT;
-
-       buf[cnt] = 0;
-
-       /* strip ending whitespace. */
-       for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
-               buf[i] = 0;
+       int ret = 0;
 
        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
@@ -2414,18 +2949,52 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
        if (t == current_trace)
                goto out;
 
+       trace_branch_disable();
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
 
        current_trace = t;
-       if (t->init)
-               t->init(tr);
+       if (t->init) {
+               ret = t->init(tr);
+               if (ret)
+                       goto out;
+       }
 
+       trace_branch_enable(tr);
  out:
        mutex_unlock(&trace_types_lock);
 
-       if (ret > 0)
-               filp->f_pos += ret;
+       return ret;
+}
+
+static ssize_t
+tracing_set_trace_write(struct file *filp, const char __user *ubuf,
+                       size_t cnt, loff_t *ppos)
+{
+       char buf[max_tracer_type_len+1];
+       int i;
+       size_t ret;
+       int err;
+
+       ret = cnt;
+
+       if (cnt > max_tracer_type_len)
+               cnt = max_tracer_type_len;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       /* strip ending whitespace. */
+       for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+               buf[i] = 0;
+
+       err = tracing_set_tracer(buf);
+       if (err)
+               return err;
+
+       filp->f_pos += ret;
 
        return ret;
 }
@@ -2492,6 +3061,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
                return -ENOMEM;
 
        mutex_lock(&trace_types_lock);
+
+       /* trace pipe does not show start of buffer */
+       cpus_setall(iter->started);
+
        iter->tr = &global_trace;
        iter->trace = current_trace;
        filp->private_data = iter;
@@ -2667,7 +3240,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
        char buf[64];
        int r;
 
-       r = sprintf(buf, "%lu\n", tr->entries);
+       r = sprintf(buf, "%lu\n", tr->entries >> 10);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
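
The shift keeps the user-visible unit in kilobytes while tr->entries stays in bytes; a hedged usage sketch against the debugfs file (renamed to buffer_size_kb later in this patch), with illustrative values:

  # echo 1024 > /debug/tracing/buffer_size_kb
  # cat /debug/tracing/buffer_size_kb
  1024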
 
@@ -2678,7 +3251,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        unsigned long val;
        char buf[64];
        int ret, cpu;
-       struct trace_array *tr = filp->private_data;
 
        if (cnt >= sizeof(buf))
                return -EINVAL;
@@ -2698,12 +3270,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 
        mutex_lock(&trace_types_lock);
 
-       if (tr->ctrl) {
-               cnt = -EBUSY;
-               pr_info("ftrace: please disable tracing"
-                       " before modifying buffer size\n");
-               goto out;
-       }
+       tracing_stop();
 
        /* disable all cpu buffers */
        for_each_tracing_cpu(cpu) {
@@ -2713,6 +3280,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
                        atomic_inc(&max_tr.data[cpu]->disabled);
        }
 
+       /* value is in KB */
+       val <<= 10;
+
        if (val != global_trace.entries) {
                ret = ring_buffer_resize(global_trace.buffer, val);
                if (ret < 0) {
@@ -2751,6 +3321,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
                        atomic_dec(&max_tr.data[cpu]->disabled);
        }
 
+       tracing_start();
        max_tr.entries = global_trace.entries;
        mutex_unlock(&trace_types_lock);
 
@@ -2773,9 +3344,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 {
        char *buf;
        char *end;
-       struct trace_array *tr = &global_trace;
 
-       if (!tr->ctrl || tracing_disabled)
+       if (tracing_disabled)
                return -EINVAL;
 
        if (cnt > TRACE_BUF_SIZE)
@@ -2841,22 +3411,38 @@ static struct file_operations tracing_mark_fops = {
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+int __weak ftrace_arch_read_dyn_info(char *buf, int size)
+{
+       return 0;
+}
+
 static ssize_t
-tracing_read_long(struct file *filp, char __user *ubuf,
+tracing_read_dyn_info(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
 {
+       static char ftrace_dyn_info_buffer[1024];
+       static DEFINE_MUTEX(dyn_info_mutex);
        unsigned long *p = filp->private_data;
-       char buf[64];
+       char *buf = ftrace_dyn_info_buffer;
+       int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
        int r;
 
-       r = sprintf(buf, "%ld\n", *p);
+       mutex_lock(&dyn_info_mutex);
+       r = sprintf(buf, "%ld ", *p);
 
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
+       buf[r++] = '\n';
+
+       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+
+       mutex_unlock(&dyn_info_mutex);
+
+       return r;
 }
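
Because ftrace_arch_read_dyn_info() is declared __weak above, an architecture can supply its own version to append text after the counter. A hedged sketch (the content is hypothetical; scnprintf() is used so the return value never exceeds the space offered):

int ftrace_arch_read_dyn_info(char *buf, int size)
{
        /* append architecture specific text after the total count */
        return scnprintf(buf, size, "arch entries: %d", 0);
}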
 
-static struct file_operations tracing_read_long_fops = {
+static struct file_operations tracing_dyn_info_fops = {
        .open           = tracing_open_generic,
-       .read           = tracing_read_long,
+       .read           = tracing_read_dyn_info,
 };
 #endif
 
@@ -2897,10 +3483,10 @@ static __init int tracer_init_debugfs(void)
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
 
-       entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
+       entry = debugfs_create_file("trace_options", 0644, d_tracer,
                                    NULL, &tracing_iter_fops);
        if (!entry)
-               pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
+               pr_warning("Could not create debugfs 'trace_options' entry\n");
 
        entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
                                    NULL, &tracing_cpumask_fops);
@@ -2950,11 +3536,11 @@ static __init int tracer_init_debugfs(void)
                pr_warning("Could not create debugfs "
                           "'trace_pipe' entry\n");
 
-       entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+       entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
                                    &global_trace, &tracing_entries_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
-                          "'trace_entries' entry\n");
+                          "'buffer_size_kb' entry\n");
 
        entry = debugfs_create_file("trace_marker", 0220, d_tracer,
                                    NULL, &tracing_mark_fops);
@@ -2965,7 +3551,7 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
-                                   &tracing_read_long_fops);
+                                   &tracing_dyn_info_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'dyn_ftrace_total_info' entry\n");
@@ -2988,7 +3574,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
        unsigned long flags, irq_flags;
        int cpu, len = 0, size, pc;
 
-       if (!tr->ctrl || tracing_disabled)
+       if (tracing_disabled)
                return 0;
 
        pc = preempt_count();
@@ -3046,7 +3632,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk);
 static int trace_panic_handler(struct notifier_block *this,
                               unsigned long event, void *unused)
 {
-       ftrace_dump();
+       if (ftrace_dump_on_oops)
+               ftrace_dump();
        return NOTIFY_OK;
 }
 
@@ -3062,7 +3649,8 @@ static int trace_die_handler(struct notifier_block *self,
 {
        switch (val) {
        case DIE_OOPS:
-               ftrace_dump();
+               if (ftrace_dump_on_oops)
+                       ftrace_dump();
                break;
        default:
                break;
@@ -3103,7 +3691,6 @@ trace_printk_seq(struct trace_seq *s)
        trace_seq_reset(s);
 }
 
-
 void ftrace_dump(void)
 {
        static DEFINE_SPINLOCK(ftrace_dump_lock);
@@ -3128,6 +3715,9 @@ void ftrace_dump(void)
                atomic_inc(&global_trace.data[cpu]->disabled);
        }
 
+       /* don't look at user memory in panic mode */
+       trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+
        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
        iter.tr = &global_trace;
@@ -3221,7 +3811,6 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
        /* All seems OK, enable tracing */
-       global_trace.ctrl = tracer_enabled;
        tracing_disabled = 0;
 
        atomic_notifier_chain_register(&panic_notifier_list,
index 8465ad052707afe380e2fba0017c0f37fb1d5093..f96f4e787ff39fb8310ddc0efdc5845ccd43f30c 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/ring_buffer.h>
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>
+#include <trace/boot.h>
 
 enum trace_type {
        __TRACE_FIRST_TYPE = 0,
@@ -21,7 +22,14 @@ enum trace_type {
        TRACE_SPECIAL,
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
-       TRACE_BOOT,
+       TRACE_BRANCH,
+       TRACE_BOOT_CALL,
+       TRACE_BOOT_RET,
+       TRACE_GRAPH_RET,
+       TRACE_GRAPH_ENT,
+       TRACE_USER_STACK,
+       TRACE_BTS,
+       TRACE_POWER,
 
        __TRACE_LAST_TYPE
 };
@@ -38,6 +46,7 @@ struct trace_entry {
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
+       int                     tgid;
 };
 
 /*
@@ -48,6 +57,18 @@ struct ftrace_entry {
        unsigned long           ip;
        unsigned long           parent_ip;
 };
+
+/* Function call entry */
+struct ftrace_graph_ent_entry {
+       struct trace_entry                      ent;
+       struct ftrace_graph_ent         graph_ent;
+};
+
+/* Function return entry */
+struct ftrace_graph_ret_entry {
+       struct trace_entry                      ent;
+       struct ftrace_graph_ret         ret;
+};
 extern struct tracer boot_tracer;
 
 /*
@@ -85,6 +106,11 @@ struct stack_entry {
        unsigned long           caller[FTRACE_STACK_ENTRIES];
 };
 
+struct userstack_entry {
+       struct trace_entry      ent;
+       unsigned long           caller[FTRACE_STACK_ENTRIES];
+};
+
 /*
  * ftrace_printk entry:
  */
@@ -112,9 +138,35 @@ struct trace_mmiotrace_map {
        struct mmiotrace_map    map;
 };
 
-struct trace_boot {
+struct trace_boot_call {
+       struct trace_entry      ent;
+       struct boot_trace_call boot_call;
+};
+
+struct trace_boot_ret {
+       struct trace_entry      ent;
+       struct boot_trace_ret boot_ret;
+};
+
+#define TRACE_FUNC_SIZE 30
+#define TRACE_FILE_SIZE 20
+struct trace_branch {
+       struct trace_entry      ent;
+       unsigned                line;
+       char                    func[TRACE_FUNC_SIZE+1];
+       char                    file[TRACE_FILE_SIZE+1];
+       char                    correct;
+};
+
+struct bts_entry {
+       struct trace_entry      ent;
+       unsigned long           from;
+       unsigned long           to;
+};
+
+struct trace_power {
        struct trace_entry      ent;
-       struct boot_trace       initcall;
+       struct power_trace      state_data;
 };
 
 /*
@@ -172,7 +224,6 @@ struct trace_iterator;
 struct trace_array {
        struct ring_buffer      *buffer;
        unsigned long           entries;
-       long                    ctrl;
        int                     cpu;
        cycle_t                 time_start;
        struct task_struct      *waiter;
@@ -212,13 +263,22 @@ extern void __ftrace_bad_type(void);
                IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);        \
                IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
                IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);   \
+               IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
                IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);   \
                IF_ASSIGN(var, ent, struct special_entry, 0);           \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,          \
                          TRACE_MMIO_RW);                               \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_map,         \
                          TRACE_MMIO_MAP);                              \
-               IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);     \
+               IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
+               IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
+               IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
+               IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,      \
+                         TRACE_GRAPH_ENT);             \
+               IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
+                         TRACE_GRAPH_RET);             \
+               IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
+               IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
                __ftrace_bad_type();                                    \
        } while (0)
 
@@ -229,29 +289,56 @@ enum print_line_t {
        TRACE_TYPE_UNHANDLED    = 2     /* Relay to other output functions */
 };
 
+
+/*
+ * An option specific to a tracer; a boolean value.
+ * The bit is the mask that selects its value in the
+ * val field of struct tracer_flags.
+ */
+struct tracer_opt {
+       const char      *name; /* Will appear in the trace_options file */
+       u32             bit; /* Mask assigned in the val field of tracer_flags */
+};
+
+/*
+ * The set of specific options for a tracer. Your tracer
+ * has to set the initial value of the flags val.
+ */
+struct tracer_flags {
+       u32                     val;
+       struct tracer_opt       *opts;
+};
+
+/* Makes it easier to define a tracer opt */
+#define TRACER_OPT(s, b)       .name = #s, .bit = b
+
 /*
  * A specific tracer, represented by methods that operate on a trace array:
  */
 struct tracer {
        const char              *name;
-       void                    (*init)(struct trace_array *tr);
+       /* Your tracer should raise a warning if init fails */
+       int                     (*init)(struct trace_array *tr);
        void                    (*reset)(struct trace_array *tr);
+       void                    (*start)(struct trace_array *tr);
+       void                    (*stop)(struct trace_array *tr);
        void                    (*open)(struct trace_iterator *iter);
        void                    (*pipe_open)(struct trace_iterator *iter);
        void                    (*close)(struct trace_iterator *iter);
-       void                    (*start)(struct trace_iterator *iter);
-       void                    (*stop)(struct trace_iterator *iter);
        ssize_t                 (*read)(struct trace_iterator *iter,
                                        struct file *filp, char __user *ubuf,
                                        size_t cnt, loff_t *ppos);
-       void                    (*ctrl_update)(struct trace_array *tr);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
        int                     (*selftest)(struct tracer *trace,
                                            struct trace_array *tr);
 #endif
+       void                    (*print_header)(struct seq_file *m);
        enum print_line_t       (*print_line)(struct trace_iterator *iter);
+       /* If you handled the flag setting, return 0 */
+       int                     (*set_flag)(u32 old_flags, u32 bit, int set);
        struct tracer           *next;
        int                     print_max;
+       struct tracer_flags     *flags;
 };
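
Pulling the new hooks together, a hedged skeleton of a tracer under this API (every name below is hypothetical): init now returns an error code, and per-tracer options hang off the flags pointer.

static struct tracer_opt example_opts[] = {
        { TRACER_OPT(example-verbose, 0x1) },
        { } /* terminator */
};

static struct tracer_flags example_flags = {
        .val    = 0,
        .opts   = example_opts,
};

static int example_init(struct trace_array *tr)
{
        return 0;       /* nonzero makes tracing_set_tracer() fail */
}

static void example_reset(struct trace_array *tr)
{
}

static int example_set_flag(u32 old_flags, u32 bit, int set)
{
        return 0;       /* 0 = accepted; the core then updates flags->val */
}

static struct tracer example_tracer __read_mostly = {
        .name           = "example",
        .init           = example_init,
        .reset          = example_reset,
        .set_flag       = example_set_flag,
        .flags          = &example_flags,
};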
 
 struct trace_seq {
@@ -279,8 +366,11 @@ struct trace_iterator {
        unsigned long           iter_flags;
        loff_t                  pos;
        long                    idx;
+
+       cpumask_t               started;
 };
 
+int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
 int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -321,8 +411,17 @@ void trace_function(struct trace_array *tr,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);
 
+void trace_graph_return(struct ftrace_graph_ret *trace);
+void trace_graph_entry(struct ftrace_graph_ent *trace);
+void trace_bts(struct trace_array *tr,
+              unsigned long from,
+              unsigned long to);
+
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
+void tracing_sched_switch_assign_trace(struct trace_array *tr);
+void tracing_stop_sched_switch_record(void);
+void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 void unregister_tracer(struct tracer *type);
 
@@ -358,6 +457,7 @@ struct tracer_switch_ops {
        struct tracer_switch_ops        *next;
 };
 
+char *trace_find_cmdline(int pid);
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -383,12 +483,18 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
                                               struct trace_array *tr);
 extern int trace_selftest_startup_sysprof(struct tracer *trace,
                                               struct trace_array *tr);
+extern int trace_selftest_startup_branch(struct tracer *trace,
+                                        struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
 extern void trace_seq_print_cont(struct trace_seq *s,
                                 struct trace_iterator *iter);
+
+extern int
+seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
+               unsigned long sym_flags);
 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
                                 size_t cnt);
 extern long ns2usecs(cycle_t nsec);
@@ -396,6 +502,17 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 
 extern unsigned long trace_flags;
 
+/* Standard output formatting function used for function return traces */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+#else
+static inline enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+       return TRACE_TYPE_UNHANDLED;
+}
+#endif
+
 /*
  * trace_iterator_flags is an enumeration that defines bit
  * positions into trace_flags that controls the output.
@@ -415,8 +532,92 @@ enum trace_iterator_flags {
        TRACE_ITER_STACKTRACE           = 0x100,
        TRACE_ITER_SCHED_TREE           = 0x200,
        TRACE_ITER_PRINTK               = 0x400,
+       TRACE_ITER_PREEMPTONLY          = 0x800,
+       TRACE_ITER_BRANCH               = 0x1000,
+       TRACE_ITER_ANNOTATE             = 0x2000,
+       TRACE_ITER_USERSTACKTRACE       = 0x4000,
+       TRACE_ITER_SYM_USEROBJ          = 0x8000
 };
 
+/*
+ * TRACE_ITER_SYM_MASK masks the options in trace_flags that
+ * control the output of kernel symbols.
+ */
+#define TRACE_ITER_SYM_MASK \
+       (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
+
 extern struct tracer nop_trace;
 
+/**
+ * ftrace_preempt_disable - disable preemption scheduler safe
+ *
+ * When tracing can happen inside the scheduler, there exist
+ * cases where the tracing might happen before the need_resched
+ * flag is checked. If this happens and the tracer calls
+ * preempt_enable (after a disable), a schedule might take place
+ * causing an infinite recursion.
+ *
+ * To prevent this, we read the need_resched flag before
+ * disabling preemption. When we want to enable preemption we
+ * check the flag; if it is set, we call preempt_enable_no_resched.
+ * Otherwise, we call preempt_enable.
+ *
+ * The rationale for the above is that if need_resched is set
+ * and we have yet to reschedule, we are either in an atomic location
+ * (where we do not need to check for scheduling) or we are inside
+ * the scheduler and do not want to resched.
+ */
+static inline int ftrace_preempt_disable(void)
+{
+       int resched;
+
+       resched = need_resched();
+       preempt_disable_notrace();
+
+       return resched;
+}
+
+/**
+ * ftrace_preempt_enable - enable preemption scheduler safe
+ * @resched: the return value from ftrace_preempt_disable
+ *
+ * This is a scheduler safe way to enable preemption and not miss
+ * any preemption checks. ftrace_preempt_disable() saved the
+ * need_resched state. If resched is set, then we were either inside
+ * an atomic section or inside the scheduler (we would have already
+ * scheduled otherwise). In this case, we do not want to call the
+ * normal preempt_enable, but preempt_enable_no_resched instead.
+ */
+static inline void ftrace_preempt_enable(int resched)
+{
+       if (resched)
+               preempt_enable_no_resched_notrace();
+       else
+               preempt_enable_notrace();
+}
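
A hedged usage sketch of the pair inside a tracing callback (the hook itself is hypothetical):

static void example_trace_hook(void)
{
        int resched;

        resched = ftrace_preempt_disable();
        /* ... record an event; this may run inside the scheduler ... */
        ftrace_preempt_enable(resched);
}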
+
+#ifdef CONFIG_BRANCH_TRACER
+extern int enable_branch_tracing(struct trace_array *tr);
+extern void disable_branch_tracing(void);
+static inline int trace_branch_enable(struct trace_array *tr)
+{
+       if (trace_flags & TRACE_ITER_BRANCH)
+               return enable_branch_tracing(tr);
+       return 0;
+}
+static inline void trace_branch_disable(void)
+{
+       /* due to races, always disable */
+       disable_branch_tracing();
+}
+#else
+static inline int trace_branch_enable(struct trace_array *tr)
+{
+       return 0;
+}
+static inline void trace_branch_disable(void)
+{
+}
+#endif /* CONFIG_BRANCH_TRACER */
+
 #endif /* _LINUX_KERNEL_TRACE_H */
index d0a5e50eeff26d17405ffa4cb62cbde710f9de05..a4fa2c57e34e376e0f58920e816db6c159530bcb 100644 (file)
 #include "trace.h"
 
 static struct trace_array *boot_trace;
-static int trace_boot_enabled;
+static bool pre_initcalls_finished;
 
-
-/* Should be started after do_pre_smp_initcalls() in init/main.c */
+/* Tells the boot tracer that the pre_smp_initcalls are finished,
+ * so we are ready to record.
+ * It doesn't enable sched event tracing, however;
+ * you have to call enable_boot_trace to do so.
+ */
 void start_boot_trace(void)
 {
-       trace_boot_enabled = 1;
+       pre_initcalls_finished = true;
 }
 
-void stop_boot_trace(void)
+void enable_boot_trace(void)
 {
-       trace_boot_enabled = 0;
+       if (pre_initcalls_finished)
+               tracing_start_sched_switch_record();
 }
 
-void reset_boot_trace(struct trace_array *tr)
+void disable_boot_trace(void)
 {
-       stop_boot_trace();
+       if (pre_initcalls_finished)
+               tracing_stop_sched_switch_record();
 }
 
-static void boot_trace_init(struct trace_array *tr)
+static void reset_boot_trace(struct trace_array *tr)
 {
        int cpu;
-       boot_trace = tr;
 
-       trace_boot_enabled = 0;
+       tr->time_start = ftrace_now(tr->cpu);
+
+       for_each_online_cpu(cpu)
+               tracing_reset(tr, cpu);
+}
+
+static int boot_trace_init(struct trace_array *tr)
+{
+       int cpu;
+       boot_trace = tr;
 
        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);
+
+       tracing_sched_switch_assign_trace(tr);
+       return 0;
 }
 
-static void boot_trace_ctrl_update(struct trace_array *tr)
+static enum print_line_t
+initcall_call_print_line(struct trace_iterator *iter)
 {
-       if (tr->ctrl)
-               start_boot_trace();
+       struct trace_entry *entry = iter->ent;
+       struct trace_seq *s = &iter->seq;
+       struct trace_boot_call *field;
+       struct boot_trace_call *call;
+       u64 ts;
+       unsigned long nsec_rem;
+       int ret;
+
+       trace_assign_type(field, entry);
+       call = &field->boot_call;
+       ts = iter->ts;
+       nsec_rem = do_div(ts, 1000000000);
+
+       ret = trace_seq_printf(s, "[%5ld.%09ld] calling  %s @ %i\n",
+                       (unsigned long)ts, nsec_rem, call->func, call->caller);
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
        else
-               stop_boot_trace();
+               return TRACE_TYPE_HANDLED;
 }
 
-static enum print_line_t initcall_print_line(struct trace_iterator *iter)
+static enum print_line_t
+initcall_ret_print_line(struct trace_iterator *iter)
 {
-       int ret;
        struct trace_entry *entry = iter->ent;
-       struct trace_boot *field = (struct trace_boot *)entry;
-       struct boot_trace *it = &field->initcall;
        struct trace_seq *s = &iter->seq;
-       struct timespec calltime = ktime_to_timespec(it->calltime);
-       struct timespec rettime = ktime_to_timespec(it->rettime);
-
-       if (entry->type == TRACE_BOOT) {
-               ret = trace_seq_printf(s, "[%5ld.%09ld] calling  %s @ %i\n",
-                                         calltime.tv_sec,
-                                         calltime.tv_nsec,
-                                         it->func, it->caller);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-
-               ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
-                                         "returned %d after %lld msecs\n",
-                                         rettime.tv_sec,
-                                         rettime.tv_nsec,
-                                         it->func, it->result, it->duration);
-
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
+       struct trace_boot_ret *field;
+       struct boot_trace_ret *init_ret;
+       u64 ts;
+       unsigned long nsec_rem;
+       int ret;
+
+       trace_assign_type(field, entry);
+       init_ret = &field->boot_ret;
+       ts = iter->ts;
+       nsec_rem = do_div(ts, 1000000000);
+
+       ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
+                       "returned %d after %llu msecs\n",
+                       (unsigned long) ts,
+                       nsec_rem,
+                       init_ret->func, init_ret->result, init_ret->duration);
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+       else
                return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t initcall_print_line(struct trace_iterator *iter)
+{
+       struct trace_entry *entry = iter->ent;
+
+       switch (entry->type) {
+       case TRACE_BOOT_CALL:
+               return initcall_call_print_line(iter);
+       case TRACE_BOOT_RET:
+               return initcall_ret_print_line(iter);
+       default:
+               return TRACE_TYPE_UNHANDLED;
        }
-       return TRACE_TYPE_UNHANDLED;
 }
 
 struct tracer boot_tracer __read_mostly =
@@ -87,27 +131,53 @@ struct tracer boot_tracer __read_mostly =
        .name           = "initcall",
        .init           = boot_trace_init,
        .reset          = reset_boot_trace,
-       .ctrl_update    = boot_trace_ctrl_update,
        .print_line     = initcall_print_line,
 };
 
-void trace_boot(struct boot_trace *it, initcall_t fn)
+void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 {
        struct ring_buffer_event *event;
-       struct trace_boot *entry;
-       struct trace_array_cpu *data;
+       struct trace_boot_call *entry;
        unsigned long irq_flags;
        struct trace_array *tr = boot_trace;
 
-       if (!trace_boot_enabled)
+       if (!pre_initcalls_finished)
                return;
 
        /* Get its name now since this function could
         * disappear because it is in the .init section.
         */
-       sprint_symbol(it->func, (unsigned long)fn);
+       sprint_symbol(bt->func, (unsigned long)fn);
+       preempt_disable();
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               goto out;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, 0, 0);
+       entry->ent.type = TRACE_BOOT_CALL;
+       entry->boot_call = *bt;
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+       trace_wake_up();
+
+ out:
+       preempt_enable();
+}
+
+void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
+{
+       struct ring_buffer_event *event;
+       struct trace_boot_ret *entry;
+       unsigned long irq_flags;
+       struct trace_array *tr = boot_trace;
+
+       if (!pre_initcalls_finished)
+               return;
+
+       sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();
-       data = tr->data[smp_processor_id()];
 
        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
@@ -115,8 +185,8 @@ void trace_boot(struct boot_trace *it, initcall_t fn)
                goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
-       entry->ent.type = TRACE_BOOT;
-       entry->initcall = *it;
+       entry->ent.type = TRACE_BOOT_RET;
+       entry->boot_ret = *bt;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
        trace_wake_up();
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
new file mode 100644 (file)
index 0000000..bc97275
--- /dev/null
@@ -0,0 +1,342 @@
+/*
+ * unlikely profiler
+ *
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ */
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/irqflags.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/hash.h>
+#include <linux/fs.h>
+#include <asm/local.h>
+#include "trace.h"
+
+#ifdef CONFIG_BRANCH_TRACER
+
+static int branch_tracing_enabled __read_mostly;
+static DEFINE_MUTEX(branch_tracing_mutex);
+static struct trace_array *branch_tracer;
+
+static void
+probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+       struct trace_array *tr = branch_tracer;
+       struct ring_buffer_event *event;
+       struct trace_branch *entry;
+       unsigned long flags, irq_flags;
+       int cpu, pc;
+       const char *p;
+
+       /*
+        * I would love to save just the ftrace_likely_data pointer, but
+        * this code can also be used by modules. Ugly things can happen
+        * if the module is unloaded, and then we go and read the
+        * pointer.  This is slower, but much safer.
+        */
+
+       if (unlikely(!tr))
+               return;
+
+       raw_local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+               goto out;
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               goto out;
+
+       pc = preempt_count();
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type         = TRACE_BRANCH;
+
+       /* Strip off the path, only save the file */
+       p = f->file + strlen(f->file);
+       while (p >= f->file && *p != '/')
+               p--;
+       p++;
+
+       strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
+       strncpy(entry->file, p, TRACE_FILE_SIZE);
+       entry->func[TRACE_FUNC_SIZE] = 0;
+       entry->file[TRACE_FILE_SIZE] = 0;
+       entry->line = f->line;
+       entry->correct = val == expect;
+
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+ out:
+       atomic_dec(&tr->data[cpu]->disabled);
+       raw_local_irq_restore(flags);
+}
+
+static inline
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+       if (!branch_tracing_enabled)
+               return;
+
+       probe_likely_condition(f, val, expect);
+}
+
+int enable_branch_tracing(struct trace_array *tr)
+{
+       int ret = 0;
+
+       mutex_lock(&branch_tracing_mutex);
+       branch_tracer = tr;
+       /*
+        * branch_tracer must be visible before enabling; the reader
+        * tests the enable flag first, so no matching rmb() is needed.
+        */
+       smp_wmb();
+       branch_tracing_enabled++;
+       mutex_unlock(&branch_tracing_mutex);
+
+       return ret;
+}
+
+void disable_branch_tracing(void)
+{
+       mutex_lock(&branch_tracing_mutex);
+
+       if (!branch_tracing_enabled)
+               goto out_unlock;
+
+       branch_tracing_enabled--;
+
+ out_unlock:
+       mutex_unlock(&branch_tracing_mutex);
+}
+
+static void start_branch_trace(struct trace_array *tr)
+{
+       enable_branch_tracing(tr);
+}
+
+static void stop_branch_trace(struct trace_array *tr)
+{
+       disable_branch_tracing();
+}
+
+static int branch_trace_init(struct trace_array *tr)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               tracing_reset(tr, cpu);
+
+       start_branch_trace(tr);
+       return 0;
+}
+
+static void branch_trace_reset(struct trace_array *tr)
+{
+       stop_branch_trace(tr);
+}
+
+struct tracer branch_trace __read_mostly =
+{
+       .name           = "branch",
+       .init           = branch_trace_init,
+       .reset          = branch_trace_reset,
+#ifdef CONFIG_FTRACE_SELFTEST
+       .selftest       = trace_selftest_startup_branch,
+#endif
+};
+
+__init static int init_branch_trace(void)
+{
+       return register_tracer(&branch_trace);
+}
+
+device_initcall(init_branch_trace);
+#else
+static inline
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+}
+#endif /* CONFIG_BRANCH_TRACER */
+
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
+{
+       /*
+        * I would love to have a trace point here instead, but the
+        * trace point code is so inundated with unlikely and likely
+        * conditions that the recursive nightmare that exists is too
+        * much to try to get working. At least for now.
+        */
+       trace_likely_condition(f, val, expect);
+
+       /* FIXME: Make this atomic! */
+       if (val == expect)
+               f->correct++;
+       else
+               f->incorrect++;
+}
+EXPORT_SYMBOL(ftrace_likely_update);
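
For illustration, a hedged sketch of the kind of wrapper that feeds ftrace_likely_update(); the real annotation macros live in include/linux/compiler.h (changed elsewhere in this merge) and additionally place the record in the annotated-branch-profile section read below, which is omitted here:

#define example_likely(x) ({                                    \
        static struct ftrace_branch_data ______f = {            \
                .func   = __func__,                             \
                .file   = __FILE__,                             \
                .line   = __LINE__,                             \
        };                                                      \
        int ______r = !!(x);                                    \
        /* expect == 1: the branch was annotated as likely */   \
        ftrace_likely_update(&______f, ______r, 1);             \
        ______r;                                                \
})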
+
+struct ftrace_pointer {
+       void            *start;
+       void            *stop;
+       int             hit;
+};
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       const struct ftrace_pointer *f = m->private;
+       struct ftrace_branch_data *p = v;
+
+       (*pos)++;
+
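+       /* (void *)1 is the header sentinel handed out by t_start() */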
+       if (v == (void *)1)
+               return f->start;
+
+       ++p;
+
+       if ((void *)p >= (void *)f->stop)
+               return NULL;
+
+       return p;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+       void *t = (void *)1;
+       loff_t l = 0;
+
+       for (; t && l < *pos; t = t_next(m, t, &l))
+               ;
+
+       return t;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+       const struct ftrace_pointer *fp = m->private;
+       struct ftrace_branch_data *p = v;
+       const char *f;
+       long percent;
+
+       if (v == (void *)1) {
+               if (fp->hit)
+                       seq_printf(m, "   miss      hit    %% ");
+               else
+                       seq_printf(m, " correct incorrect  %% ");
+               seq_printf(m, "       Function                "
+                             "  File              Line\n"
+                             " ------- ---------  - "
+                             "       --------                "
+                             "  ----              ----\n");
+               return 0;
+       }
+
+       /* Only print the file, not the path */
+       f = p->file + strlen(p->file);
+       while (f >= p->file && *f != '/')
+               f--;
+       f++;
+
+       /*
+        * The miss count is overlaid on correct, and the hit count on incorrect.
+        */
+       if (p->correct) {
+               percent = p->incorrect * 100;
+               percent /= p->correct + p->incorrect;
+       } else
+               percent = p->incorrect ? 100 : -1;
+
+       seq_printf(m, "%8lu %8lu ",  p->correct, p->incorrect);
+       if (percent < 0)
+               seq_printf(m, "  X ");
+       else
+               seq_printf(m, "%3ld ", percent);
+       seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
+       return 0;
+}
+
+static struct seq_operations tracing_likely_seq_ops = {
+       .start          = t_start,
+       .next           = t_next,
+       .stop           = t_stop,
+       .show           = t_show,
+};
+
+static int tracing_branch_open(struct inode *inode, struct file *file)
+{
+       int ret;
+
+       ret = seq_open(file, &tracing_likely_seq_ops);
+       if (!ret) {
+               struct seq_file *m = file->private_data;
+               m->private = (void *)inode->i_private;
+       }
+
+       return ret;
+}
+
+static const struct file_operations tracing_branch_fops = {
+       .open           = tracing_branch_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+extern unsigned long __start_branch_profile[];
+extern unsigned long __stop_branch_profile[];
+
+static const struct ftrace_pointer ftrace_branch_pos = {
+       .start                  = __start_branch_profile,
+       .stop                   = __stop_branch_profile,
+       .hit                    = 1,
+};
+
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+extern unsigned long __start_annotated_branch_profile[];
+extern unsigned long __stop_annotated_branch_profile[];
+
+static const struct ftrace_pointer ftrace_annotated_branch_pos = {
+       .start                  = __start_annotated_branch_profile,
+       .stop                   = __stop_annotated_branch_profile,
+};
+
+static __init int ftrace_branch_init(void)
+{
+       struct dentry *d_tracer;
+       struct dentry *entry;
+
+       d_tracer = tracing_init_dentry();
+
+       entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
+                                   (void *)&ftrace_annotated_branch_pos,
+                                   &tracing_branch_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'profile_annotated_branch' entry\n");
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+       entry = debugfs_create_file("profile_branch", 0444, d_tracer,
+                                   (void *)&ftrace_branch_pos,
+                                   &tracing_branch_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs"
+                          " 'profile_branch' entry\n");
+#endif
+
+       return 0;
+}
+
+device_initcall(ftrace_branch_init);
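
A hedged usage sketch, assuming debugfs is mounted at /debug as in the readme text earlier in this patch:

  # echo branch > /debug/tracing/current_tracer
  # cat /debug/tracing/trace
  # cat /debug/tracing/profile_annotated_branch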
diff --git a/kernel/trace/trace_bts.c b/kernel/trace/trace_bts.c
new file mode 100644 (file)
index 0000000..23b76e4
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * BTS tracer
+ *
+ * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/ds.h>
+
+#include "trace.h"
+
+
+#define SIZEOF_BTS (1 << 13)
+
+static DEFINE_PER_CPU(struct bts_tracer *, tracer);
+static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
+
+#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_buffer per_cpu(buffer, smp_processor_id())
+
+
+/*
+ * Information to interpret a BTS record.
+ * This will go into an in-kernel BTS interface.
+ */
+static unsigned char sizeof_field;
+static unsigned long debugctl_mask;
+
+#define sizeof_bts (3 * sizeof_field)
+
+static void bts_trace_cpuinit(struct cpuinfo_x86 *c)
+{
+       switch (c->x86) {
+       case 0x6:
+               switch (c->x86_model) {
+               case 0x0 ... 0xC:
+                       break;
+               case 0xD:
+               case 0xE: /* Pentium M */
+                       sizeof_field = sizeof(long);
+                       debugctl_mask = (1<<6)|(1<<7);
+                       break;
+               default:
+                       sizeof_field = 8;
+                       debugctl_mask = (1<<6)|(1<<7);
+                       break;
+               }
+               break;
+       case 0xF:
+               switch (c->x86_model) {
+               case 0x0:
+               case 0x1:
+               case 0x2: /* Netburst */
+                       sizeof_field = sizeof(long);
+                       debugctl_mask = (1<<2)|(1<<3);
+                       break;
+               default:
+                       /* sorry, don't know about them */
+                       break;
+               }
+               break;
+       default:
+               /* sorry, don't know about them */
+               break;
+       }
+}
+
+static inline void bts_enable(void)
+{
+       unsigned long debugctl;
+
+       rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+       wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | debugctl_mask);
+}
+
+static inline void bts_disable(void)
+{
+       unsigned long debugctl;
+
+       rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+       wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl & ~debugctl_mask);
+}
+
+static void bts_trace_reset(struct trace_array *tr)
+{
+       int cpu;
+
+       tr->time_start = ftrace_now(tr->cpu);
+
+       for_each_online_cpu(cpu)
+               tracing_reset(tr, cpu);
+}
+
+static void bts_trace_start_cpu(void *arg)
+{
+       this_tracer =
+               ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
+                              /* ovfl = */ NULL, /* th = */ (size_t)-1);
+       if (IS_ERR(this_tracer)) {
+               this_tracer = NULL;
+               return;
+       }
+
+       bts_enable();
+}
+
+static void bts_trace_start(struct trace_array *tr)
+{
+       int cpu;
+
+       bts_trace_reset(tr);
+
+       for_each_cpu_mask(cpu, cpu_possible_map)
+               smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+}
+
+static void bts_trace_stop_cpu(void *arg)
+{
+       if (this_tracer) {
+               bts_disable();
+
+               ds_release_bts(this_tracer);
+               this_tracer = NULL;
+       }
+}
+
+static void bts_trace_stop(struct trace_array *tr)
+{
+       int cpu;
+
+       for_each_cpu_mask(cpu, cpu_possible_map)
+               smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+}
+
+static int bts_trace_init(struct trace_array *tr)
+{
+       bts_trace_cpuinit(&boot_cpu_data);
+       bts_trace_reset(tr);
+       bts_trace_start(tr);
+
+       return 0;
+}
+
+static void bts_trace_print_header(struct seq_file *m)
+{
+#ifdef __i386__
+       seq_puts(m, "# CPU#    FROM           TO     FUNCTION\n");
+       seq_puts(m, "#  |       |             |         |\n");
+#else
+       seq_puts(m,
+                "# CPU#        FROM                   TO         FUNCTION\n");
+       seq_puts(m,
+                "#  |           |                     |             |\n");
+#endif
+}
+
+static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
+{
+       struct trace_entry *entry = iter->ent;
+       struct trace_seq *seq = &iter->seq;
+       struct bts_entry *it;
+
+       trace_assign_type(it, entry);
+
+       if (entry->type == TRACE_BTS) {
+               int ret;
+#ifdef CONFIG_KALLSYMS
+               char function[KSYM_SYMBOL_LEN];
+               sprint_symbol(function, it->from);
+#else
+               char *function = "<unknown>";
+#endif
+
+               ret = trace_seq_printf(seq, "%4d  0x%lx -> 0x%lx [%s]\n",
+                                      entry->cpu, it->from, it->to, function);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+               return TRACE_TYPE_HANDLED;
+       }
+       return TRACE_TYPE_UNHANDLED;
+}
+
+void trace_bts(struct trace_array *tr, unsigned long from, unsigned long to)
+{
+       struct ring_buffer_event *event;
+       struct bts_entry *entry;
+       unsigned long irq;
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, 0, from);
+       entry->ent.type = TRACE_BTS;
+       entry->ent.cpu = smp_processor_id();
+       entry->from = from;
+       entry->to   = to;
+       ring_buffer_unlock_commit(tr->buffer, event, irq);
+}
+
+static void trace_bts_at(struct trace_array *tr, size_t index)
+{
+       const void *raw = NULL;
+       unsigned long from, to;
+       int err;
+
+       err = ds_access_bts(this_tracer, index, &raw);
+       if (err < 0)
+               return;
+
+       from = *(const unsigned long *)raw;
+       to = *(const unsigned long *)((const char *)raw + sizeof_field);
+
+       trace_bts(tr, from, to);
+}
+
+static void trace_bts_cpu(void *arg)
+{
+       struct trace_array *tr = (struct trace_array *) arg;
+       size_t index = 0, end = 0, i;
+       int err;
+
+       if (!this_tracer)
+               return;
+
+       bts_disable();
+
+       err = ds_get_bts_index(this_tracer, &index);
+       if (err < 0)
+               goto out;
+
+       err = ds_get_bts_end(this_tracer, &end);
+       if (err < 0)
+               goto out;
+
+       for (i = index; i < end; i++)
+               trace_bts_at(tr, i);
+
+       for (i = 0; i < index; i++)
+               trace_bts_at(tr, i);
+
+out:
+       bts_enable();
+}
+
+static void trace_bts_prepare(struct trace_iterator *iter)
+{
+       int cpu;
+
+       for_each_cpu_mask(cpu, cpu_possible_map)
+               smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
+}
+
+struct tracer bts_tracer __read_mostly =
+{
+       .name           = "bts",
+       .init           = bts_trace_init,
+       .reset          = bts_trace_stop,
+       .print_header   = bts_trace_print_header,
+       .print_line     = bts_trace_print_line,
+       .start          = bts_trace_start,
+       .stop           = bts_trace_stop,
+       .open           = trace_bts_prepare
+};
+
+__init static int init_bts_trace(void)
+{
+       return register_tracer(&bts_tracer);
+}
+device_initcall(init_bts_trace);
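
A hedged usage sketch (again assuming a /debug debugfs mount). Reading the trace file fires the open hook, trace_bts_prepare(), which drains each CPU's BTS buffer into the ring buffer:

  # echo bts > /debug/tracing/current_tracer
  # cat /debug/tracing/trace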
index 0f85a64003d3980d1002d1877712808703f25cb9..e74f6d0a321663b3610fb0e37d250c4fbf9696dc 100644 (file)
@@ -42,24 +42,20 @@ static void stop_function_trace(struct trace_array *tr)
        tracing_stop_cmdline_record();
 }
 
-static void function_trace_init(struct trace_array *tr)
+static int function_trace_init(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               start_function_trace(tr);
+       start_function_trace(tr);
+       return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               stop_function_trace(tr);
+       stop_function_trace(tr);
 }
 
-static void function_trace_ctrl_update(struct trace_array *tr)
+static void function_trace_start(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               start_function_trace(tr);
-       else
-               stop_function_trace(tr);
+       function_reset(tr);
 }
 
 static struct tracer function_trace __read_mostly =
@@ -67,7 +63,7 @@ static struct tracer function_trace __read_mostly =
        .name        = "function",
        .init        = function_trace_init,
        .reset       = function_trace_reset,
-       .ctrl_update = function_trace_ctrl_update,
+       .start       = function_trace_start,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_function,
 #endif
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644 (file)
index 0000000..894b50b
--- /dev/null
@@ -0,0 +1,400 @@
+/*
+ *
+ * Function graph tracer.
+ * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Mostly borrowed from function tracer which
+ * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace.h"
+
+#define TRACE_GRAPH_INDENT     2
+
+/* Flag options */
+#define TRACE_GRAPH_PRINT_OVERRUN      0x1
+#define TRACE_GRAPH_PRINT_CPU          0x2
+#define TRACE_GRAPH_PRINT_OVERHEAD     0x4
+
+static struct tracer_opt trace_opts[] = {
+       /* Display overruns? */
+       { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+       /* Display CPU? */
+       { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
+       /* Display overhead? */
+       { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
+       { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+       /* Display CPU and overhead by default, but not overruns */
+       .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
+       .opts = trace_opts
+};
+
+/* pid of the last trace processed, per CPU */
+static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
+
+static int graph_trace_init(struct trace_array *tr)
+{
+       int cpu, ret;
+
+       for_each_online_cpu(cpu)
+               tracing_reset(tr, cpu);
+
+       ret = register_ftrace_graph(&trace_graph_return,
+                                       &trace_graph_entry);
+       if (ret)
+               return ret;
+       tracing_start_cmdline_record();
+
+       return 0;
+}
+
+static void graph_trace_reset(struct trace_array *tr)
+{
+       tracing_stop_cmdline_record();
+       unregister_ftrace_graph();
+}
+
+static inline int log10_cpu(int nb)
+{
+       if (nb / 100)
+               return 3;
+       if (nb / 10)
+               return 2;
+       return 1;
+}
+
+static enum print_line_t
+print_graph_cpu(struct trace_seq *s, int cpu)
+{
+       int i;
+       int ret;
+       int log10_this = log10_cpu(cpu);
+       int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
+
+
+       /*
+        * Start with a space character - to make it stand out
+        * to the right a bit when trace output is pasted into
+        * email:
+        */
+       ret = trace_seq_printf(s, " ");
+
+       /*
+        * Tricky - we space the CPU field according to the max
+        * number of online CPUs. On a 2-cpu system it would take
+        * a maximum of 1 digit - on a 128 cpu system it would
+        * take up to 3 digits:
+        */
+       for (i = 0; i < log10_all - log10_this; i++) {
+               ret = trace_seq_printf(s, " ");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+       ret = trace_seq_printf(s, "%d) ", cpu);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+
+/* If the pid changed since the last trace, output this event */
+static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+{
+       char *comm, *prev_comm;
+       pid_t prev_pid;
+       int ret;
+
+       if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
+               return 1;
+
+       prev_pid = last_pid[cpu];
+       last_pid[cpu] = pid;
+
+       comm = trace_find_cmdline(pid);
+       prev_comm = trace_find_cmdline(prev_pid);
+
+/*
+ * Context-switch trace line:
+
+ ------------------------------------------
+ | 1)  migration/0--1  =>  sshd-1755
+ ------------------------------------------
+
+ */
+       ret = trace_seq_printf(s,
+               " ------------------------------------------\n");
+       ret += trace_seq_printf(s, " | %d)  %s-%d  =>  %s-%d\n",
+                                 cpu, prev_comm, prev_pid, comm, pid);
+       ret += trace_seq_printf(s,
+               " ------------------------------------------\n\n");
+       return ret;
+}
+
+static bool
+trace_branch_is_leaf(struct trace_iterator *iter,
+               struct ftrace_graph_ent_entry *curr)
+{
+       struct ring_buffer_iter *ring_iter;
+       struct ring_buffer_event *event;
+       struct ftrace_graph_ret_entry *next;
+
+       ring_iter = iter->buffer_iter[iter->cpu];
+
+       if (!ring_iter)
+               return false;
+
+       event = ring_buffer_iter_peek(ring_iter, NULL);
+
+       if (!event)
+               return false;
+
+       next = ring_buffer_event_data(event);
+
+       if (next->ent.type != TRACE_GRAPH_RET)
+               return false;
+
+       if (curr->ent.pid != next->ent.pid ||
+                       curr->graph_ent.func != next->ret.func)
+               return false;
+
+       return true;
+}
+
+
+static inline int
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+       unsigned long nsecs_rem = do_div(duration, 1000);
+       return trace_seq_printf(s, "%4llu.%3lu us |  ", duration, nsecs_rem);
+}
+
+/* Signal an execution-time overhead marker in the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+       /* Duration exceeded 100 usecs */
+       if (duration > 100000ULL)
+               return trace_seq_printf(s, "! ");
+
+       /* Duration exceeded 10 usecs */
+       if (duration > 10000ULL)
+               return trace_seq_printf(s, "+ ");
+
+       return trace_seq_printf(s, "  ");
+}
+
+/* Case of a leaf function on its call entry */
+static enum print_line_t
+print_graph_entry_leaf(struct trace_iterator *iter,
+               struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
+{
+       struct ftrace_graph_ret_entry *ret_entry;
+       struct ftrace_graph_ret *graph_ret;
+       struct ring_buffer_event *event;
+       struct ftrace_graph_ent *call;
+       unsigned long long duration;
+       int ret;
+       int i;
+
+       event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+       ret_entry = ring_buffer_event_data(event);
+       graph_ret = &ret_entry->ret;
+       call = &entry->graph_ent;
+       duration = graph_ret->rettime - graph_ret->calltime;
+
+       /* Must not exceed 8 characters: 9999.999 us */
+       if (duration > 10000000ULL)
+               duration = 9999999ULL;
+
+       /* Overhead */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+               ret = print_graph_overhead(duration, s);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       /* Duration */
+       ret = print_graph_duration(duration, s);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Function */
+       for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+               ret = trace_seq_printf(s, " ");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       ret = seq_print_ip_sym(s, call->func, 0);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       ret = trace_seq_printf(s, "();\n");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
+                       struct trace_seq *s)
+{
+       int i;
+       int ret;
+       struct ftrace_graph_ent *call = &entry->graph_ent;
+
+       /* No overhead */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+               ret = trace_seq_printf(s, "  ");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       /* No time */
+       ret = trace_seq_printf(s, "            |  ");
+
+       /* Function */
+       for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+               ret = trace_seq_printf(s, " ");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       ret = seq_print_ip_sym(s, call->func, 0);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       ret = trace_seq_printf(s, "() {\n");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+                       struct trace_iterator *iter, int cpu)
+{
+       int ret;
+       struct trace_entry *ent = iter->ent;
+
+       /* Pid */
+       if (!verif_pid(s, ent->pid, cpu))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Cpu */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+               ret = print_graph_cpu(s, cpu);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       if (trace_branch_is_leaf(iter, field))
+               return print_graph_entry_leaf(iter, field, s);
+       else
+               return print_graph_entry_nested(field, s);
+
+}
+
+static enum print_line_t
+print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+                  struct trace_entry *ent, int cpu)
+{
+       int i;
+       int ret;
+       unsigned long long duration = trace->rettime - trace->calltime;
+
+       /* Must not exceed 8 characters: xxxx.yyy us */
+       if (duration > 10000000ULL)
+               duration = 9999999ULL;
+
+       /* Pid */
+       if (!verif_pid(s, ent->pid, cpu))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Cpu */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+               ret = print_graph_cpu(s, cpu);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       /* Overhead */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+               ret = print_graph_overhead(duration, s);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       /* Duration */
+       ret = print_graph_duration(duration, s);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Closing brace */
+       for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
+               ret = trace_seq_printf(s, " ");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       ret = trace_seq_printf(s, "}\n");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Overrun */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+               ret = trace_seq_printf(s, " (Overruns: %lu)\n",
+                                       trace->overrun);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+       return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent;
+
+       switch (entry->type) {
+       case TRACE_GRAPH_ENT: {
+               struct ftrace_graph_ent_entry *field;
+               trace_assign_type(field, entry);
+               return print_graph_entry(field, s, iter,
+                                        iter->cpu);
+       }
+       case TRACE_GRAPH_RET: {
+               struct ftrace_graph_ret_entry *field;
+               trace_assign_type(field, entry);
+               return print_graph_return(&field->ret, s, entry, iter->cpu);
+       }
+       default:
+               return TRACE_TYPE_UNHANDLED;
+       }
+}
+
+static struct tracer graph_trace __read_mostly = {
+       .name        = "function_graph",
+       .init        = graph_trace_init,
+       .reset       = graph_trace_reset,
+       .print_line  = print_graph_function,
+       .flags       = &tracer_flags,
+};
+
+static __init int init_graph_trace(void)
+{
+       return register_tracer(&graph_trace);
+}
+
+device_initcall(init_graph_trace);
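
The leaf-collapsing done above by trace_branch_is_leaf() and print_graph_entry_leaf() — peek at the next ring-buffer event, and if it is the matching return for the same pid and function, print the call on one line as "func();" — can be mimicked in user space. The event stream and function names below are invented for illustration; only the peek-and-collapse logic mirrors the code above:

#include <stdio.h>
#include <string.h>

enum ev_type { EV_ENT, EV_RET };

struct ev {
	enum ev_type type;
	const char *func;
	int depth;
};

/* A tiny, made-up event stream: fget_light returns immediately. */
static const struct ev stream[] = {
	{ EV_ENT, "sys_read",   0 },
	{ EV_ENT, "fget_light", 1 },
	{ EV_RET, "fget_light", 1 },
	{ EV_RET, "sys_read",   0 },
};

#define NEV (sizeof(stream) / sizeof(stream[0]))
#define INDENT 2

static void pad(int depth)
{
	for (int i = 0; i < depth * INDENT; i++)
		putchar(' ');
}

int main(void)
{
	for (size_t i = 0; i < NEV; i++) {
		const struct ev *e = &stream[i];

		if (e->type == EV_RET) {
			pad(e->depth);
			puts("}");
			continue;
		}
		/* Peek: a matching return right behind us means a leaf. */
		if (i + 1 < NEV && stream[i + 1].type == EV_RET &&
		    !strcmp(stream[i + 1].func, e->func)) {
			pad(e->depth);
			printf("%s();\n", e->func);
			i++;		/* consume the return event too */
			continue;
		}
		pad(e->depth);
		printf("%s() {\n", e->func);
	}
	return 0;
}

This prints "sys_read() {" / "  fget_light();" / "}", the same collapsed form the graph tracer emits.
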
index 9c74071c10e0b5c8534039c50bd3845f04a036fe..7c2e326bbc8b15d942532d5951a38978fb66e194 100644 (file)
@@ -353,15 +353,28 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
+/*
+ * save_tracer_enabled is used to save the state of the tracer_enabled
+ * variable when we disable it while opening a trace output file.
+ */
+static int save_tracer_enabled;
+
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
        register_ftrace_function(&trace_ops);
-       tracer_enabled = 1;
+       if (tracing_is_enabled()) {
+               tracer_enabled = 1;
+               save_tracer_enabled = 1;
+       } else {
+               tracer_enabled = 0;
+               save_tracer_enabled = 0;
+       }
 }
 
 static void stop_irqsoff_tracer(struct trace_array *tr)
 {
        tracer_enabled = 0;
+       save_tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
 }
 
@@ -370,53 +383,55 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
-
-       if (tr->ctrl)
-               start_irqsoff_tracer(tr);
+       start_irqsoff_tracer(tr);
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               stop_irqsoff_tracer(tr);
+       stop_irqsoff_tracer(tr);
 }
 
-static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
+static void irqsoff_tracer_start(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               start_irqsoff_tracer(tr);
-       else
-               stop_irqsoff_tracer(tr);
+       tracer_enabled = 1;
+       save_tracer_enabled = 1;
+}
+
+static void irqsoff_tracer_stop(struct trace_array *tr)
+{
+       tracer_enabled = 0;
+       save_tracer_enabled = 0;
 }
 
 static void irqsoff_tracer_open(struct trace_iterator *iter)
 {
        /* stop the trace while dumping */
-       if (iter->tr->ctrl)
-               stop_irqsoff_tracer(iter->tr);
+       tracer_enabled = 0;
 }
 
 static void irqsoff_tracer_close(struct trace_iterator *iter)
 {
-       if (iter->tr->ctrl)
-               start_irqsoff_tracer(iter->tr);
+       /* restart tracing */
+       tracer_enabled = save_tracer_enabled;
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
-static void irqsoff_tracer_init(struct trace_array *tr)
+static int irqsoff_tracer_init(struct trace_array *tr)
 {
        trace_type = TRACER_IRQS_OFF;
 
        __irqsoff_tracer_init(tr);
+       return 0;
 }
 static struct tracer irqsoff_tracer __read_mostly =
 {
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
+       .start          = irqsoff_tracer_start,
+       .stop           = irqsoff_tracer_stop,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
-       .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
@@ -428,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
-static void preemptoff_tracer_init(struct trace_array *tr)
+static int preemptoff_tracer_init(struct trace_array *tr)
 {
        trace_type = TRACER_PREEMPT_OFF;
 
        __irqsoff_tracer_init(tr);
+       return 0;
 }
 
 static struct tracer preemptoff_tracer __read_mostly =
@@ -440,9 +456,10 @@ static struct tracer preemptoff_tracer __read_mostly =
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
+       .start          = irqsoff_tracer_start,
+       .stop           = irqsoff_tracer_stop,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
-       .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
@@ -456,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly =
 #if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)
 
-static void preemptirqsoff_tracer_init(struct trace_array *tr)
+static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
        __irqsoff_tracer_init(tr);
+       return 0;
 }
 
 static struct tracer preemptirqsoff_tracer __read_mostly =
@@ -468,9 +486,10 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
+       .start          = irqsoff_tracer_start,
+       .stop           = irqsoff_tracer_stop,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
-       .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
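
The save_tracer_enabled dance above is easy to miss in the diff: open() silences tracing while a trace file is dumped, and close() restores whatever state start()/stop() last established. Condensed into a runnable user-space sketch — the two variable names come from the hunks, everything else is invented:

#include <stdio.h>

static int tracer_enabled;
static int save_tracer_enabled;	/* remembers the pre-open state */

static void tracer_start(void)
{
	tracer_enabled = 1;
	save_tracer_enabled = 1;
}

static void tracer_open(void)
{
	tracer_enabled = 0;	/* stop the trace while dumping */
}

static void tracer_close(void)
{
	tracer_enabled = save_tracer_enabled;	/* restart if it was on */
}

int main(void)
{
	tracer_start();
	tracer_open();
	printf("while open: enabled=%d\n", tracer_enabled);	/* 0 */
	tracer_close();
	printf("after close: enabled=%d\n", tracer_enabled);	/* 1 */
	return 0;
}
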
index e62cbf78eab6d6a6270a2a5c97f4ffc7ac443525..2a98a206acc2f0b6cdeaabd54b258075b084550e 100644 (file)
@@ -32,34 +32,29 @@ static void mmio_reset_data(struct trace_array *tr)
                tracing_reset(tr, cpu);
 }
 
-static void mmio_trace_init(struct trace_array *tr)
+static int mmio_trace_init(struct trace_array *tr)
 {
        pr_debug("in %s\n", __func__);
        mmio_trace_array = tr;
-       if (tr->ctrl) {
-               mmio_reset_data(tr);
-               enable_mmiotrace();
-       }
+
+       mmio_reset_data(tr);
+       enable_mmiotrace();
+       return 0;
 }
 
 static void mmio_trace_reset(struct trace_array *tr)
 {
        pr_debug("in %s\n", __func__);
-       if (tr->ctrl)
-               disable_mmiotrace();
+
+       disable_mmiotrace();
        mmio_reset_data(tr);
        mmio_trace_array = NULL;
 }
 
-static void mmio_trace_ctrl_update(struct trace_array *tr)
+static void mmio_trace_start(struct trace_array *tr)
 {
        pr_debug("in %s\n", __func__);
-       if (tr->ctrl) {
-               mmio_reset_data(tr);
-               enable_mmiotrace();
-       } else {
-               disable_mmiotrace();
-       }
+       mmio_reset_data(tr);
 }
 
 static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
@@ -296,10 +291,10 @@ static struct tracer mmio_tracer __read_mostly =
        .name           = "mmiotrace",
        .init           = mmio_trace_init,
        .reset          = mmio_trace_reset,
+       .start          = mmio_trace_start,
        .pipe_open      = mmio_pipe_open,
        .close          = mmio_close,
        .read           = mmio_read,
-       .ctrl_update    = mmio_trace_ctrl_update,
        .print_line     = mmio_print_line,
 };
 
index 4592b4862515c9d1680417f4fe46bc192999d661..b9767acd30acca0fcb6192b847888d1d32a6924a 100644 (file)
 
 #include "trace.h"
 
+/* Our two options */
+enum {
+       TRACE_NOP_OPT_ACCEPT = 0x1,
+       TRACE_NOP_OPT_REFUSE = 0x2
+};
+
+/* Options for the tracer (see trace_options file) */
+static struct tracer_opt nop_opts[] = {
+       /* Option that will be accepted by set_flag callback */
+       { TRACER_OPT(test_nop_accept, TRACE_NOP_OPT_ACCEPT) },
+       /* Option that will be refused by set_flag callback */
+       { TRACER_OPT(test_nop_refuse, TRACE_NOP_OPT_REFUSE) },
+       { } /* Always set a last empty entry */
+};
+
+static struct tracer_flags nop_flags = {
+       /* You can check your flags value here when you want. */
+       .val = 0, /* By default: all flags disabled */
+       .opts = nop_opts
+};
+
 static struct trace_array      *ctx_trace;
 
 static void start_nop_trace(struct trace_array *tr)
@@ -24,7 +45,7 @@ static void stop_nop_trace(struct trace_array *tr)
        /* Nothing to do! */
 }
 
-static void nop_trace_init(struct trace_array *tr)
+static int nop_trace_init(struct trace_array *tr)
 {
        int cpu;
        ctx_trace = tr;
@@ -32,33 +53,53 @@ static void nop_trace_init(struct trace_array *tr)
        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
 
-       if (tr->ctrl)
-               start_nop_trace(tr);
+       start_nop_trace(tr);
+       return 0;
 }
 
 static void nop_trace_reset(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               stop_nop_trace(tr);
+       stop_nop_trace(tr);
 }
 
-static void nop_trace_ctrl_update(struct trace_array *tr)
+/* It only serves as a signal handler and a callback to
+ * accept or refuse the setting of a flag.
+ * If you don't implement it, then the flag setting will be
+ * automatically accepted.
+ */
+static int nop_set_flag(u32 old_flags, u32 bit, int set)
 {
-       /* When starting a new trace, reset the buffers */
-       if (tr->ctrl)
-               start_nop_trace(tr);
-       else
-               stop_nop_trace(tr);
+       /*
+        * Note that you don't need to update nop_flags.val yourself.
+        * The tracing API will do it automatically if you return 0.
+        */
+       if (bit == TRACE_NOP_OPT_ACCEPT) {
+               printk(KERN_DEBUG "nop_test_accept flag set to %d: we accept."
+                       " Now cat trace_options to see the result\n",
+                       set);
+               return 0;
+       }
+
+       if (bit == TRACE_NOP_OPT_REFUSE) {
+               printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse."
+                       "Now cat trace_options to see the result\n",
+                       set);
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
+
 struct tracer nop_trace __read_mostly =
 {
        .name           = "nop",
        .init           = nop_trace_init,
        .reset          = nop_trace_reset,
-       .ctrl_update    = nop_trace_ctrl_update,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_nop,
 #endif
+       .flags          = &nop_flags,
+       .set_flag       = nop_set_flag
 };
 
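
The nop tracer now doubles as live documentation for the per-tracer option API: set_flag() returns 0 to accept a change (the core then updates tracer_flags.val itself) or an error to refuse it. Below is a user-space sketch of that contract; the lookup loop is an assumption about what the core does when trace_options is written, not code copied from it:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define OPT_ACCEPT 0x1
#define OPT_REFUSE 0x2

struct opt { const char *name; unsigned bit; };

static struct opt opts[] = {
	{ "test_nop_accept", OPT_ACCEPT },
	{ "test_nop_refuse", OPT_REFUSE },
};

static unsigned flags_val;

/* Mirrors nop_set_flag(): accept one bit, refuse the other. */
static int nop_set_flag(unsigned old, unsigned bit, int set)
{
	(void)old; (void)set;
	return bit == OPT_REFUSE ? -EINVAL : 0;
}

static int set_option(const char *name, int set)
{
	for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
		if (strcmp(opts[i].name, name))
			continue;
		int ret = nop_set_flag(flags_val, opts[i].bit, set);
		if (ret)
			return ret;	/* callback refused the change */
		if (set)
			flags_val |= opts[i].bit;
		else
			flags_val &= ~opts[i].bit;
		return 0;
	}
	return -EINVAL;			/* unknown option */
}

int main(void)
{
	printf("accept: %d, val=%#x\n", set_option("test_nop_accept", 1), flags_val);
	printf("refuse: %d, val=%#x\n", set_option("test_nop_refuse", 1), flags_val);
	return 0;
}
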
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
new file mode 100644 (file)
index 0000000..a7172a3
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * ring buffer based C-state tracer
+ *
+ * Arjan van de Ven <arjan@linux.intel.com>
+ * Copyright (C) 2008 Intel Corporation
+ *
+ * Much is borrowed from trace_boot.c which is
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+
+#include "trace.h"
+
+static struct trace_array *power_trace;
+static int __read_mostly trace_power_enabled;
+
+
+static void start_power_trace(struct trace_array *tr)
+{
+       trace_power_enabled = 1;
+}
+
+static void stop_power_trace(struct trace_array *tr)
+{
+       trace_power_enabled = 0;
+}
+
+
+static int power_trace_init(struct trace_array *tr)
+{
+       int cpu;
+       power_trace = tr;
+
+       trace_power_enabled = 1;
+
+       for_each_cpu_mask(cpu, cpu_possible_map)
+               tracing_reset(tr, cpu);
+       return 0;
+}
+
+static enum print_line_t power_print_line(struct trace_iterator *iter)
+{
+       int ret = 0;
+       struct trace_entry *entry = iter->ent;
+       struct trace_power *field;
+       struct power_trace *it;
+       struct trace_seq *s = &iter->seq;
+       struct timespec stamp;
+       struct timespec duration;
+
+       trace_assign_type(field, entry);
+       it = &field->state_data;
+       stamp = ktime_to_timespec(it->stamp);
+       duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));
+
+       if (entry->type == TRACE_POWER) {
+               if (it->type == POWER_CSTATE)
+                       ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
+                                         stamp.tv_sec,
+                                         stamp.tv_nsec,
+                                         it->state, iter->cpu,
+                                         duration.tv_sec,
+                                         duration.tv_nsec);
+               if (it->type == POWER_PSTATE)
+                       ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
+                                         stamp.tv_sec,
+                                         stamp.tv_nsec,
+                                         it->state, iter->cpu);
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+               return TRACE_TYPE_HANDLED;
+       }
+       return TRACE_TYPE_UNHANDLED;
+}
+
+static struct tracer power_tracer __read_mostly =
+{
+       .name           = "power",
+       .init           = power_trace_init,
+       .start          = start_power_trace,
+       .stop           = stop_power_trace,
+       .reset          = stop_power_trace,
+       .print_line     = power_print_line,
+};
+
+static int init_power_trace(void)
+{
+       return register_tracer(&power_tracer);
+}
+device_initcall(init_power_trace);
+
+void trace_power_start(struct power_trace *it, unsigned int type,
+                        unsigned int level)
+{
+       if (!trace_power_enabled)
+               return;
+
+       memset(it, 0, sizeof(struct power_trace));
+       it->state = level;
+       it->type = type;
+       it->stamp = ktime_get();
+}
+EXPORT_SYMBOL_GPL(trace_power_start);
+
+
+void trace_power_end(struct power_trace *it)
+{
+       struct ring_buffer_event *event;
+       struct trace_power *entry;
+       struct trace_array_cpu *data;
+       unsigned long irq_flags;
+       struct trace_array *tr = power_trace;
+
+       if (!trace_power_enabled)
+               return;
+
+       preempt_disable();
+       it->end = ktime_get();
+       data = tr->data[smp_processor_id()];
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               goto out;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, 0, 0);
+       entry->ent.type = TRACE_POWER;
+       entry->state_data = *it;
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+       trace_wake_up();
+
+ out:
+       preempt_enable();
+}
+EXPORT_SYMBOL_GPL(trace_power_end);
+
+void trace_power_mark(struct power_trace *it, unsigned int type,
+                        unsigned int level)
+{
+       struct ring_buffer_event *event;
+       struct trace_power *entry;
+       struct trace_array_cpu *data;
+       unsigned long irq_flags;
+       struct trace_array *tr = power_trace;
+
+       if (!trace_power_enabled)
+               return;
+
+       memset(it, 0, sizeof(struct power_trace));
+       it->state = level;
+       it->type = type;
+       it->stamp = ktime_get();
+       preempt_disable();
+       it->end = it->stamp;
+       data = tr->data[smp_processor_id()];
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               goto out;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, 0, 0);
+       entry->ent.type = TRACE_POWER;
+       entry->state_data = *it;
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+       trace_wake_up();
+
+ out:
+       preempt_enable();
+}
+EXPORT_SYMBOL_GPL(trace_power_mark);
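
trace_power_start() and trace_power_end() above are meant to bracket an idle-state entry. A hypothetical call site might look like the sketch below; kernel context is assumed, POWER_CSTATE and struct power_trace come from the <linux/ftrace.h> additions elsewhere in this commit, and enter_cstate_sketch() is a name invented here:

#include <linux/ftrace.h>	/* struct power_trace, POWER_CSTATE */

static void enter_cstate_sketch(unsigned int cstate)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, cstate);
	/* ... architecture-specific idle entry would run here ... */
	trace_power_end(&it);	/* stamps the end time, commits the event */
}
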
index b8f56beb1a621d5ff527a93aee383f0e02fd30dd..863390557b445d88596dde1b452ffb2e7e3e4ec8 100644 (file)
@@ -16,7 +16,8 @@
 
 static struct trace_array      *ctx_trace;
 static int __read_mostly       tracer_enabled;
-static atomic_t                        sched_ref;
+static int                     sched_ref;
+static DEFINE_MUTEX(sched_register_mutex);
 
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -27,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
        int cpu;
        int pc;
 
-       if (!atomic_read(&sched_ref))
+       if (!sched_ref)
                return;
 
        tracing_record_cmdline(prev);
@@ -123,20 +124,18 @@ static void tracing_sched_unregister(void)
 
 static void tracing_start_sched_switch(void)
 {
-       long ref;
-
-       ref = atomic_inc_return(&sched_ref);
-       if (ref == 1)
+       mutex_lock(&sched_register_mutex);
+       if (!(sched_ref++))
                tracing_sched_register();
+       mutex_unlock(&sched_register_mutex);
 }
 
 static void tracing_stop_sched_switch(void)
 {
-       long ref;
-
-       ref = atomic_dec_and_test(&sched_ref);
-       if (ref)
+       mutex_lock(&sched_register_mutex);
+       if (!(--sched_ref))
                tracing_sched_unregister();
+       mutex_unlock(&sched_register_mutex);
 }
 
 void tracing_start_cmdline_record(void)
@@ -149,40 +148,86 @@ void tracing_stop_cmdline_record(void)
        tracing_stop_sched_switch();
 }
 
+/**
+ * tracing_start_sched_switch_record - start tracing context switches
+ *
+ * Turns on context switch tracing for a tracer.
+ */
+void tracing_start_sched_switch_record(void)
+{
+       if (unlikely(!ctx_trace)) {
+               WARN_ON(1);
+               return;
+       }
+
+       tracing_start_sched_switch();
+
+       mutex_lock(&sched_register_mutex);
+       tracer_enabled++;
+       mutex_unlock(&sched_register_mutex);
+}
+
+/**
+ * tracing_stop_sched_switch_record - stop tracing context switches
+ *
+ * Turns off context switch tracing for a tracer.
+ */
+void tracing_stop_sched_switch_record(void)
+{
+       mutex_lock(&sched_register_mutex);
+       tracer_enabled--;
+       WARN_ON(tracer_enabled < 0);
+       mutex_unlock(&sched_register_mutex);
+
+       tracing_stop_sched_switch();
+}
+
+/**
+ * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
+ * @tr: trace array pointer to assign
+ *
+ * Some tracers might want to record the context switches in their
+ * trace. This function lets those tracers assign the trace array
+ * to use.
+ */
+void tracing_sched_switch_assign_trace(struct trace_array *tr)
+{
+       ctx_trace = tr;
+}
+
 static void start_sched_trace(struct trace_array *tr)
 {
        sched_switch_reset(tr);
-       tracing_start_cmdline_record();
-       tracer_enabled = 1;
+       tracing_start_sched_switch_record();
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
-       tracer_enabled = 0;
-       tracing_stop_cmdline_record();
+       tracing_stop_sched_switch_record();
 }
 
-static void sched_switch_trace_init(struct trace_array *tr)
+static int sched_switch_trace_init(struct trace_array *tr)
 {
        ctx_trace = tr;
-
-       if (tr->ctrl)
-               start_sched_trace(tr);
+       start_sched_trace(tr);
+       return 0;
 }
 
 static void sched_switch_trace_reset(struct trace_array *tr)
 {
-       if (tr->ctrl)
+       if (sched_ref)
                stop_sched_trace(tr);
 }
 
-static void sched_switch_trace_ctrl_update(struct trace_array *tr)
+static void sched_switch_trace_start(struct trace_array *tr)
 {
-       /* When starting a new trace, reset the buffers */
-       if (tr->ctrl)
-               start_sched_trace(tr);
-       else
-               stop_sched_trace(tr);
+       sched_switch_reset(tr);
+       tracing_start_sched_switch();
+}
+
+static void sched_switch_trace_stop(struct trace_array *tr)
+{
+       tracing_stop_sched_switch();
 }
 
 static struct tracer sched_switch_trace __read_mostly =
@@ -190,7 +235,8 @@ static struct tracer sched_switch_trace __read_mostly =
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
-       .ctrl_update    = sched_switch_trace_ctrl_update,
+       .start          = sched_switch_trace_start,
+       .stop           = sched_switch_trace_stop,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_sched_switch,
 #endif
@@ -198,14 +244,6 @@ static struct tracer sched_switch_trace __read_mostly =
 
 __init static int init_sched_switch_trace(void)
 {
-       int ret = 0;
-
-       if (atomic_read(&sched_ref))
-               ret = tracing_sched_register();
-       if (ret) {
-               pr_info("error registering scheduler trace\n");
-               return ret;
-       }
        return register_tracer(&sched_switch_trace);
 }
 device_initcall(init_sched_switch_trace);
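
The switch above from atomic_t sched_ref to a plain counter under sched_register_mutex makes the check-and-register step atomic as a unit: with the bare atomic, a second caller could see the count already nonzero and return before the first caller had finished registering the probes. The same pattern in runnable user-space form, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sched_register_mutex = PTHREAD_MUTEX_INITIALIZER;
static int sched_ref;

static void tracing_sched_register(void)   { puts("register probes"); }
static void tracing_sched_unregister(void) { puts("unregister probes"); }

static void tracing_start_sched_switch(void)
{
	pthread_mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))		/* first user registers */
		tracing_sched_register();
	pthread_mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
	pthread_mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))		/* last user unregisters */
		tracing_sched_unregister();
	pthread_mutex_unlock(&sched_register_mutex);
}

int main(void)
{
	tracing_start_sched_switch();	/* register probes */
	tracing_start_sched_switch();	/* refcount only */
	tracing_stop_sched_switch();	/* refcount only */
	tracing_stop_sched_switch();	/* unregister probes */
	return 0;
}
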
index 3ae93f16b565de131887da94ee10835c183b2d6a..0067b49746c1d6b8c8a121d90560fad4f52b78d7 100644 (file)
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
                return;
 
        pc = preempt_count();
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
        atomic_dec(&data->disabled);
 
-       /*
-        * To prevent recursion from the scheduler, if the
-        * resched flag was set before we entered, then
-        * don't reschedule.
-        */
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -271,6 +262,12 @@ out:
        atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
+/*
+ * save_tracer_enabled is used to save the state of the tracer_enabled
+ * variable when we disable it while opening a trace output file.
+ */
+static int save_tracer_enabled;
+
 static void start_wakeup_tracer(struct trace_array *tr)
 {
        int ret;
@@ -309,7 +306,13 @@ static void start_wakeup_tracer(struct trace_array *tr)
 
        register_ftrace_function(&trace_ops);
 
-       tracer_enabled = 1;
+       if (tracing_is_enabled()) {
+               tracer_enabled = 1;
+               save_tracer_enabled = 1;
+       } else {
+               tracer_enabled = 0;
+               save_tracer_enabled = 0;
+       }
 
        return;
 fail_deprobe_wake_new:
@@ -321,49 +324,53 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
        tracer_enabled = 0;
+       save_tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
        unregister_trace_sched_switch(probe_wakeup_sched_switch);
        unregister_trace_sched_wakeup_new(probe_wakeup);
        unregister_trace_sched_wakeup(probe_wakeup);
 }
 
-static void wakeup_tracer_init(struct trace_array *tr)
+static int wakeup_tracer_init(struct trace_array *tr)
 {
        wakeup_trace = tr;
-
-       if (tr->ctrl)
-               start_wakeup_tracer(tr);
+       start_wakeup_tracer(tr);
+       return 0;
 }
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
-       if (tr->ctrl) {
-               stop_wakeup_tracer(tr);
-               /* make sure we put back any tasks we are tracing */
-               wakeup_reset(tr);
-       }
+       stop_wakeup_tracer(tr);
+       /* make sure we put back any tasks we are tracing */
+       wakeup_reset(tr);
+}
+
+static void wakeup_tracer_start(struct trace_array *tr)
+{
+       wakeup_reset(tr);
+       tracer_enabled = 1;
+       save_tracer_enabled = 1;
 }
 
-static void wakeup_tracer_ctrl_update(struct trace_array *tr)
+static void wakeup_tracer_stop(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               start_wakeup_tracer(tr);
-       else
-               stop_wakeup_tracer(tr);
+       tracer_enabled = 0;
+       save_tracer_enabled = 0;
 }
 
 static void wakeup_tracer_open(struct trace_iterator *iter)
 {
        /* stop the trace while dumping */
-       if (iter->tr->ctrl)
-               stop_wakeup_tracer(iter->tr);
+       tracer_enabled = 0;
 }
 
 static void wakeup_tracer_close(struct trace_iterator *iter)
 {
        /* forget about any processes we were recording */
-       if (iter->tr->ctrl)
-               start_wakeup_tracer(iter->tr);
+       if (save_tracer_enabled) {
+               wakeup_reset(iter->tr);
+               tracer_enabled = 1;
+       }
 }
 
 static struct tracer wakeup_tracer __read_mostly =
@@ -371,9 +378,10 @@ static struct tracer wakeup_tracer __read_mostly =
        .name           = "wakeup",
        .init           = wakeup_tracer_init,
        .reset          = wakeup_tracer_reset,
+       .start          = wakeup_tracer_start,
+       .stop           = wakeup_tracer_stop,
        .open           = wakeup_tracer_open,
        .close          = wakeup_tracer_close,
-       .ctrl_update    = wakeup_tracer_ctrl_update,
        .print_max      = 1,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
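
ftrace_preempt_disable()/ftrace_preempt_enable() used above replace the open-coded sequence visible in the removed lines; presumably the helpers are just that sequence factored into a header. A sketch reconstructed from the removed code (kernel context, not the actual header):

/* If NEED_RESCHED was already set when we entered the trace callback,
 * re-enabling preemption normally would recurse into the scheduler, so
 * remember the flag and skip the resched on the way out. */
static inline int ftrace_preempt_disable_sketch(void)
{
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable_sketch(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
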
index 90bc752a7580b3d4800417d2f7a28abe604c1d65..88c8eb70f54aeb3508dda9c668f082b9a43b0c78 100644 (file)
@@ -13,6 +13,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
+       case TRACE_BRANCH:
                return 1;
        }
        return 0;
@@ -51,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
        int cpu, ret = 0;
 
        /* Don't allow flipping of max traces now */
-       raw_local_irq_save(flags);
+       local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);
 
        cnt = ring_buffer_entries(tr->buffer);
@@ -62,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
                        break;
        }
        __raw_spin_unlock(&ftrace_max_lock);
-       raw_local_irq_restore(flags);
+       local_irq_restore(flags);
 
        if (count)
                *count = cnt;
@@ -70,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
        return ret;
 }
 
+static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
+{
+       printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
+               trace->name, init_ret);
+}
 #ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -110,8 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        ftrace_set_filter(func_name, strlen(func_name), 1);
 
        /* enable tracing */
-       tr->ctrl = 1;
-       trace->init(tr);
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               goto out;
+       }
 
        /* Sleep for a 1/10 of a second */
        msleep(100);
@@ -134,13 +143,13 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        msleep(100);
 
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        ftrace_enabled = 0;
 
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
+       tracing_start();
 
        /* we should only have one item */
        if (!ret && count != 1) {
@@ -148,6 +157,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                ret = -1;
                goto out;
        }
+
  out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;
@@ -180,18 +190,22 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
        ftrace_enabled = 1;
        tracer_enabled = 1;
 
-       tr->ctrl = 1;
-       trace->init(tr);
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               goto out;
+       }
+
        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        ftrace_enabled = 0;
 
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
+       tracing_start();
 
        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
@@ -223,8 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
        int ret;
 
        /* start the tracing */
-       tr->ctrl = 1;
-       trace->init(tr);
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return ret;
+       }
+
        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
@@ -232,13 +250,13 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
        udelay(100);
        local_irq_enable();
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
+       tracing_start();
 
        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
@@ -259,9 +277,26 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        unsigned long count;
        int ret;
 
+       /*
+        * Now that the big kernel lock is no longer preemptible,
+        * and this is called with the BKL held, it will always
+        * fail. If preemption is already disabled, simply
+        * pass the test. When the BKL is removed, or becomes
+        * preemptible again, we will once again test this,
+        * so keep it in.
+        */
+       if (preempt_count()) {
+               printk(KERN_CONT "can not test ... force ");
+               return 0;
+       }
+
        /* start the tracing */
-       tr->ctrl = 1;
-       trace->init(tr);
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return ret;
+       }
+
        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
@@ -269,13 +304,13 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        udelay(100);
        preempt_enable();
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
+       tracing_start();
 
        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
@@ -296,9 +331,25 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        unsigned long count;
        int ret;
 
+       /*
+        * Now that the big kernel lock is no longer preemptible,
+        * and this is called with the BKL held, it will always
+        * fail. If preemption is already disabled, simply
+        * pass the test. When the BKL is removed, or becomes
+        * preemptible again, we will once again test this,
+        * so keep it in.
+        */
+       if (preempt_count()) {
+               printk(KERN_CONT "can not test ... force ");
+               return 0;
+       }
+
        /* start the tracing */
-       tr->ctrl = 1;
-       trace->init(tr);
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               goto out;
+       }
 
        /* reset the max latency */
        tracing_max_latency = 0;
@@ -312,27 +363,30 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        local_irq_enable();
 
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
-       if (ret)
+       if (ret) {
+               tracing_start();
                goto out;
+       }
 
        ret = trace_test_buffer(&max_tr, &count);
-       if (ret)
+       if (ret) {
+               tracing_start();
                goto out;
+       }
 
        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
+               tracing_start();
                goto out;
        }
 
        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
-       tr->ctrl = 1;
-       trace->ctrl_update(tr);
+       tracing_start();
        preempt_disable();
        local_irq_disable();
        udelay(100);
@@ -341,8 +395,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        local_irq_enable();
 
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
@@ -358,6 +411,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 
  out:
        trace->reset(tr);
+       tracing_start();
        tracing_max_latency = save_max;
 
        return ret;
@@ -423,8 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
        wait_for_completion(&isrt);
 
        /* start the tracing */
-       tr->ctrl = 1;
-       trace->init(tr);
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return ret;
+       }
+
        /* reset the max latency */
        tracing_max_latency = 0;
 
@@ -448,8 +506,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
        msleep(100);
 
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
@@ -457,6 +514,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 
 
        trace->reset(tr);
+       tracing_start();
 
        tracing_max_latency = save_max;
 
@@ -480,16 +538,20 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
        int ret;
 
        /* start the tracing */
-       tr->ctrl = 1;
-       trace->init(tr);
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return ret;
+       }
+
        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
+       tracing_start();
 
        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
@@ -508,17 +570,48 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
        int ret;
 
        /* start the tracing */
-       tr->ctrl = 1;
-       trace->init(tr);
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return 0;
+       }
+
        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
-       tr->ctrl = 0;
-       trace->ctrl_update(tr);
+       tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
+       tracing_start();
 
        return ret;
 }
 #endif /* CONFIG_SYSPROF_TRACER */
+
+#ifdef CONFIG_BRANCH_TRACER
+int
+trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
+{
+       unsigned long count;
+       int ret;
+
+       /* start the tracing */
+       ret = trace->init(tr);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               return ret;
+       }
+
+       /* Sleep for a 1/10 of a second */
+       msleep(100);
+       /* stop the tracing. */
+       tracing_stop();
+       /* check the trace buffer */
+       ret = trace_test_buffer(tr, &count);
+       trace->reset(tr);
+       tracing_start();
+
+       return ret;
+}
+#endif /* CONFIG_BRANCH_TRACER */
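
Every startup selftest in this file now follows the same template the new branch test shows in full: init (checking the int result the previous hunks introduced), let the tracer run, freeze with tracing_stop(), count entries, reset the tracer, and re-enable global tracing. Distilled for reference — kernel context, and selftest_template() is a name invented here:

static int selftest_template(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	ret = trace->init(tr);		/* init can fail now */
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}
	msleep(100);			/* let the tracer collect entries */
	tracing_stop();			/* freeze the ring buffer */
	ret = trace_test_buffer(tr, &count);	/* count what was recorded */
	trace->reset(tr);		/* tear the tracer down */
	tracing_start();		/* global tracing back on */
	return ret;
}
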
index 3bdb44bde4b7e5fa4af698c467f0c64b704e7699..fde3be15c6420495c00c4c9506282b538c2b10da 100644 (file)
@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;
 
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
index 9587d3bcba556761de49854c95676a03a6dcecbf..54960edb96d077d2c09a4356f7976ae8d0216781 100644 (file)
@@ -261,27 +261,17 @@ static void stop_stack_trace(struct trace_array *tr)
        mutex_unlock(&sample_timer_lock);
 }
 
-static void stack_trace_init(struct trace_array *tr)
+static int stack_trace_init(struct trace_array *tr)
 {
        sysprof_trace = tr;
 
-       if (tr->ctrl)
-               start_stack_trace(tr);
+       start_stack_trace(tr);
+       return 0;
 }
 
 static void stack_trace_reset(struct trace_array *tr)
 {
-       if (tr->ctrl)
-               stop_stack_trace(tr);
-}
-
-static void stack_trace_ctrl_update(struct trace_array *tr)
-{
-       /* When starting a new trace, reset the buffers */
-       if (tr->ctrl)
-               start_stack_trace(tr);
-       else
-               stop_stack_trace(tr);
+       stop_stack_trace(tr);
 }
 
 static struct tracer stack_trace __read_mostly =
@@ -289,7 +279,6 @@ static struct tracer stack_trace __read_mostly =
        .name           = "sysprof",
        .init           = stack_trace_init,
        .reset          = stack_trace_reset,
-       .ctrl_update    = stack_trace_ctrl_update,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_sysprof,
 #endif
index af8c85664882c992d962c0ff8809f889ccadd65d..79602740bbb5f396278dbe1665eada4c6f0259f0 100644 (file)
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(tracepoints_mutex);
  */
 #define TRACEPOINT_HASH_BITS 6
 #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
+static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
 
 /*
  * Note about RCU :
@@ -54,40 +55,43 @@ struct tracepoint_entry {
        struct hlist_node hlist;
        void **funcs;
        int refcount;   /* Number of times armed. 0 if disarmed. */
-       struct rcu_head rcu;
-       void *oldptr;
-       unsigned char rcu_pending:1;
        char name[0];
 };
 
-static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+struct tp_probes {
+       union {
+               struct rcu_head rcu;
+               struct list_head list;
+       } u;
+       void *probes[0];
+};
 
-static void free_old_closure(struct rcu_head *head)
+static inline void *allocate_probes(int count)
 {
-       struct tracepoint_entry *entry = container_of(head,
-               struct tracepoint_entry, rcu);
-       kfree(entry->oldptr);
-       /* Make sure we free the data before setting the pending flag to 0 */
-       smp_wmb();
-       entry->rcu_pending = 0;
+       struct tp_probes *p = kmalloc(count * sizeof(void *)
+                       + sizeof(struct tp_probes), GFP_KERNEL);
+       return p == NULL ? NULL : p->probes;
 }
 
-static void tracepoint_entry_free_old(struct tracepoint_entry *entry, void *old)
+static void rcu_free_old_probes(struct rcu_head *head)
 {
-       if (!old)
-               return;
-       entry->oldptr = old;
-       entry->rcu_pending = 1;
-       /* write rcu_pending before calling the RCU callback */
-       smp_wmb();
-       call_rcu_sched(&entry->rcu, free_old_closure);
+       kfree(container_of(head, struct tp_probes, u.rcu));
+}
+
+static inline void release_probes(void *old)
+{
+       if (old) {
+               struct tp_probes *tp_probes = container_of(old,
+                       struct tp_probes, probes[0]);
+               call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
+       }
 }
 
 static void debug_print_probes(struct tracepoint_entry *entry)
 {
        int i;
 
-       if (!tracepoint_debug)
+       if (!tracepoint_debug || !entry->funcs)
                return;
 
        for (i = 0; entry->funcs[i]; i++)
@@ -111,12 +115,13 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
                                return ERR_PTR(-EEXIST);
        }
        /* + 2 : one for new probe, one for NULL func */
-       new = kzalloc((nr_probes + 2) * sizeof(void *), GFP_KERNEL);
+       new = allocate_probes(nr_probes + 2);
        if (new == NULL)
                return ERR_PTR(-ENOMEM);
        if (old)
                memcpy(new, old, nr_probes * sizeof(void *));
        new[nr_probes] = probe;
+       new[nr_probes + 1] = NULL;
        entry->refcount = nr_probes + 1;
        entry->funcs = new;
        debug_print_probes(entry);
@@ -132,7 +137,7 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
        old = entry->funcs;
 
        if (!old)
-               return NULL;
+               return ERR_PTR(-ENOENT);
 
        debug_print_probes(entry);
        /* (N -> M), (N > 1, M >= 0) probes */
@@ -151,13 +156,13 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
                int j = 0;
                /* N -> M, (N > 1, M > 0) */
                /* + 1 for NULL */
-               new = kzalloc((nr_probes - nr_del + 1)
-                       * sizeof(void *), GFP_KERNEL);
+               new = allocate_probes(nr_probes - nr_del + 1);
                if (new == NULL)
                        return ERR_PTR(-ENOMEM);
                for (i = 0; old[i]; i++)
                        if ((probe && old[i] != probe))
                                new[j++] = old[i];
+               new[nr_probes - nr_del] = NULL;
                entry->refcount = nr_probes - nr_del;
                entry->funcs = new;
        }
@@ -215,7 +220,6 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
        memcpy(&e->name[0], name, name_len);
        e->funcs = NULL;
        e->refcount = 0;
-       e->rcu_pending = 0;
        hlist_add_head(&e->hlist, head);
        return e;
 }
@@ -224,32 +228,10 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
  * Remove the tracepoint from the tracepoint hash table. Must be called with
  * mutex_lock held.
  */
-static int remove_tracepoint(const char *name)
+static inline void remove_tracepoint(struct tracepoint_entry *e)
 {
-       struct hlist_head *head;
-       struct hlist_node *node;
-       struct tracepoint_entry *e;
-       int found = 0;
-       size_t len = strlen(name) + 1;
-       u32 hash = jhash(name, len-1, 0);
-
-       head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-       hlist_for_each_entry(e, node, head, hlist) {
-               if (!strcmp(name, e->name)) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (!found)
-               return -ENOENT;
-       if (e->refcount)
-               return -EBUSY;
        hlist_del(&e->hlist);
-       /* Make sure the call_rcu_sched has been executed */
-       if (e->rcu_pending)
-               rcu_barrier_sched();
        kfree(e);
-       return 0;
 }
 
 /*
@@ -280,6 +262,7 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 static void disable_tracepoint(struct tracepoint *elem)
 {
        elem->state = 0;
+       rcu_assign_pointer(elem->funcs, NULL);
 }
 
 /**
@@ -320,6 +303,23 @@ static void tracepoint_update_probes(void)
        module_update_tracepoints();
 }
 
+static void *tracepoint_add_probe(const char *name, void *probe)
+{
+       struct tracepoint_entry *entry;
+       void *old;
+
+       entry = get_tracepoint(name);
+       if (!entry) {
+               entry = add_tracepoint(name);
+               if (IS_ERR(entry))
+                       return entry;
+       }
+       old = tracepoint_entry_add_probe(entry, probe);
+       if (IS_ERR(old) && !entry->refcount)
+               remove_tracepoint(entry);
+       return old;
+}
+
 /**
  * tracepoint_probe_register -  Connect a probe to a tracepoint
  * @name: tracepoint name
@@ -330,44 +330,36 @@ static void tracepoint_update_probes(void)
  */
 int tracepoint_probe_register(const char *name, void *probe)
 {
-       struct tracepoint_entry *entry;
-       int ret = 0;
        void *old;
 
        mutex_lock(&tracepoints_mutex);
-       entry = get_tracepoint(name);
-       if (!entry) {
-               entry = add_tracepoint(name);
-               if (IS_ERR(entry)) {
-                       ret = PTR_ERR(entry);
-                       goto end;
-               }
-       }
-       /*
-        * If we detect that a call_rcu_sched is pending for this tracepoint,
-        * make sure it's executed now.
-        */
-       if (entry->rcu_pending)
-               rcu_barrier_sched();
-       old = tracepoint_entry_add_probe(entry, probe);
-       if (IS_ERR(old)) {
-               ret = PTR_ERR(old);
-               goto end;
-       }
+       old = tracepoint_add_probe(name, probe);
        mutex_unlock(&tracepoints_mutex);
+       if (IS_ERR(old))
+               return PTR_ERR(old);
+
        tracepoint_update_probes();             /* may update entry */
-       mutex_lock(&tracepoints_mutex);
-       entry = get_tracepoint(name);
-       WARN_ON(!entry);
-       if (entry->rcu_pending)
-               rcu_barrier_sched();
-       tracepoint_entry_free_old(entry, old);
-end:
-       mutex_unlock(&tracepoints_mutex);
-       return ret;
+       release_probes(old);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_register);
 
+static void *tracepoint_remove_probe(const char *name, void *probe)
+{
+       struct tracepoint_entry *entry;
+       void *old;
+
+       entry = get_tracepoint(name);
+       if (!entry)
+               return ERR_PTR(-ENOENT);
+       old = tracepoint_entry_remove_probe(entry, probe);
+       if (IS_ERR(old))
+               return old;
+       if (!entry->refcount)
+               remove_tracepoint(entry);
+       return old;
+}
+
 /**
  * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
  * @name: tracepoint name
@@ -380,38 +372,104 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register);
  */
 int tracepoint_probe_unregister(const char *name, void *probe)
 {
-       struct tracepoint_entry *entry;
        void *old;
-       int ret = -ENOENT;
 
        mutex_lock(&tracepoints_mutex);
-       entry = get_tracepoint(name);
-       if (!entry)
-               goto end;
-       if (entry->rcu_pending)
-               rcu_barrier_sched();
-       old = tracepoint_entry_remove_probe(entry, probe);
-       if (!old) {
-               printk(KERN_WARNING "Warning: Trying to unregister a probe"
-                                   "that doesn't exist\n");
-               goto end;
-       }
+       old = tracepoint_remove_probe(name, probe);
        mutex_unlock(&tracepoints_mutex);
+       if (IS_ERR(old))
+               return PTR_ERR(old);
+
        tracepoint_update_probes();             /* may update entry */
+       release_probes(old);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
+
+static LIST_HEAD(old_probes);
+static int need_update;
+
+static void tracepoint_add_old_probes(void *old)
+{
+       need_update = 1;
+       if (old) {
+               struct tp_probes *tp_probes = container_of(old,
+                       struct tp_probes, probes[0]);
+               list_add(&tp_probes->u.list, &old_probes);
+       }
+}
+
+/**
+ * tracepoint_probe_register_noupdate -  register a probe without connecting it
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * The caller must then call tracepoint_probe_update_all() to connect the probe.
+ */
+int tracepoint_probe_register_noupdate(const char *name, void *probe)
+{
+       void *old;
+
        mutex_lock(&tracepoints_mutex);
-       entry = get_tracepoint(name);
-       if (!entry)
-               goto end;
-       if (entry->rcu_pending)
-               rcu_barrier_sched();
-       tracepoint_entry_free_old(entry, old);
-       remove_tracepoint(name);        /* Ignore busy error message */
-       ret = 0;
-end:
+       old = tracepoint_add_probe(name, probe);
+       if (IS_ERR(old)) {
+               mutex_unlock(&tracepoints_mutex);
+               return PTR_ERR(old);
+       }
+       tracepoint_add_old_probes(old);
        mutex_unlock(&tracepoints_mutex);
-       return ret;
+       return 0;
 }
-EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
+EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
+
+/**
+ * tracepoint_probe_unregister_noupdate -  remove a probe without disconnecting it
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ *
+ * The caller must then call tracepoint_probe_update_all() to disconnect the probe.
+ */
+int tracepoint_probe_unregister_noupdate(const char *name, void *probe)
+{
+       void *old;
+
+       mutex_lock(&tracepoints_mutex);
+       old = tracepoint_remove_probe(name, probe);
+       if (IS_ERR(old)) {
+               mutex_unlock(&tracepoints_mutex);
+               return PTR_ERR(old);
+       }
+       tracepoint_add_old_probes(old);
+       mutex_unlock(&tracepoints_mutex);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
+
+/**
+ * tracepoint_probe_update_all -  update tracepoints and release replaced probes
+ */
+void tracepoint_probe_update_all(void)
+{
+       LIST_HEAD(release_probes);
+       struct tp_probes *pos, *next;
+
+       mutex_lock(&tracepoints_mutex);
+       if (!need_update) {
+               mutex_unlock(&tracepoints_mutex);
+               return;
+       }
+       if (!list_empty(&old_probes))
+               list_replace_init(&old_probes, &release_probes);
+       need_update = 0;
+       mutex_unlock(&tracepoints_mutex);
+
+       tracepoint_update_probes();
+       list_for_each_entry_safe(pos, next, &release_probes, u.list) {
+               list_del(&pos->u.list);
+               call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
+       }
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
 
 /**
  * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
@@ -483,3 +541,36 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
        iter->tracepoint = NULL;
 }
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
+
+#ifdef CONFIG_MODULES
+
+int tracepoint_module_notify(struct notifier_block *self,
+                            unsigned long val, void *data)
+{
+       struct module *mod = data;
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+       case MODULE_STATE_GOING:
+               tracepoint_update_probe_range(mod->tracepoints,
+                       mod->tracepoints + mod->num_tracepoints);
+               break;
+       }
+       return 0;
+}
+
+struct notifier_block tracepoint_module_nb = {
+       .notifier_call = tracepoint_module_notify,
+       .priority = 0,
+};
+
+static int init_tracepoints(void)
+{
+       return register_module_notifier(&tracepoint_module_nb);
+}
+__initcall(init_tracepoints);
+
+#endif /* CONFIG_MODULES */
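The *_noupdate() variants added above let a tracer queue many probe
registrations and unregistrations under tracepoints_mutex and pay the
tracepoint-update and RCU-release cost once, in
tracepoint_probe_update_all(). A minimal sketch of a caller, reusing the
subsys_event probe prototype from the samples further down (the init
function and probe names here are illustrative, not part of this patch):

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/tracepoint.h>

/* Probe matching the sample tracepoint's TPPROTO(inode, file). */
static void probe_subsys_event(struct inode *inode, struct file *file)
{
	/* keep probes short: they run on the instrumented fast path */
}

static int __init batch_probe_init(void)
{
	int ret;

	/* Queue the registration; the tracepoint itself is untouched. */
	ret = tracepoint_probe_register_noupdate("subsys_event",
						 probe_subsys_event);
	if (ret)
		return ret;

	/* ...further *_noupdate() calls could be queued here... */

	/* One pass connects everything queued above and frees old arrays. */
	tracepoint_probe_update_all();
	return 0;
}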
index 06722c4030584382478d7c447ad35f96fe856dd6..bf0cf7c8387b8d92c93dfb18436a7f2cf424cb00 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/hash.h>
 #include <linux/highmem.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <asm/tlbflush.h>
 
 #define POOL_SIZE      64
@@ -21,6 +22,8 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
+DEFINE_TRACE(block_bio_bounce);
+
 #ifdef CONFIG_HIGHMEM
 static __init int init_emergency_pool(void)
 {
@@ -222,7 +225,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        if (!bio)
                return;
 
-       blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+       trace_block_bio_bounce(q, *bio_orig);
 
        /*
         * at least one page was bounced, fill in possible non-highmem
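With the blktrace hook replaced by a generic tracepoint, blktrace (or any
other tracer) now attaches through the register_trace_block_bio_bounce()
wrapper that the DECLARE_TRACE() in <trace/block.h> generates. A hedged
sketch of such a probe (the probe and counter names are illustrative):

#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <trace/block.h>

static unsigned long nr_bounced;

/* Must match the tracepoint prototype: (struct request_queue *, struct bio *). */
static void probe_block_bio_bounce(struct request_queue *q, struct bio *bio)
{
	nr_bounced++;	/* e.g. count bios that had to be bounced */
}

static int __init bounce_watch_init(void)
{
	return register_trace_block_bio_bounce(probe_block_bio_bounce);
}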
index 0216b55bd64075a462d3d32c6abc77a5cdadca4c..01724e04c556339762a272ecff36ec4aa23145d4 100644 (file)
@@ -4,10 +4,10 @@
 #include <linux/proc_fs.h>     /* for struct inode and struct file */
 #include <linux/tracepoint.h>
 
-DEFINE_TRACE(subsys_event,
+DECLARE_TRACE(subsys_event,
        TPPROTO(struct inode *inode, struct file *file),
        TPARGS(inode, file));
-DEFINE_TRACE(subsys_eventb,
+DECLARE_TRACE(subsys_eventb,
        TPPROTO(void),
        TPARGS());
 #endif
index 55abfdda4bd4eeedcf06b034dc7f09ac0688f26e..e3a964889dc7951565b3f45497e0e123762ebab5 100644 (file)
@@ -46,6 +46,7 @@ void __exit tp_sample_trace_exit(void)
 {
        unregister_trace_subsys_eventb(probe_subsys_eventb);
        unregister_trace_subsys_event(probe_subsys_event);
+       tracepoint_synchronize_unregister();
 }
 
 module_exit(tp_sample_trace_exit);
index 5e9fcf4afffeca97bd30befd00f881bc3b28064f..685a5acb456275dd5459891caaa9f29abbdfa2d2 100644 (file)
@@ -33,6 +33,7 @@ module_init(tp_sample_trace_init);
 void __exit tp_sample_trace_exit(void)
 {
        unregister_trace_subsys_event(probe_subsys_event);
+       tracepoint_synchronize_unregister();
 }
 
 module_exit(tp_sample_trace_exit);
index 4ae4b7fcc04327766145a0483d00720dadc2928a..00d169792a3e14847d19e2abef38e4a7c243645b 100644 (file)
@@ -13,6 +13,9 @@
 #include <linux/proc_fs.h>
 #include "tp-samples-trace.h"
 
+DEFINE_TRACE(subsys_event);
+DEFINE_TRACE(subsys_eventb);
+
 struct proc_dir_entry *pentry_example;
 
 static int my_open(struct inode *inode, struct file *file)
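Taken together, the sample changes show the new convention: a header may
only DECLARE_TRACE() a tracepoint, exactly one .c file must DEFINE_TRACE()
it, and module exit paths call tracepoint_synchronize_unregister() before
the probe code can go away. Condensed into one sketch (names taken from
the samples themselves):

/* In a shared header: declaration only, no storage is emitted. */
DECLARE_TRACE(subsys_event,
	TPPROTO(struct inode *inode, struct file *file),
	TPARGS(inode, file));

/* In exactly one .c file: emit the tracepoint definition. */
DEFINE_TRACE(subsys_event);

/* At the instrumentation site: */
trace_subsys_event(inode, file);

/* On teardown: wait for in-flight probe calls before freeing the probe. */
unregister_trace_subsys_event(probe_subsys_event);
tracepoint_synchronize_unregister();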
index 468fbc9016c7b0773db9073a28f655bdf31e372d..7a176773af85a9fee23db5ff6e23edb783c74009 100644 (file)
@@ -198,16 +198,10 @@ cmd_modversions =                                                 \
        fi;
 endif
 
-ifdef CONFIG_64BIT
-arch_bits = 64
-else
-arch_bits = 32
-endif
-
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
-cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl \
-       "$(ARCH)" "$(arch_bits)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" \
-       "$(NM)" "$(RM)" "$(MV)" "$(@)";
+cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
+       "$(if $(CONFIG_64BIT),64,32)" \
+       "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" "$(@)";
 endif
 
 define rule_cc_o_c
index d2c61efc216f4bd413dd271faa3aa62c736c6203..f0af9aa9b243bcb1892ede765a44b2b225921046 100644 (file)
@@ -78,11 +78,13 @@ while (<>) {
 }
 
 if ($count == 0) {
-       print "No data found in the dmesg. Make sure that 'printk.time=1' and\n";
-       print "'initcall_debug' are passed on the kernel command line.\n\n";
-       print "Usage: \n";
-       print "      dmesg | perl scripts/bootgraph.pl > output.svg\n\n";
-       exit;
+    print STDERR <<END;
+No data found in the dmesg. Make sure that 'printk.time=1' and
+'initcall_debug' are passed on the kernel command line.
+Usage:
+      dmesg | perl scripts/bootgraph.pl > output.svg
+END
+    exit 1;
 }
 
 print "<?xml version=\"1.0\" standalone=\"no\"?> \n";
@@ -109,8 +111,8 @@ my $stylecounter = 0;
 my %rows;
 my $rowscount = 1;
 my @initcalls = sort { $start{$a} <=> $start{$b} } keys(%start);
-my $key;
-foreach $key (@initcalls) {
+
+foreach my $key (@initcalls) {
        my $duration = $end{$key} - $start{$key};
 
        if ($duration >= $threshold) {
index 6b9fe3eb836027bff637b912798b3b024a4baeba..0b1dc9f9bb0682a65d9158f8fb44012ea05464b1 100755 (executable)
@@ -112,6 +112,8 @@ my ($arch, $bits, $objdump, $objcopy, $cc,
 # Acceptable sections to record.
 my %text_sections = (
      ".text" => 1,
+     ".sched.text" => 1,
+     ".spinlock.text" => 1,
 );
 
 $objdump = "objdump" if ((length $objdump) == 0);
@@ -130,10 +132,13 @@ my %weak;         # List of weak functions
 my %convert;           # List of local functions used that needs conversion
 
 my $type;
+my $nm_regex;          # Find the local functions (return function)
 my $section_regex;     # Find the start of a section
 my $function_regex;    # Find the name of a function
                        #    (return offset and func name)
 my $mcount_regex;      # Find the call site to mcount (return offset)
+my $alignment;         # The .align value to use for $mcount_section
+my $section_type;      # Section header plus possible alignment command
 
 if ($arch eq "x86") {
     if ($bits == 64) {
@@ -143,11 +148,21 @@ if ($arch eq "x86") {
     }
 }
 
+#
+# The defaults are based on i386; the other archs may
+# override them in the if statements below.
+#
+$nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
+$section_regex = "Disassembly of section\\s+(\\S+):";
+$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
+$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
+$section_type = '@progbits';
+$type = ".long";
+
 if ($arch eq "x86_64") {
-    $section_regex = "Disassembly of section\\s+(\\S+):";
-    $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$";
     $type = ".quad";
+    $alignment = 8;
 
     # force flags for this arch
     $ld .= " -m elf_x86_64";
@@ -156,10 +171,7 @@ if ($arch eq "x86_64") {
     $cc .= " -m64";
 
 } elsif ($arch eq "i386") {
-    $section_regex = "Disassembly of section\\s+(\\S+):";
-    $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
-    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
-    $type = ".long";
+    $alignment = 4;
 
     # force flags for this arch
     $ld .= " -m elf_i386";
@@ -167,6 +179,27 @@ if ($arch eq "x86_64") {
     $objcopy .= " -O elf32-i386";
     $cc .= " -m32";
 
+} elsif ($arch eq "sh") {
+    $alignment = 2;
+
+    # force flags for this arch
+    $ld .= " -m shlelf_linux";
+    $objcopy .= " -O elf32-sh-linux";
+    $cc .= " -m32";
+
+} elsif ($arch eq "powerpc") {
+    $nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
+
+    if ($bits == 64) {
+       $type = ".quad";
+    }
+
+} elsif ($arch eq "arm") {
+    $alignment = 2;
+    $section_type = '%progbits';
+
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
@@ -236,7 +269,7 @@ if (!$found_version) {
 #
 open (IN, "$nm $inputfile|") || die "error running $nm";
 while (<IN>) {
-    if (/^[0-9a-fA-F]+\s+t\s+(\S+)/) {
+    if (/$nm_regex/) {
        $locals{$1} = 1;
     } elsif (/^[0-9a-fA-F]+\s+([wW])\s+(\S+)/) {
        $weak{$2} = $1;
@@ -287,7 +320,8 @@ sub update_funcs
        if (!$opened) {
            open(FILE, ">$mcount_s") || die "can't create $mcount_s\n";
            $opened = 1;
-           print FILE "\t.section $mcount_section,\"a\",\@progbits\n";
+           print FILE "\t.section $mcount_section,\"a\",$section_type\n";
+           print FILE "\t.align $alignment\n" if (defined($alignment));
        }
        printf FILE "\t%s %s + %d\n", $type, $ref_func, $offsets[$i] - $offset;
     }
diff --git a/scripts/trace/power.pl b/scripts/trace/power.pl
new file mode 100644 (file)
index 0000000..4f729b3
--- /dev/null
@@ -0,0 +1,108 @@
+#!/usr/bin/perl
+
+# Copyright 2008, Intel Corporation
+#
+# This file is part of the Linux kernel
+#
+# This program file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program in a file named COPYING; if not, write to the
+# Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301 USA
+#
+# Authors:
+#      Arjan van de Ven <arjan@linux.intel.com>
+
+
+#
+# This script turns C-state ftrace output into an SVG graphic that shows
+# historic C-state and P-state information.
+#
+#
+#      cat /sys/kernel/debug/tracing/trace | perl power.pl > out.svg
+#
+
+my @styles;
+my $base = 0;
+
+my @pstate_last;
+my @pstate_state;
+
+$styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+$styles[1] = "fill:rgb(0,255,0);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+$styles[2] = "fill:rgb(255,0,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+$styles[3] = "fill:rgb(255,255,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+$styles[4] = "fill:rgb(255,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+$styles[5] = "fill:rgb(0,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+$styles[6] = "fill:rgb(0,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+$styles[7] = "fill:rgb(0,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+$styles[8] = "fill:rgb(0,25,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+
+
+print "<?xml version=\"1.0\" standalone=\"no\"?> \n";
+print "<svg width=\"10000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n";
+
+my $scale = 30000.0;
+while (<>) {
+       my $line = $_;
+       if ($line =~ /([0-9\.]+)\] CSTATE: Going to C([0-9]) on cpu ([0-9]+) for ([0-9\.]+)/) {
+               if ($base == 0) {
+                       $base = $1;
+               }
+               my $time = $1 - $base;
+               $time = $time * $scale;
+               my $C = $2;
+               my $cpu = $3;
+               my $y = 400 * $cpu;
+               my $duration = $4 * $scale;
+               my $msec = int($4 * 100000)/100.0;
+               my $height = $C * 20;
+               my $style = $styles[$C];
+
+               $y = $y + 140 - $height;
+
+               my $x2 = $time + 4;
+               my $y2 = $y + 4;
+
+
+               print "<rect x=\"$time\" width=\"$duration\" y=\"$y\" height=\"$height\" style=\"$style\"/>\n";
+               print "<text transform=\"translate($x2,$y2) rotate(90)\">C$C $msec</text>\n";
+       }
+       if ($line =~ /([0-9\.]+)\] PSTATE: Going to P([0-9]) on cpu ([0-9]+)/) {
+               my $time = $1 - $base;
+               my $state = $2;
+               my $cpu = $3;
+
+               if (defined($pstate_last[$cpu])) {
+                       my $from = $pstate_last[$cpu];
+                       my $oldstate = $pstate_state[$cpu];
+                       my $duration = ($time-$from) * $scale;
+
+                       $from = $from * $scale;
+                       my $to = $from + $duration;
+                       my $height = 140 - ($oldstate * (140/8));
+
+                       my $y = 400 * $cpu + 200 + $height;
+                       my $y2 = $y+4;
+                       my $style = $styles[8];
+
+                       print "<rect x=\"$from\" y=\"$y\" width=\"$duration\" height=\"5\" style=\"$style\"/>\n";
+                       print "<text transform=\"translate($from,$y2)\">P$oldstate (cpu $cpu)</text>\n";
+               };
+
+               $pstate_last[$cpu] = $time;
+               $pstate_state[$cpu] = $state;
+       }
+}
+
+
+print "</svg>\n";
diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py
new file mode 100644 (file)
index 0000000..902f9a9
--- /dev/null
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+
+"""
+Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
+Licensed under the terms of the GNU GPL License version 2
+
+This script parses a trace provided by the function tracer in
+kernel/trace/trace_functions.c
+The resulting trace is processed into a tree to produce a more
+human-readable view of the call stack, drawn as a textual but
+hierarchical tree of calls. Only the function names and call times are
+provided.
+
+Usage:
+       Be sure that CONFIG_FUNCTION_TRACER is enabled.
+       # mkdir /debug
+       # mount -t debugfs nodev /debug
+       # echo function > /debug/tracing/current_tracer
+       $ cat /debug/tracing/trace_pipe > ~/raw_trace_func
+       Wait some time, but not too long; the script is a bit slow.
+       Break the pipe (Ctrl + C)
+       $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
+       The drawn trace is then available in draw_functrace.
+"""
+
+
+import sys, re
+
+class CallTree:
+       """ This class provides a tree representation of the functions
+               call stack. If a function has no parent in the kernel (interrupt,
+               syscall, kernel thread...) then it is attached to a virtual parent
+               called ROOT.
+       """
+       ROOT = None
+
+       def __init__(self, func, time = None, parent = None):
+               self._func = func
+               self._time = time
+               if parent is None:
+                       self._parent = CallTree.ROOT
+               else:
+                       self._parent = parent
+               self._children = []
+
+       def calls(self, func, calltime):
+               """ If a function calls another one, call this method to insert it
+                       into the tree at the appropriate place.
+                       @return: A reference to the newly created child node.
+               """
+               child = CallTree(func, calltime, self)
+               self._children.append(child)
+               return child
+
+       def getParent(self, func):
+               """ Retrieve the last parent of the current node that
+                       has the name given by func. If this function is not
+                       on a parent, then create it as new child of root
+                       @return: A reference to the parent.
+               """
+               tree = self
+               while tree != CallTree.ROOT and tree._func != func:
+                       tree = tree._parent
+               if tree == CallTree.ROOT:
+                       child = CallTree.ROOT.calls(func, None)
+                       return child
+               return tree
+
+       def __repr__(self):
+               return self.__toString("", True)
+
+       def __toString(self, branch, lastChild):
+               if self._time is not None:
+                       s = "%s----%s (%s)\n" % (branch, self._func, self._time)
+               else:
+                       s = "%s----%s\n" % (branch, self._func)
+
+               i = 0
+               if lastChild:
+                       branch = branch[:-1] + " "
+               while i < len(self._children):
+                       if i != len(self._children) - 1:
+                               s += "%s" % self._children[i].__toString(branch +\
+                                                               "    |", False)
+                       else:
+                               s += "%s" % self._children[i].__toString(branch +\
+                                                               "    |", True)
+                       i += 1
+               return s
+
+class BrokenLineException(Exception):
+       """If the last line is not complete because of the pipe breakage,
+          we want to stop the processing and ignore this line.
+       """
+       pass
+
+class CommentLineException(Exception):
+       """ If the line is a comment (as in the beginning of the trace file),
+           just ignore it.
+       """
+       pass
+
+
+def parseLine(line):
+       line = line.strip()
+       if line.startswith("#"):
+               raise CommentLineException
+       m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
+       if m is None:
+               raise BrokenLineException
+       return (m.group(1), m.group(2), m.group(3))
+
+
+def main():
+       CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
+       tree = CallTree.ROOT
+
+       for line in sys.stdin:
+               try:
+                       calltime, callee, caller = parseLine(line)
+               except BrokenLineException:
+                       break
+               except CommentLineException:
+                       continue
+               tree = tree.getParent(caller)
+               tree = tree.calls(callee, calltime)
+
+       print CallTree.ROOT
+
+if __name__ == "__main__":
+       main()