ftrace: Fix breakage of set_ftrace_pid
author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>
           Fri, 24 Jul 2015 14:38:12 +0000 (10:38 -0400)
committer  Steven Rostedt <rostedt@goodmis.org>
           Fri, 24 Jul 2015 17:58:14 +0000 (13:58 -0400)
Commit 4104d326b670 ("ftrace: Remove global function list and call function
directly") simplified the ftrace code by removing the global_ops list in
favor of a new design. But this cleanup also broke the filtering of PIDs
that are added to the set_ftrace_pid file.

Add back the proper hooks to have pid filtering working once again.

Cc: stable@vger.kernel.org # 3.16+
Reported-by: Matt Fleming <matt@console-pimps.org>
Reported-by: Richard Weinberger <richard.weinberger@gmail.com>
Tested-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
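
Before the hunks themselves, a minimal user-space sketch of the mechanism this
patch restores: an ops flagged FTRACE_OPS_FL_PID keeps its original callback in
->saved_func, has ->func redirected to ftrace_pid_func() while set_ftrace_pid
is non-empty, and ftrace_pid_func() forwards to the saved callback only when
the current task is one of the traced pids. This is an illustration, not the
kernel implementation: only the names saved_func, FTRACE_OPS_FL_PID and
ftrace_pid_func come from the diff below; register_ops() and the two booleans
are simplified stand-ins for the real kernel state.

/*
 * NOT kernel code: a stand-alone model of the call path restored here.
 */
#include <stdbool.h>
#include <stdio.h>

struct ftrace_ops;
typedef void (*ftrace_func_t)(unsigned long ip, struct ftrace_ops *op);

struct ftrace_ops {
        ftrace_func_t func;       /* callback actually invoked */
        ftrace_func_t saved_func; /* original callback, restored on unregister */
        unsigned long flags;
};

#define FTRACE_OPS_FL_PID (1 << 14)

static bool pids_enabled;        /* stand-in for ftrace_pids_enabled() */
static bool current_task_traced; /* stand-in for test_tsk_trace_trace(current) */

/* Wrapper installed in ->func while set_ftrace_pid is non-empty. */
static void ftrace_pid_func(unsigned long ip, struct ftrace_ops *op)
{
        if (!current_task_traced)
                return;
        op->saved_func(ip, op);   /* forward to the real callback */
}

/* Mirrors the __register_ftrace_function() hunk: save, then maybe redirect. */
static void register_ops(struct ftrace_ops *op)
{
        op->saved_func = op->func;
        if ((op->flags & FTRACE_OPS_FL_PID) && pids_enabled)
                op->func = ftrace_pid_func;
}

static void my_callback(unsigned long ip, struct ftrace_ops *op)
{
        (void)op;
        printf("traced ip %#lx\n", ip);
}

int main(void)
{
        struct ftrace_ops ops = { .func = my_callback, .flags = FTRACE_OPS_FL_PID };

        pids_enabled = true;
        register_ops(&ops);

        current_task_traced = false;
        ops.func(0x1000UL, &ops); /* current task not in set_ftrace_pid: filtered */

        current_task_traced = true;
        ops.func(0x2000UL, &ops); /* current task is traced: callback runs */

        return 0;
}

On unregister the real code simply writes ops->func = ops->saved_func back,
which is why the callback is saved unconditionally at registration time.
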
include/linux/ftrace.h
kernel/trace/ftrace.c

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da602982cf93a6a262bef85967945ffea9c586a..6cd8c0ee4b6f89a9ab93b67515cb6e214e108071 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  *            SAVE_REGS. If another ops with this flag set is already registered
  *            for any of the functions that this ops will be registered for, then
  *            this ops will fail to register or set_filter_ip.
+ * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
  */
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -132,6 +133,7 @@ enum {
        FTRACE_OPS_FL_MODIFYING                 = 1 << 11,
        FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 12,
        FTRACE_OPS_FL_IPMODIFY                  = 1 << 13,
+       FTRACE_OPS_FL_PID                       = 1 << 14,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
        struct ftrace_ops               *next;
        unsigned long                   flags;
        void                            *private;
+       ftrace_func_t                   saved_func;
        int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        int                             nr_trampolines;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea36bb835fc45a9aa55c1aedd69f9f..eb11011b5292add880af7038800560aa29c5a674 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
        struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+       return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
        if (!test_tsk_trace_trace(current))
                return;
 
-       ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-       /* do not set ftrace_pid_function to itself! */
-       if (func != ftrace_pid_func)
-               ftrace_pid_function = func;
+       op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
        ftrace_trace_function = ftrace_stub;
-       ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);
 
+       /* Always save the function, and reset at unregistering */
+       ops->saved_func = ops->func;
+
+       if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+               ops->func = ftrace_pid_func;
+
        ftrace_update_trampoline(ops);
 
        if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (ftrace_enabled)
                update_ftrace_function();
 
+       ops->func = ops->saved_func;
+
        return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+       bool enabled = ftrace_pids_enabled();
+       struct ftrace_ops *op;
+
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;
 
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->flags & FTRACE_OPS_FL_PID) {
+                       op->func = enabled ? ftrace_pid_func :
+                               op->saved_func;
+                       ftrace_update_trampoline(op);
+               }
+       } while_for_each_ftrace_op(op);
+
        update_ftrace_function();
 }
 
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
-                                         FTRACE_OPS_FL_INITIALIZED,
+                                         FTRACE_OPS_FL_INITIALIZED |
+                                         FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                 FTRACE_OPS_FL_INITIALIZED |
+                                 FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
                if (WARN_ON(tr->ops->func != ftrace_stub))
                        printk("ftrace ops had %pS for function\n",
                               tr->ops->func);
-               /* Only the top level instance does pid tracing */
-               if (!list_empty(&ftrace_pids)) {
-                       set_ftrace_pid_function(func);
-                       func = ftrace_pid_func;
-               }
        }
        tr->ops->func = func;
        tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
        mutex_lock(&ftrace_lock);
 
-       if (list_empty(&ftrace_pids) && (!*pos))
+       if (!ftrace_pids_enabled() && (!*pos))
                return (void *) 1;
 
        return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
        .func                   = ftrace_stub,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
                                   FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_PID |
                                   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
        .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,