git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
kernel/kprobes.c: convert printk to pr_foo()
author    Fabian Frederick <fabf@skynet.be>
          Sat, 17 May 2014 13:19:24 +0000 (23:19 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Wed, 21 May 2014 07:11:32 +0000 (17:11 +1000)
Also fixes some checkpatch warnings:
- Static initialization
- Lines over 80 characters

Signed-off-by: Fabian Frederick <fabf@skynet.be>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
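
For context, the pr_foo() helpers this commit switches to are thin wrappers around printk() that bake in the log level plus an optional per-file pr_fmt() prefix, so printk(KERN_INFO "...") and pr_info("...") emit the same message. The sketch below imitates that pattern from include/linux/printk.h as plain user-space C purely for illustration; the macro names mirror the kernel's, but the bodies are simplified stand-ins, not the real header.

/*
 * Minimal user-space sketch of the printk() -> pr_foo() pattern applied in
 * this commit.  Names mirror include/linux/printk.h, but the definitions are
 * simplified illustrations (the kernel encodes levels as "\001" "4", etc.).
 */
#include <stdio.h>

#define KERN_ERR     "<3>"
#define KERN_WARNING "<4>"
#define KERN_INFO    "<6>"

/* Stand-in for the kernel logger. */
#define printk(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

/*
 * pr_fmt() lets a source file prepend a common prefix (e.g. "kprobes: ")
 * to every message; by default it leaves the format string unchanged.
 */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif

/* Each pr_foo() helper is just printk() with the matching level prepended. */
#define pr_err(fmt, ...)  printk(KERN_ERR     pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printk(KERN_INFO    pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	void *addr = (void *)0x1234;	/* hypothetical probe address */

	/* Before: printk(KERN_INFO "Kprobes globally optimized\n"); */
	pr_info("Kprobes globally optimized\n");

	/* Before: printk(KERN_WARNING "Warning: found a stray unused "
	 *                "aggrprobe@%p\n", ap->addr);                  */
	pr_warn("Warning: found a stray unused aggrprobe@%p\n", addr);

	return 0;
}

The other checkpatch fix in the diff, dropping "= NULL" from the DEFINE_PER_CPU() declaration, is purely cosmetic: static (and per-CPU) storage is zero-initialized anyway, so the explicit initializer adds nothing.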
kernel/kprobes.c

index 3214289df5a7a8f6917718a9a00f418794efeab1..2ac9f133c4da4a25317d3e7aa5e184816eedf056 100644
@@ -76,7 +76,7 @@ static bool kprobes_all_disarmed;
 
 /* This protects kprobe_table and optimizing_list */
 static DEFINE_MUTEX(kprobe_mutex);
-static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
 static struct {
        raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
@@ -297,9 +297,9 @@ static inline void reset_kprobe_instance(void)
 
 /*
  * This routine is called either:
- *     - under the kprobe_mutex - during kprobe_[un]register()
- *                             OR
- *     - with preemption disabled - from arch/xxx/kernel/kprobes.c
+ *     - under the kprobe_mutex - during kprobe_[un]register()
+ *                             OR
+ *     - with preemption disabled - from arch/xxx/kernel/kprobes.c
  */
 struct kprobe *get_kprobe(void *addr)
 {
@@ -567,7 +567,8 @@ static void wait_for_kprobe_optimizer(void)
 {
        mutex_lock(&kprobe_mutex);
 
-       while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
+       while (!list_empty(&optimizing_list) ||
+              !list_empty(&unoptimizing_list)) {
                mutex_unlock(&kprobe_mutex);
 
                /* this will also make optimizing_work execute immmediately */
@@ -676,8 +677,8 @@ static void reuse_unused_kprobe(struct kprobe *ap)
         */
        op = container_of(ap, struct optimized_kprobe, kp);
        if (unlikely(list_empty(&op->list)))
-               printk(KERN_WARNING "Warning: found a stray unused "
-                       "aggrprobe@%p\n", ap->addr);
+               pr_warn("Warning: found a stray unused aggrprobe@%p\n",
+                       ap->addr);
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
@@ -794,7 +795,7 @@ static void optimize_all_kprobes(void)
                        if (!kprobe_disabled(p))
                                optimize_kprobe(p);
        }
-       printk(KERN_INFO "Kprobes globally optimized\n");
+       pr_info("Kprobes globally optimized\n");
 out:
        mutex_unlock(&kprobe_mutex);
 }
@@ -824,7 +825,7 @@ static void unoptimize_all_kprobes(void)
 
        /* Wait for unoptimizing completion */
        wait_for_kprobe_optimizer();
-       printk(KERN_INFO "Kprobes globally unoptimized\n");
+       pr_info("Kprobes globally unoptimized\n");
 }
 
 static DEFINE_MUTEX(kprobe_sysctl_mutex);
@@ -896,7 +897,7 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 /* There should be no unused kprobes can be reused without optimization */
 static void reuse_unused_kprobe(struct kprobe *ap)
 {
-       printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
+       pr_err("Error: There should be no unused kprobe here.\n");
        BUG_ON(kprobe_unused(ap));
 }
 
@@ -955,7 +956,8 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
        }
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                           (unsigned long)p->addr, 1, 0);
-       WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+       WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n",
+            p->addr, ret);
 }
 #else  /* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)      arch_prepare_kprobe(p)
@@ -1389,7 +1391,7 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
        if (p != ap) {
                list_for_each_entry_rcu(list_p, &ap->list, list)
                        if (list_p == p)
-                       /* kprobe p is a valid probe */
+                               /* kprobe p is a valid probe */
                                goto valid;
                return NULL;
        }
@@ -2018,8 +2020,8 @@ EXPORT_SYMBOL_GPL(enable_kprobe);
 
 void dump_kprobe(struct kprobe *kp)
 {
-       printk(KERN_WARNING "Dumping kprobe:\n");
-       printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
+       pr_warn("Dumping kprobe:\n");
+       pr_warn("Name: %s\nAddress: %p\nOffset: %x\n",
               kp->symbol_name, kp->addr, kp->offset);
 }
 NOKPROBE_SYMBOL(dump_kprobe);
@@ -2128,7 +2130,7 @@ static int __init init_kprobes(void)
                        kprobe_lookup_name(kretprobe_blacklist[i].name,
                                           kretprobe_blacklist[i].addr);
                        if (!kretprobe_blacklist[i].addr)
-                               printk("kretprobe: lookup failed: %s\n",
+                               pr_warn("kretprobe: lookup failed: %s\n",
                                       kretprobe_blacklist[i].name);
                }
        }
@@ -2310,7 +2312,7 @@ static void arm_all_kprobes(void)
        }
 
        kprobes_all_disarmed = false;
-       printk(KERN_INFO "Kprobes globally enabled\n");
+       pr_info("Kprobes globally enabled\n");
 
 already_enabled:
        mutex_unlock(&kprobe_mutex);
@@ -2332,7 +2334,7 @@ static void disarm_all_kprobes(void)
        }
 
        kprobes_all_disarmed = true;
-       printk(KERN_INFO "Kprobes globally disabled\n");
+       pr_info("Kprobes globally disabled\n");
 
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];