diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 32244186f1f2ae0e7a6343ad084f416aa0cda055..8ef1919d63b2401a4832404d92c598c3e656b090 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
  *
  * Copyright (C) IBM Corporation, 2014
  *
- * Author: Paul E. McKenney <paulmck@us.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ *          Davidlohr Bueso <dave@stgolabs.net>
  *     Based on kernel/rcu/torture.c.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
+#include <linux/sched/rt.h>
 #include <linux/spinlock.h>
 #include <linux/rwlock.h>
 #include <linux/mutex.h>
@@ -34,6 +36,7 @@
 #include <linux/moduleparam.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/percpu-rwsem.h>
 #include <linux/torture.h>
 
 MODULE_LICENSE("GPL");
@@ -91,11 +94,13 @@ struct lock_torture_ops {
        void (*init)(void);
        int (*writelock)(void);
        void (*write_delay)(struct torture_random_state *trsp);
+       void (*task_boost)(struct torture_random_state *trsp);
        void (*writeunlock)(void);
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *trsp);
        void (*readunlock)(void);
-       unsigned long flags;
+
+       unsigned long flags; /* for irq spinlocks */
        const char *name;
 };
 
@@ -139,9 +144,15 @@ static void torture_lock_busted_write_unlock(void)
          /* BUGGY, do not use in real life!!! */
 }
 
+static void torture_boost_dummy(struct torture_random_state *trsp)
+{
+       /* Only rtmutexes care about priority */
+}
+
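
Registering torture_boost_dummy in every non-rtmutex ops table below is a small null-object pattern: the writer kthread can call cxt.cur_ops->task_boost() unconditionally rather than NULL-checking the hook first. A self-contained sketch of the idea (struct ops, boost_dummy, and demo are illustrative names, not from this file):

#include <stdio.h>

struct ops {
	void (*task_boost)(int *state);
};

/* Dummy hook: non-rt locks have nothing to boost. */
static void boost_dummy(int *state)
{
	(void)state;
}

static void demo(const struct ops *ops)
{
	int state = 0;

	ops->task_boost(&state);	/* no NULL check needed at call sites */
}

int main(void)
{
	struct ops o = { .task_boost = boost_dummy };

	demo(&o);
	puts("ok");
	return 0;
}
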
 static struct lock_torture_ops lock_busted_ops = {
        .writelock      = torture_lock_busted_write_lock,
        .write_delay    = torture_lock_busted_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_lock_busted_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -185,6 +196,7 @@ static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_ops = {
        .writelock      = torture_spin_lock_write_lock,
        .write_delay    = torture_spin_lock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_spin_lock_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -211,6 +223,7 @@ __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_irq_ops = {
        .writelock      = torture_spin_lock_write_lock_irq,
        .write_delay    = torture_spin_lock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_lock_spin_write_unlock_irq,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -275,6 +288,7 @@ static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_ops = {
        .writelock      = torture_rwlock_write_lock,
        .write_delay    = torture_rwlock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwlock_write_unlock,
        .readlock       = torture_rwlock_read_lock,
        .read_delay     = torture_rwlock_read_delay,
@@ -315,6 +329,7 @@ __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_irq_ops = {
        .writelock      = torture_rwlock_write_lock_irq,
        .write_delay    = torture_rwlock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwlock_write_unlock_irq,
        .readlock       = torture_rwlock_read_lock_irq,
        .read_delay     = torture_rwlock_read_delay,
@@ -354,6 +369,7 @@ static void torture_mutex_unlock(void) __releases(torture_mutex)
 static struct lock_torture_ops mutex_lock_ops = {
        .writelock      = torture_mutex_lock,
        .write_delay    = torture_mutex_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_mutex_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -361,6 +377,90 @@ static struct lock_torture_ops mutex_lock_ops = {
        .name           = "mutex_lock"
 };
 
+#ifdef CONFIG_RT_MUTEXES
+static DEFINE_RT_MUTEX(torture_rtmutex);
+
+static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
+{
+       rt_mutex_lock(&torture_rtmutex);
+       return 0;
+}
+
+static void torture_rtmutex_boost(struct torture_random_state *trsp)
+{
+       int policy;
+       struct sched_param param;
+       const unsigned int factor = 50000; /* yes, quite arbitrary */
+
+       if (!rt_task(current)) {
+               /*
+                * Boost priority once every ~50k operations. When the
+                * task tries to take the lock, the rtmutex code will
+                * account for the new priority, and do any corresponding
+                * pi-dance.
+                */
+               if (!(torture_random(trsp) %
+                     (cxt.nrealwriters_stress * factor))) {
+                       policy = SCHED_FIFO;
+                       param.sched_priority = MAX_RT_PRIO - 1;
+               } else /* common case, do nothing */
+                       return;
+       } else {
+               /*
+                * The task will remain boosted for another ~500k operations,
+                * then restored to its original prio, and so forth.
+                *
+                * When @trsp is NULL, we want to force-reset the task's
+                * priority before stopping the kthread.
+                */
+               if (!trsp || !(torture_random(trsp) %
+                              (cxt.nrealwriters_stress * factor * 2))) {
+                       policy = SCHED_NORMAL;
+                       param.sched_priority = 0;
+               } else /* common case, do nothing */
+                       return;
+       }
+
+       sched_setscheduler_nocheck(current, policy, &param);
+}
+
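
For intuition about the boost frequency: the first modulus fires with probability 1/(cxt.nrealwriters_stress * factor) per call, and the deboost one half as often. A standalone userspace sketch of the same trick (hypothetical names; glibc's rand() standing in for torture_random(), and a writer count of 4 assumed):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int factor = 50000;	/* as in the patch */
	const long nwriters = 4;		/* assumed writer count */
	const long calls = 10 * 1000 * 1000;
	long boosts = 0;

	srand(1);
	for (long i = 0; i < calls; i++)
		if (!(rand() % (nwriters * factor)))
			boosts++;
	/* Expect about calls / (nwriters * factor) = 50 boosts. */
	printf("boosted %ld times in %ld calls\n", boosts, calls);
	return 0;
}
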
+static void torture_rtmutex_delay(struct torture_random_state *trsp)
+{
+       const unsigned long shortdelay_us = 2;
+       const unsigned long longdelay_ms = 100;
+
+       /*
+        * We want a short delay mostly to emulate likely code, and
+        * we want a long delay occasionally to force massive contention.
+        */
+       if (!(torture_random(trsp) %
+             (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+               mdelay(longdelay_ms);
+       if (!(torture_random(trsp) %
+             (cxt.nrealwriters_stress * 2 * shortdelay_us)))
+               udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+       if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+               preempt_schedule();  /* Allow test to be preempted. */
+#endif
+}
+
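
Plugging an assumed writer count of 4 into those moduli shows the intent: the long 100 ms delay lands about once per 800,000 operations and the short 2 us delay about once per 16, so contention is mostly light with rare pile-ups. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned long shortdelay_us = 2, longdelay_ms = 100;
	const unsigned long nwriters = 4;	/* assumed writer count */

	/* Average operations between delays of each kind. */
	printf("mdelay(%lu): ~1 per %lu ops\n",
	       longdelay_ms, nwriters * 2000 * longdelay_ms);	/* 800000 */
	printf("udelay(%lu): ~1 per %lu ops\n",
	       shortdelay_us, nwriters * 2 * shortdelay_us);	/* 16 */
	return 0;
}
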
+static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
+{
+       rt_mutex_unlock(&torture_rtmutex);
+}
+
+static struct lock_torture_ops rtmutex_lock_ops = {
+       .writelock      = torture_rtmutex_lock,
+       .write_delay    = torture_rtmutex_delay,
+       .task_boost     = torture_rtmutex_boost,
+       .writeunlock    = torture_rtmutex_unlock,
+       .readlock       = NULL,
+       .read_delay     = NULL,
+       .readunlock     = NULL,
+       .name           = "rtmutex_lock"
+};
+#endif
+
 static DECLARE_RWSEM(torture_rwsem);
 static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
 {
@@ -419,6 +519,7 @@ static void torture_rwsem_up_read(void) __releases(torture_rwsem)
 static struct lock_torture_ops rwsem_lock_ops = {
        .writelock      = torture_rwsem_down_write,
        .write_delay    = torture_rwsem_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwsem_up_write,
        .readlock       = torture_rwsem_down_read,
        .read_delay     = torture_rwsem_read_delay,
@@ -426,6 +527,48 @@ static struct lock_torture_ops rwsem_lock_ops = {
        .name           = "rwsem_lock"
 };
 
+static struct percpu_rw_semaphore pcpu_rwsem;
+
+static void torture_percpu_rwsem_init(void)
+{
+       BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
+}
+
+static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
+{
+       percpu_down_write(&pcpu_rwsem);
+       return 0;
+}
+
+static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
+{
+       percpu_up_write(&pcpu_rwsem);
+}
+
+static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
+{
+       percpu_down_read(&pcpu_rwsem);
+       return 0;
+}
+
+static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
+{
+       percpu_up_read(&pcpu_rwsem);
+}
+
+static struct lock_torture_ops percpu_rwsem_lock_ops = {
+       .init           = torture_percpu_rwsem_init,
+       .writelock      = torture_percpu_rwsem_down_write,
+       .write_delay    = torture_rwsem_write_delay,
+       .task_boost     = torture_boost_dummy,
+       .writeunlock    = torture_percpu_rwsem_up_write,
+       .readlock       = torture_percpu_rwsem_down_read,
+       .read_delay     = torture_rwsem_read_delay,
+       .readunlock     = torture_percpu_rwsem_up_read,
+       .name           = "percpu_rwsem_lock"
+};
+
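
For readers new to percpu_rw_semaphore: the API mirrors the plain rwsem calls used above, trading a cheap per-CPU read-side fast path for a write side that must synchronize with all readers. A minimal usage sketch outside the torture framework (my_sem and my_example are illustrative names, not part of this patch):

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_sem;	/* illustrative */

static int my_example(void)
{
	int ret = percpu_init_rwsem(&my_sem);

	if (ret)
		return ret;

	percpu_down_read(&my_sem);	/* cheap per-CPU fast path */
	/* ... read-side critical section ... */
	percpu_up_read(&my_sem);

	percpu_down_write(&my_sem);	/* slow: waits out all readers */
	/* ... write-side critical section ... */
	percpu_up_write(&my_sem);

	percpu_free_rwsem(&my_sem);
	return 0;
}
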
 /*
  * Lock torture writer kthread.  Repeatedly acquires and releases
  * the lock, checking for duplicate acquisitions.
@@ -442,6 +585,7 @@ static int lock_torture_writer(void *arg)
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);
 
+               cxt.cur_ops->task_boost(&rand);
                cxt.cur_ops->writelock();
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;
@@ -456,6 +600,8 @@ static int lock_torture_writer(void *arg)
 
                stutter_wait("lock_torture_writer");
        } while (!torture_must_stop());
+
+       cxt.cur_ops->task_boost(NULL); /* reset prio */
        torture_kthread_stopping("lock_torture_writer");
        return 0;
 }
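
The duplicate-acquisition check is the core of the writer test: if a writer ever finds lock_is_write_held already set after taking the lock, the primitive under test is broken. A rough userspace analogue of the loop using a pthread mutex (writer, write_held, must_stop, and n_fail are illustrative names):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool write_held;
static atomic_bool must_stop;
static long n_fail;

static void *writer(void *arg)
{
	(void)arg;
	while (!atomic_load(&must_stop)) {
		pthread_mutex_lock(&lock);
		if (atomic_exchange(&write_held, true))
			n_fail++;	/* another writer inside: broken lock */
		/* ... brief critical section, optional random delay ... */
		atomic_store(&write_held, false);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}
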
@@ -642,7 +788,11 @@ static int __init lock_torture_init(void)
                &spin_lock_ops, &spin_lock_irq_ops,
                &rw_lock_ops, &rw_lock_irq_ops,
                &mutex_lock_ops,
+#ifdef CONFIG_RT_MUTEXES
+               &rtmutex_lock_ops,
+#endif
                &rwsem_lock_ops,
+               &percpu_rwsem_lock_ops,
        };
 
        if (!torture_init_begin(torture_type, verbose, &torture_runnable))
@@ -661,11 +811,11 @@ static int __init lock_torture_init(void)
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                        pr_alert(" %s", torture_ops[i]->name);
                pr_alert("\n");
-               torture_init_end();
-               return -EINVAL;
+               firsterr = -EINVAL;
+               goto unwind;
        }
        if (cxt.cur_ops->init)
-               cxt.cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+               cxt.cur_ops->init();
 
        if (nwriters_stress >= 0)
                cxt.nrealwriters_stress = nwriters_stress;
@@ -676,6 +826,10 @@ static int __init lock_torture_init(void)
        if (strncmp(torture_type, "mutex", 5) == 0)
                cxt.debug_lock = true;
 #endif
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+       if (strncmp(torture_type, "rtmutex", 7) == 0)
+               cxt.debug_lock = true;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        if ((strncmp(torture_type, "spin", 4) == 0) ||
            (strncmp(torture_type, "rw_lock", 7) == 0))