kernel/locking/locktorture.c (karo-tx-linux.git, at commit "locktorture: Support rtmutex torturing")
/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

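/*
 * Example invocation (illustrative values; the parameter names are the
 * module parameters defined above):
 *
 *   modprobe locktorture torture_type=rw_lock nwriters_stress=4 \
 *            nreaders_stress=8 stat_interval=30 verbose=1
 */
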
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

#if defined(MODULE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
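/*
 * readlock, read_delay, and readunlock may be NULL for exclusive-only
 * primitives; the core checks cur_ops->readlock before creating reader
 * kthreads. The flags field holds the saved irq state for the *_irq
 * variants.
 */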

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL };
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
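	/*
	 * A note on the pattern: "!(torture_random(trsp) % N)" fires with
	 * probability roughly 1/N, and N is scaled by the writer count so
	 * that the system-wide rate of injected long delays stays roughly
	 * constant as the number of stress threads grows.
	 */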
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_lock_busted_write_unlock(void)
{
	  /* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock"
};

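/*
 * The irq-disabling variants stash the saved flags in the shared
 * cur_ops->flags field rather than on the stack. For an exclusive lock
 * that is safe: the field is written only after the lock is acquired and
 * read before it is released, so it is protected by the lock itself.
 */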
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

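/*
 * As above, the saved irq flags are parked in the shared cur_ops->flags
 * field. On the read side, concurrent readers can overwrite each other's
 * saved flags; this is benign here only because all torture kthreads run
 * with interrupts enabled and thus save identical flag values. It is not
 * a pattern to copy into real code.
 */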
static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "mutex_lock"
};

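/*
 * The rtmutex variant additionally exercises priority inheritance:
 * torture_rtmutex_boost() below periodically flips the writer between
 * SCHED_NORMAL and SCHED_FIFO, so that acquisitions sometimes come from
 * an RT task and rt_mutex_lock() must do the corresponding pi-dance.
 */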
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * (1) Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (!(torture_random(trsp) %
		      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then be restored to its original priority, and so forth.
		 *
		 * When @trsp is NULL, force-reset the task's scheduling
		 * state; this is used when stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	/* Scale by the reader count here; scaling by the writer count could
	 * divide by zero when writers are disabled (nwriters_stress=0). */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
#ifdef CONFIG_PREEMPT
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		preempt_schedule();  /* Allow test to be preempted. */
#endif
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = true;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = false;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = true;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = false;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
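/*
 * Example of a resulting line (illustrative numbers only):
 *   Writes:  Total: 27327360  Max/Min: 1912351/1706595   Fail: 0
 */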
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = false;
	int i, n_stress;
	long max = 0;
	long min = statp[0].n_lock_acquired;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		/* Max/Min track the per-thread acquisition spread, matching
		 * the initialization of min from n_lock_acquired above. */
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		torture_init_end();
		return -EINVAL;
	}
	if (cxt.cur_ops->init)
		cxt.cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */

	lock_is_write_held = false;
	cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
				 sizeof(*cxt.lwsa), GFP_KERNEL);
	if (cxt.lwsa == NULL) {
		VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < cxt.nrealwriters_stress; i++) {
		cxt.lwsa[i].n_lock_fail = 0;
		cxt.lwsa[i].n_lock_acquired = 0;
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the threads evenly between
			 * readers and writers. We still run the same total
			 * number of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		lock_is_read_held = false;
		cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
					 sizeof(*cxt.lrsa), GFP_KERNEL);
		if (cxt.lrsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
			firsterr = -ENOMEM;
			kfree(cxt.lwsa);
			goto unwind;
		}

		for (i = 0; i < cxt.nrealreaders_stress; i++) {
			cxt.lrsa[i].n_lock_fail = 0;
			cxt.lrsa[i].n_lock_acquired = 0;
		}
	}
	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	writer_tasks = kcalloc(cxt.nrealwriters_stress,
			       sizeof(writer_tasks[0]), GFP_KERNEL);
	if (writer_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]), GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage by creating their kthreads first. This could be
	 * modified for very specific needs, or the policy could even be made
	 * user-selectable, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);