/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = 1000;

/*
 * Handle the "spin_retry=" kernel command line parameter.
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

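/*
 * arch_spin_lock_wait() is the out of line slow path behind
 * arch_spin_lock().  The lock word is 0 when the lock is free and
 * holds the owner's SPINLOCK_LOCKVAL otherwise; as the ~owner
 * conversions below show, that value is the one's complement of the
 * owning CPU address, so a waiter can direct a yield hint at the
 * owner whenever the hypervisor has preempted it.
 */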
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                /* Check if the lock owner is running. */
                if (!smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner);
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
                 * yield the CPU if the lock is still unavailable.
                 */
                if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

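/*
 * Variant of arch_spin_lock_wait() for spin_lock_irqsave().  The CPU
 * spins with interrupts enabled (restored from @flags) so that pending
 * interrupts can be serviced during contention, and disables them
 * again only around the compare-and-swap that actually takes the lock.
 */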
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        local_irq_restore(flags);
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                }
                /* Check if the lock owner is running. */
                if (!smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner);
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
                 * yield the CPU if the lock is still unavailable.
                 */
                if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

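/*
 * Out of line part of arch_spin_trylock(): retry the inline fast path
 * (arch_spin_trylock_once) up to spin_retry times before reporting
 * failure.
 */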
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int count;

        for (count = spin_retry; count > 0; count--)
                if (arch_spin_trylock_once(lp))
                        return 1;
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

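/*
 * Out of line slow path for read_lock().  The rwlock word counts the
 * readers in the lower 31 bits; bit 31 is set while a writer holds or
 * is acquiring the lock, which makes the value negative as a signed
 * int.  A reader spins until the writer bit is clear and then raises
 * the count with a compare-and-swap.  Once the retry budget is used
 * up, the CPU recorded in rw->owner (again stored as a one's
 * complement) gets a directed yield if it is not currently scheduled
 * by the hypervisor.
 */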
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        unsigned int owner, old;
        int count = spin_retry;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if ((int) old < 0)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

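/*
 * Bounded trylock for readers: give up after spin_retry attempts
 * instead of spinning indefinitely while a writer holds the lock.
 */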
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if ((int) old < 0)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

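/*
 * Out of line slow path for write_lock().  Acquisition happens in two
 * steps: first claim the writer bit (0x80000000) with a compare-and-swap
 * while no other writer holds it, then wait for the reader count in the
 * lower 31 bits to drain to zero.  prev records the lock value seen when
 * the writer bit was claimed, so the exit test can tell whether the bit
 * is already ours.
 */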
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        unsigned int owner, old, prev;
        int count = spin_retry;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                /* Try to claim the writer bit if no writer holds it yet. */
                if ((int) old >= 0 &&
                    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_rmb();
                /* Done once all readers are gone and the writer bit is ours. */
                if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

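/*
 * Bounded trylock for writers: a writer can only take the lock in one
 * step when it is completely free, i.e. no readers and no writer.
 */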
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

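/*
 * Relax a busy waiter in favour of the lock holder.  cpu is the lock
 * value of the owner (one's complement of its CPU address), or 0 if no
 * owner is known.  On LPAR a yield is only issued when the owning vCPU
 * is not currently scheduled; under additional hypervisor layers the
 * yield hint is always given.
 */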
void arch_lock_relax(unsigned int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);