/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations.  There are two variants: one disables IRQs
 * on the local processor, the other does not.
 *
 * These are fair FIFO ticket locks.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */


/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */
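
/*
 * Illustrative sketch only, not part of this header: the same algorithm
 * in plain C.  The 32-bit lock word packs serving_now (the head) into
 * bits 0..15 and ticket (the tail) into bits 16..31, matching
 * asm/spinlock_types.h; fetch_add_u32() is a hypothetical atomic
 * fetch-and-add standing in for the ll/sc loops below:
 *
 *	static void ticket_lock(arch_spinlock_t *lock)
 *	{
 *		// Claim the next ticket by bumping the tail (high half).
 *		u16 my_ticket = fetch_add_u32(&lock->lock, 0x10000) >> 16;
 *
 *		// Spin until the head catches up with our ticket.
 *		while (READ_ONCE(lock->h.serving_now) != my_ticket)
 *			cpu_relax();
 *	}
 */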

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        u32 counters = ACCESS_ONCE(lock->lock);

        /* Locked when ticket (bits 16..31) != serving_now (bits 0..15). */
        return ((counters >> 16) ^ counters) & 0xffff;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.h.serving_now == lock.h.ticket;
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        u16 owner = READ_ONCE(lock->h.serving_now);
        smp_rmb();
        for (;;) {
                arch_spinlock_t tmp = READ_ONCE(*lock);

                /*
                 * Done once the lock is free, or once serving_now has
                 * moved past the holder we sampled on entry.
                 */
                if (tmp.h.serving_now == tmp.h.ticket ||
                    tmp.h.serving_now != owner)
                        break;

                cpu_relax();
        }
        smp_acquire__after_ctrl_dep();
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        u32 counters = ACCESS_ONCE(lock->lock);

        return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended  arch_spin_is_contended

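/*
 * The ll/sc fast path below atomically bumps the ticket (high half) to
 * join the queue; if serving_now already matches our ticket the lock is
 * ours.  Otherwise the out-of-line path in .subsection 2 spins, waiting
 * roughly (distance to our ticket << 5) iterations between reloads of
 * serving_now, so waiters further back in the queue poll less often.
 * R10000_LLSC_WAR selects a branch-likely (beqzl) retry sequence to
 * cope with an ll/sc erratum on early R10000 silicon.
 */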
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        int my_ticket;
        int tmp;
        int inc = 0x10000;

        if (R10000_LLSC_WAR) {
                __asm__ __volatile__ (
                "       .set push               # arch_spin_lock        \n"
                "       .set noreorder                                  \n"
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       addu    %[my_ticket], %[ticket], %[inc]         \n"
                "       sc      %[my_ticket], %[ticket_ptr]             \n"
                "       beqzl   %[my_ticket], 1b                        \n"
                "        nop                                            \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
                "       andi    %[ticket], %[ticket], 0xffff            \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "4:     andi    %[ticket], %[ticket], 0xffff            \n"
                "       sll     %[ticket], 5                            \n"
                "                                                       \n"
                "6:     bnez    %[ticket], 6b                           \n"
                "        subu   %[ticket], 1                            \n"
                "                                                       \n"
                "       lhu     %[ticket], %[serving_now_ptr]           \n"
                "       beq     %[ticket], %[my_ticket], 2b             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "       b       4b                                      \n"
                "        subu   %[ticket], %[ticket], 1                 \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
                : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [serving_now_ptr] "+m" (lock->h.serving_now),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (my_ticket)
                : [inc] "r" (inc));
        } else {
                __asm__ __volatile__ (
                "       .set push               # arch_spin_lock        \n"
                "       .set noreorder                                  \n"
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       addu    %[my_ticket], %[ticket], %[inc]         \n"
                "       sc      %[my_ticket], %[ticket_ptr]             \n"
                "       beqz    %[my_ticket], 1b                        \n"
                "        srl    %[my_ticket], %[ticket], 16             \n"
                "       andi    %[ticket], %[ticket], 0xffff            \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "4:     andi    %[ticket], %[ticket], 0xffff            \n"
                "       sll     %[ticket], 5                            \n"
                "                                                       \n"
                "6:     bnez    %[ticket], 6b                           \n"
                "        subu   %[ticket], 1                            \n"
                "                                                       \n"
                "       lhu     %[ticket], %[serving_now_ptr]           \n"
                "       beq     %[ticket], %[my_ticket], 2b             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "       b       4b                                      \n"
                "        subu   %[ticket], %[ticket], 1                 \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
                : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [serving_now_ptr] "+m" (lock->h.serving_now),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (my_ticket)
                : [inc] "r" (inc));
        }

        smp_llsc_mb();
}

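/*
 * Release: only the lock holder ever stores to serving_now, so a plain
 * 16-bit store after wmb() is sufficient; nudge_writes() merely
 * encourages the store to become visible to spinning waiters promptly.
 */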
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int serving_now = lock->h.serving_now + 1;
        wmb();
        lock->h.serving_now = (u16)serving_now;
        nudge_writes();
}

static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
        int tmp, tmp2, tmp3;
        int inc = 0x10000;

        if (R10000_LLSC_WAR) {
                __asm__ __volatile__ (
                "       .set push               # arch_spin_trylock     \n"
                "       .set noreorder                                  \n"
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
                "       sc      %[ticket], %[ticket_ptr]                \n"
                "       beqzl   %[ticket], 1b                           \n"
                "        li     %[ticket], 1                            \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       2b                                      \n"
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
                : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
                : [inc] "r" (inc));
        } else {
                __asm__ __volatile__ (
                "       .set push               # arch_spin_trylock     \n"
                "       .set noreorder                                  \n"
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
                "       sc      %[ticket], %[ticket_ptr]                \n"
                "       beqz    %[ticket], 1b                           \n"
                "        li     %[ticket], 1                            \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       2b                                      \n"
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
                : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
                : [inc] "r" (inc));
        }

        smp_llsc_mb();

        return tmp;
}
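
/*
 * Rough C equivalent of the trylock above, illustrative only; the
 * cmpxchg() stands in for the ll/sc retry loop:
 *
 *	static unsigned int ticket_trylock(arch_spinlock_t *lock)
 *	{
 *		u32 old = READ_ONCE(lock->lock);
 *
 *		// Someone already holds a ticket ahead of us: fail.
 *		if ((old >> 16) != (old & 0xffff))
 *			return 0;
 *
 *		// Try to claim the next ticket in one atomic step.
 *		return cmpxchg(&lock->lock, old, old + 0x10000) == old;
 *	}
 */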

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/*
 * read_can_lock - would read_trylock() succeed?
 * @rw: the rwlock in question.
 */
#define arch_read_can_lock(rw)  ((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @rw: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)

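/*
 * Lock word encoding used by the macros above and the asm below: bit 31
 * is the writer bit (set via "lui %1, 0x8000") and bits 0..30 count the
 * readers.  Readers may enter while the word is non-negative (no
 * writer); a writer needs the whole word to be zero.  As pseudocode:
 *
 *	read_lock:    wait while (lock < 0);  lock += 1;
 *	read_unlock:  lock -= 1;
 *	write_lock:   wait while (lock != 0); lock = 0x80000000;
 *	write_unlock: lock = 0;
 */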
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_lock        \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 1b                                  \n"
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
                : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_read_lock        \n"
                        "       bltz    %1, 1b                          \n"
                        "        addu   %1, 1                           \n"
                        "2:     sc      %1, %0                          \n"
                        : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
                        : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }

        smp_llsc_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        smp_mb__before_llsc();

        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "1:     ll      %1, %2          # arch_read_unlock      \n"
                "       addiu   %1, -1                                  \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
                : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_read_unlock      \n"
                        "       addiu   %1, -1                          \n"
                        "       sc      %1, %0                          \n"
                        : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
                        : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    noreorder       # arch_write_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        lui    %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
                : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_write_lock       \n"
                        "       bnez    %1, 1b                          \n"
                        "        lui    %1, 0x8000                      \n"
                        "2:     sc      %1, %0                          \n"
                        : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
                        : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }

        smp_llsc_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb__before_llsc();

        __asm__ __volatile__(
        "                               # arch_write_unlock     \n"
        "       sw      $0, %0                                  \n"
        : "=m" (rw->lock)
        : "m" (rw->lock)
        : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int tmp;
        int ret;

        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_trylock     \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bltz    %1, 2f                                  \n"
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       .set    reorder                                 \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
                : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
                : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_trylock     \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bltz    %1, 2f                                  \n"
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
                : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
                : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        }

        return ret;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int tmp;
        int ret;

        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    noreorder       # arch_write_trylock    \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
                "        lui    %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "       .set    reorder                                 \n"
                "2:                                                     \n"
                : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
                : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
                        __asm__ __volatile__(
                        "       ll      %1, %3  # arch_write_trylock    \n"
                        "       li      %2, 0                           \n"
                        "       bnez    %1, 2f                          \n"
                        "       lui     %1, 0x8000                      \n"
                        "       sc      %1, %0                          \n"
                        "       li      %2, 1                           \n"
                        "2:                                             \n"
                        : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
                          "=&r" (ret)
                        : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));

                smp_llsc_mb();
        }

        return ret;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* _ASM_SPINLOCK_H */