include/linux/wait.h
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
11
12 typedef struct __wait_queue wait_queue_t;
13 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
14 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
15
16 /* __wait_queue::flags */
17 #define WQ_FLAG_EXCLUSIVE       0x01
18 #define WQ_FLAG_WOKEN           0x02
19
20 struct __wait_queue {
21         unsigned int            flags;
22         void                    *private;
23         wait_queue_func_t       func;
24         struct list_head        task_list;
25 };
26
27 struct wait_bit_key {
28         void                    *flags;
29         int                     bit_nr;
30 #define WAIT_ATOMIC_T_BIT_NR    -1
31         unsigned long           timeout;
32 };
33
34 struct wait_bit_queue {
35         struct wait_bit_key     key;
36         wait_queue_t            wait;
37 };
38
39 struct __wait_queue_head {
40         spinlock_t              lock;
41         struct list_head        task_list;
42 };
43 typedef struct __wait_queue_head wait_queue_head_t;
44
45 struct task_struct;
46
47 /*
48  * Macros for declaration and initialisation of the datatypes
49  */
50
51 #define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
52         .private        = tsk,                                          \
53         .func           = default_wake_function,                        \
54         .task_list      = { NULL, NULL } }
55
56 #define DECLARE_WAITQUEUE(name, tsk)                                    \
57         wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
58
59 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
60         .lock           = __SPIN_LOCK_UNLOCKED(name.lock),              \
61         .task_list      = { &(name).task_list, &(name).task_list } }
62
63 #define DECLARE_WAIT_QUEUE_HEAD(name) \
64         wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
65
66 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
67         { .flags = word, .bit_nr = bit, }
68
69 #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                              \
70         { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
71
72 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
73
74 #define init_waitqueue_head(q)                          \
75         do {                                            \
76                 static struct lock_class_key __key;     \
77                                                         \
78                 __init_waitqueue_head((q), #q, &__key); \
79         } while (0)
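/*
 * A minimal dynamic-initialisation sketch (illustrative only; 'struct foo'
 * is a made-up example):
 *
 *      struct foo {
 *              wait_queue_head_t       wq;
 *              bool                    ready;
 *      };
 *
 *      init_waitqueue_head(&f->wq);
 *      f->ready = false;
 *
 * Statically allocated heads can use DECLARE_WAIT_QUEUE_HEAD() instead.
 */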
80
81 #ifdef CONFIG_LOCKDEP
82 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
83         ({ init_waitqueue_head(&name); name; })
84 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
85         wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
86 #else
87 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
88 #endif
89
90 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
91 {
92         q->flags        = 0;
93         q->private      = p;
94         q->func         = default_wake_function;
95 }
96
97 static inline void
98 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
99 {
100         q->flags        = 0;
101         q->private      = NULL;
102         q->func         = func;
103 }
104
105 static inline int waitqueue_active(wait_queue_head_t *q)
106 {
107         return !list_empty(&q->task_list);
108 }
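/*
 * A usage sketch (illustrative; 'wq' and 'cond' are made-up names).  A waker
 * that wants to skip the wakeup when nobody is waiting typically does:
 *
 *      cond = true;
 *      smp_mb();
 *      if (waitqueue_active(&wq))
 *              wake_up(&wq);
 *
 * The barrier keeps the condition store from being reordered past the
 * waitqueue_active() check; without it a waiter that has not yet added
 * itself to @wq can miss the wakeup.  When in doubt, calling wake_up()
 * unconditionally is always safe.
 */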
109
110 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
111 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
112 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
113
114 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
115 {
116         list_add(&new->task_list, &head->task_list);
117 }
118
119 /*
120  * Used for wake-one threads:
121  */
122 static inline void
123 __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
124 {
125         wait->flags |= WQ_FLAG_EXCLUSIVE;
126         __add_wait_queue(q, wait);
127 }
128
129 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
130                                          wait_queue_t *new)
131 {
132         list_add_tail(&new->task_list, &head->task_list);
133 }
134
135 static inline void
136 __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
137 {
138         wait->flags |= WQ_FLAG_EXCLUSIVE;
139         __add_wait_queue_tail(q, wait);
140 }
141
142 static inline void
143 __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
144 {
145         list_del(&old->task_list);
146 }
147
148 typedef int wait_bit_action_f(struct wait_bit_key *);
149 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
150 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
151                           void *key);
152 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
153 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
154 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
155 void __wake_up_bit(wait_queue_head_t *, void *, int);
156 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
157 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
158 void wake_up_bit(void *, int);
159 void wake_up_atomic_t(atomic_t *);
160 int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
161 int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
162 int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
163 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
164 wait_queue_head_t *bit_waitqueue(void *, int);
165
166 #define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
167 #define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
168 #define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
169 #define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
170 #define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)
171
172 #define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
173 #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
174 #define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
175 #define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
176
177 /*
178  * Wakeup macros to be used to report events to the targets.
179  */
180 #define wake_up_poll(x, m)                                              \
181         __wake_up(x, TASK_NORMAL, 1, (void *) (m))
182 #define wake_up_locked_poll(x, m)                                       \
183         __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m))
184 #define wake_up_interruptible_poll(x, m)                                \
185         __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
186 #define wake_up_interruptible_sync_poll(x, m)                           \
187         __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
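/*
 * A poll-wakeup sketch (illustrative; 'foo_wq' is a made-up name).  A driver
 * whose write path makes new data readable might report just the readable
 * events:
 *
 *      wake_up_interruptible_poll(&foo_wq, POLLIN | POLLRDNORM);
 *
 * so that poll/select/epoll waiters interested in other events are not
 * woken needlessly.
 */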
188
189 #define ___wait_cond_timeout(condition)                                 \
190 ({                                                                      \
191         bool __cond = (condition);                                      \
192         if (__cond && !__ret)                                           \
193                 __ret = 1;                                              \
194         __cond || !__ret;                                               \
195 })
196
197 #define ___wait_is_interruptible(state)                                 \
198         (!__builtin_constant_p(state) ||                                \
199                 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)  \
200
201 /*
202  * The below macro ___wait_event() has an explicit shadow of the __ret
203  * variable when used from the wait_event_*() macros.
204  *
205  * This is so that both can use the ___wait_cond_timeout() construct
206  * to wrap the condition.
207  *
208  * The type inconsistency of the wait_event_*() __ret variable is also
209  * on purpose; we use long where we can return timeout values and int
210  * otherwise.
211  */
212
213 #define ___wait_event(wq, condition, state, exclusive, ret, cmd)        \
214 ({                                                                      \
215         __label__ __out;                                                \
216         wait_queue_t __wait;                                            \
217         long __ret = ret;       /* explicit shadow */                   \
218                                                                         \
219         INIT_LIST_HEAD(&__wait.task_list);                              \
220         if (exclusive)                                                  \
221                 __wait.flags = WQ_FLAG_EXCLUSIVE;                       \
222         else                                                            \
223                 __wait.flags = 0;                                       \
224                                                                         \
225         for (;;) {                                                      \
226                 long __int = prepare_to_wait_event(&wq, &__wait, state);\
227                                                                         \
228                 if (condition)                                          \
229                         break;                                          \
230                                                                         \
231                 if (___wait_is_interruptible(state) && __int) {         \
232                         __ret = __int;                                  \
233                         if (exclusive) {                                \
234                                 abort_exclusive_wait(&wq, &__wait,      \
235                                                      state, NULL);      \
236                                 goto __out;                             \
237                         }                                               \
238                         break;                                          \
239                 }                                                       \
240                                                                         \
241                 cmd;                                                    \
242         }                                                               \
243         finish_wait(&wq, &__wait);                                      \
244 __out:  __ret;                                                          \
245 })
246
247 #define __wait_event(wq, condition)                                     \
248         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
249                             schedule())
250
251 /**
252  * wait_event - sleep until a condition gets true
253  * @wq: the waitqueue to wait on
254  * @condition: a C expression for the event to wait for
255  *
256  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
257  * @condition evaluates to true. The @condition is checked each time
258  * the waitqueue @wq is woken up.
259  *
260  * wake_up() has to be called after changing any variable that could
261  * change the result of the wait condition.
262  */
263 #define wait_event(wq, condition)                                       \
264 do {                                                                    \
265         might_sleep();                                                  \
266         if (condition)                                                  \
267                 break;                                                  \
268         __wait_event(wq, condition);                                    \
269 } while (0)
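/*
 * A minimal wait_event() sketch (illustrative only; 'foo_wq' and 'foo_done'
 * are made-up names):
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *      static bool foo_done;
 *
 * Waiter:
 *      wait_event(foo_wq, foo_done);
 *
 * Waker:
 *      foo_done = true;
 *      wake_up(&foo_wq);
 */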
270
271 #define __io_wait_event(wq, condition)                                  \
272         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
273                             io_schedule())
274
275 /*
276  * io_wait_event() -- like wait_event() but with io_schedule()
277  */
278 #define io_wait_event(wq, condition)                                    \
279 do {                                                                    \
280         might_sleep();                                                  \
281         if (condition)                                                  \
282                 break;                                                  \
283         __io_wait_event(wq, condition);                                 \
284 } while (0)
285
286 #define __wait_event_freezable(wq, condition)                           \
287         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
288                             schedule(); try_to_freeze())
289
290 /**
291  * wait_event_freezable - sleep (or freeze) until a condition gets true
292  * @wq: the waitqueue to wait on
293  * @condition: a C expression for the event to wait for
294  *
295  * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
296  * to system load) until the @condition evaluates to true. The
297  * @condition is checked each time the waitqueue @wq is woken up.
298  *
299  * wake_up() has to be called after changing any variable that could
300  * change the result of the wait condition.
301  */
302 #define wait_event_freezable(wq, condition)                             \
303 ({                                                                      \
304         int __ret = 0;                                                  \
305         might_sleep();                                                  \
306         if (!(condition))                                               \
307                 __ret = __wait_event_freezable(wq, condition);          \
308         __ret;                                                          \
309 })
310
311 #define __wait_event_timeout(wq, condition, timeout)                    \
312         ___wait_event(wq, ___wait_cond_timeout(condition),              \
313                       TASK_UNINTERRUPTIBLE, 0, timeout,                 \
314                       __ret = schedule_timeout(__ret))
315
316 /**
317  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
318  * @wq: the waitqueue to wait on
319  * @condition: a C expression for the event to wait for
320  * @timeout: timeout, in jiffies
321  *
322  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
323  * @condition evaluates to true. The @condition is checked each time
324  * the waitqueue @wq is woken up.
325  *
326  * wake_up() has to be called after changing any variable that could
327  * change the result of the wait condition.
328  *
329  * Returns:
330  * 0 if the @condition evaluated to %false after the @timeout elapsed,
331  * 1 if the @condition evaluated to %true after the @timeout elapsed,
332  * or the remaining jiffies (at least 1) if the @condition evaluated
333  * to %true before the @timeout elapsed.
334  */
335 #define wait_event_timeout(wq, condition, timeout)                      \
336 ({                                                                      \
337         long __ret = timeout;                                           \
338         might_sleep();                                                  \
339         if (!___wait_cond_timeout(condition))                           \
340                 __ret = __wait_event_timeout(wq, condition, timeout);   \
341         __ret;                                                          \
342 })
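/*
 * A wait_event_timeout() sketch (illustrative; names are made up):
 *
 *      long left = wait_event_timeout(foo_wq, foo_done, HZ);
 *
 *      if (!left)
 *              pr_warn("foo: timed out waiting for completion\n");
 *
 * A non-zero return means the condition became true; the value is the
 * number of jiffies that were left (at least 1).
 */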
343
344 #define __wait_event_freezable_timeout(wq, condition, timeout)          \
345         ___wait_event(wq, ___wait_cond_timeout(condition),              \
346                       TASK_INTERRUPTIBLE, 0, timeout,                   \
347                       __ret = schedule_timeout(__ret); try_to_freeze())
348
349 /*
350  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
351  * increasing load and is freezable.
352  */
353 #define wait_event_freezable_timeout(wq, condition, timeout)            \
354 ({                                                                      \
355         long __ret = timeout;                                           \
356         might_sleep();                                                  \
357         if (!___wait_cond_timeout(condition))                           \
358                 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
359         __ret;                                                          \
360 })
361
362 #define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)           \
363         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,  \
364                             cmd1; schedule(); cmd2)
365 /*
366  * Just like wait_event_cmd(), except it sets exclusive flag
367  */
368 #define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)             \
369 do {                                                                    \
370         if (condition)                                                  \
371                 break;                                                  \
372         __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);          \
373 } while (0)
374
375 #define __wait_event_cmd(wq, condition, cmd1, cmd2)                     \
376         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
377                             cmd1; schedule(); cmd2)
378
379 /**
380  * wait_event_cmd - sleep until a condition gets true
381  * @wq: the waitqueue to wait on
382  * @condition: a C expression for the event to wait for
383  * @cmd1: the command to be executed before sleep
384  * @cmd2: the command to be executed after sleep
385  *
386  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
387  * @condition evaluates to true. The @condition is checked each time
388  * the waitqueue @wq is woken up.
389  *
390  * wake_up() has to be called after changing any variable that could
391  * change the result of the wait condition.
392  */
393 #define wait_event_cmd(wq, condition, cmd1, cmd2)                       \
394 do {                                                                    \
395         if (condition)                                                  \
396                 break;                                                  \
397         __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
398 } while (0)
399
400 #define __wait_event_interruptible(wq, condition)                       \
401         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
402                       schedule())
403
404 /**
405  * wait_event_interruptible - sleep until a condition gets true
406  * @wq: the waitqueue to wait on
407  * @condition: a C expression for the event to wait for
408  *
409  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
410  * @condition evaluates to true or a signal is received.
411  * The @condition is checked each time the waitqueue @wq is woken up.
412  *
413  * wake_up() has to be called after changing any variable that could
414  * change the result of the wait condition.
415  *
416  * The function will return -ERESTARTSYS if it was interrupted by a
417  * signal and 0 if @condition evaluated to true.
418  */
419 #define wait_event_interruptible(wq, condition)                         \
420 ({                                                                      \
421         int __ret = 0;                                                  \
422         might_sleep();                                                  \
423         if (!(condition))                                               \
424                 __ret = __wait_event_interruptible(wq, condition);      \
425         __ret;                                                          \
426 })
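/*
 * A wait_event_interruptible() sketch (illustrative; names are made up).
 * Callers usually propagate the error so the syscall can be restarted:
 *
 *      int ret = wait_event_interruptible(foo_wq, foo_done);
 *
 *      if (ret)
 *              return ret;
 *
 * A non-zero return can only be -ERESTARTSYS, i.e. a signal arrived before
 * the condition became true.
 */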
427
428 #define __wait_event_interruptible_timeout(wq, condition, timeout)      \
429         ___wait_event(wq, ___wait_cond_timeout(condition),              \
430                       TASK_INTERRUPTIBLE, 0, timeout,                   \
431                       __ret = schedule_timeout(__ret))
432
433 /**
434  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
435  * @wq: the waitqueue to wait on
436  * @condition: a C expression for the event to wait for
437  * @timeout: timeout, in jiffies
438  *
439  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
440  * @condition evaluates to true or a signal is received.
441  * The @condition is checked each time the waitqueue @wq is woken up.
442  *
443  * wake_up() has to be called after changing any variable that could
444  * change the result of the wait condition.
445  *
446  * Returns:
447  * 0 if the @condition evaluated to %false after the @timeout elapsed,
448  * 1 if the @condition evaluated to %true after the @timeout elapsed,
449  * the remaining jiffies (at least 1) if the @condition evaluated
450  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
451  * interrupted by a signal.
452  */
453 #define wait_event_interruptible_timeout(wq, condition, timeout)        \
454 ({                                                                      \
455         long __ret = timeout;                                           \
456         might_sleep();                                                  \
457         if (!___wait_cond_timeout(condition))                           \
458                 __ret = __wait_event_interruptible_timeout(wq,          \
459                                                 condition, timeout);    \
460         __ret;                                                          \
461 })
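/*
 * A wait_event_interruptible_timeout() sketch (illustrative; names made up):
 *
 *      long ret = wait_event_interruptible_timeout(foo_wq, foo_done,
 *                                                  msecs_to_jiffies(500));
 *      if (ret < 0)
 *              return ret;
 *      if (!ret)
 *              return -ETIMEDOUT;
 *
 * A negative return is -ERESTARTSYS (signal), 0 means the timeout expired
 * with the condition still false, and a positive value is the number of
 * jiffies remaining when the condition became true.
 */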
462
463 #define __wait_event_hrtimeout(wq, condition, timeout, state)           \
464 ({                                                                      \
465         int __ret = 0;                                                  \
466         struct hrtimer_sleeper __t;                                     \
467                                                                         \
468         hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,              \
469                               HRTIMER_MODE_REL);                        \
470         hrtimer_init_sleeper(&__t, current);                            \
471         if ((timeout).tv64 != KTIME_MAX)                                \
472                 hrtimer_start_range_ns(&__t.timer, timeout,             \
473                                        current->timer_slack_ns,         \
474                                        HRTIMER_MODE_REL);               \
475                                                                         \
476         __ret = ___wait_event(wq, condition, state, 0, 0,               \
477                 if (!__t.task) {                                        \
478                         __ret = -ETIME;                                 \
479                         break;                                          \
480                 }                                                       \
481                 schedule());                                            \
482                                                                         \
483         hrtimer_cancel(&__t.timer);                                     \
484         destroy_hrtimer_on_stack(&__t.timer);                           \
485         __ret;                                                          \
486 })
487
488 /**
489  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
490  * @wq: the waitqueue to wait on
491  * @condition: a C expression for the event to wait for
492  * @timeout: timeout, as a ktime_t
493  *
494  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
495  * @condition evaluates to true or the @timeout elapses.
496  * The @condition is checked each time the waitqueue @wq is woken up.
497  *
498  * wake_up() has to be called after changing any variable that could
499  * change the result of the wait condition.
500  *
501  * The function returns 0 if @condition became true, or -ETIME if the timeout
502  * elapsed.
503  */
504 #define wait_event_hrtimeout(wq, condition, timeout)                    \
505 ({                                                                      \
506         int __ret = 0;                                                  \
507         might_sleep();                                                  \
508         if (!(condition))                                               \
509                 __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
510                                                TASK_UNINTERRUPTIBLE);   \
511         __ret;                                                          \
512 })
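/*
 * A wait_event_hrtimeout() sketch (illustrative; names made up).  The
 * timeout is a ktime_t, e.g. built with ms_to_ktime():
 *
 *      int ret = wait_event_hrtimeout(foo_wq, foo_done, ms_to_ktime(10));
 *
 *      if (ret == -ETIME)
 *              return ret;
 */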
513
514 /**
515  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
516  * @wq: the waitqueue to wait on
517  * @condition: a C expression for the event to wait for
518  * @timeout: timeout, as a ktime_t
519  *
520  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
521  * @condition evaluates to true or a signal is received.
522  * The @condition is checked each time the waitqueue @wq is woken up.
523  *
524  * wake_up() has to be called after changing any variable that could
525  * change the result of the wait condition.
526  *
527  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
528  * interrupted by a signal, or -ETIME if the timeout elapsed.
529  */
530 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)      \
531 ({                                                                      \
532         long __ret = 0;                                                 \
533         might_sleep();                                                  \
534         if (!(condition))                                               \
535                 __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
536                                                TASK_INTERRUPTIBLE);     \
537         __ret;                                                          \
538 })
539
540 #define __wait_event_interruptible_exclusive(wq, condition)             \
541         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
542                       schedule())
543
544 #define wait_event_interruptible_exclusive(wq, condition)               \
545 ({                                                                      \
546         int __ret = 0;                                                  \
547         might_sleep();                                                  \
548         if (!(condition))                                               \
549                 __ret = __wait_event_interruptible_exclusive(wq, condition);\
550         __ret;                                                          \
551 })
552
553
554 #define __wait_event_freezable_exclusive(wq, condition)                 \
555         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
556                         schedule(); try_to_freeze())
557
558 #define wait_event_freezable_exclusive(wq, condition)                   \
559 ({                                                                      \
560         int __ret = 0;                                                  \
561         might_sleep();                                                  \
562         if (!(condition))                                               \
563                 __ret = __wait_event_freezable_exclusive(wq, condition);\
564         __ret;                                                          \
565 })
566
567
568 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
569 ({                                                                      \
570         int __ret = 0;                                                  \
571         DEFINE_WAIT(__wait);                                            \
572         if (exclusive)                                                  \
573                 __wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
574         do {                                                            \
575                 if (likely(list_empty(&__wait.task_list)))              \
576                         __add_wait_queue_tail(&(wq), &__wait);          \
577                 set_current_state(TASK_INTERRUPTIBLE);                  \
578                 if (signal_pending(current)) {                          \
579                         __ret = -ERESTARTSYS;                           \
580                         break;                                          \
581                 }                                                       \
582                 if (irq)                                                \
583                         spin_unlock_irq(&(wq).lock);                    \
584                 else                                                    \
585                         spin_unlock(&(wq).lock);                        \
586                 schedule();                                             \
587                 if (irq)                                                \
588                         spin_lock_irq(&(wq).lock);                      \
589                 else                                                    \
590                         spin_lock(&(wq).lock);                          \
591         } while (!(condition));                                         \
592         __remove_wait_queue(&(wq), &__wait);                            \
593         __set_current_state(TASK_RUNNING);                              \
594         __ret;                                                          \
595 })
596
597
598 /**
599  * wait_event_interruptible_locked - sleep until a condition gets true
600  * @wq: the waitqueue to wait on
601  * @condition: a C expression for the event to wait for
602  *
603  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
604  * @condition evaluates to true or a signal is received.
605  * The @condition is checked each time the waitqueue @wq is woken up.
606  *
607  * It must be called with wq.lock held.  This spinlock is unlocked
608  * while sleeping, but @condition testing is done while the lock is
609  * held, and the lock is held again when this macro exits.
610  *
611  * The lock is locked/unlocked using spin_lock()/spin_unlock()
612  * functions which must match the way they are locked/unlocked outside
613  * of this macro.
614  *
615  * wake_up_locked() has to be called after changing any variable that could
616  * change the result of the wait condition.
617  *
618  * The function will return -ERESTARTSYS if it was interrupted by a
619  * signal and 0 if @condition evaluated to true.
620  */
621 #define wait_event_interruptible_locked(wq, condition)                  \
622         ((condition)                                                    \
623          ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
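/*
 * A wait_event_interruptible_locked() sketch (illustrative; names made up).
 * The waitqueue's own spinlock protects the condition:
 *
 * Waiter:
 *      spin_lock(&foo_wq.lock);
 *      ret = wait_event_interruptible_locked(foo_wq, foo_done);
 *      spin_unlock(&foo_wq.lock);
 *
 * Waker:
 *      spin_lock(&foo_wq.lock);
 *      foo_done = true;
 *      wake_up_locked(&foo_wq);
 *      spin_unlock(&foo_wq.lock);
 */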
624
625 /**
626  * wait_event_interruptible_locked_irq - sleep until a condition gets true
627  * @wq: the waitqueue to wait on
628  * @condition: a C expression for the event to wait for
629  *
630  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
631  * @condition evaluates to true or a signal is received.
632  * The @condition is checked each time the waitqueue @wq is woken up.
633  *
634  * It must be called with wq.lock held.  This spinlock is unlocked
635  * while sleeping, but @condition testing is done while the lock is
636  * held, and the lock is held again when this macro exits.
637  *
638  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
639  * functions which must match the way they are locked/unlocked outside
640  * of this macro.
641  *
642  * wake_up_locked() has to be called after changing any variable that could
643  * change the result of the wait condition.
644  *
645  * The function will return -ERESTARTSYS if it was interrupted by a
646  * signal and 0 if @condition evaluated to true.
647  */
648 #define wait_event_interruptible_locked_irq(wq, condition)              \
649         ((condition)                                                    \
650          ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
651
652 /**
653  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
654  * @wq: the waitqueue to wait on
655  * @condition: a C expression for the event to wait for
656  *
657  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
658  * @condition evaluates to true or a signal is received.
659  * The @condition is checked each time the waitqueue @wq is woken up.
660  *
661  * It must be called with wq.lock held.  This spinlock is unlocked
662  * while sleeping, but @condition testing is done while the lock is
663  * held, and the lock is held again when this macro exits.
664  *
665  * The lock is locked/unlocked using spin_lock()/spin_unlock()
666  * functions which must match the way they are locked/unlocked outside
667  * of this macro.
668  *
669  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
670  * set, so if this process is woken up while other processes are waiting
671  * on the list, the remaining processes are not considered.
672  *
673  * wake_up_locked() has to be called after changing any variable that could
674  * change the result of the wait condition.
675  *
676  * The function will return -ERESTARTSYS if it was interrupted by a
677  * signal and 0 if @condition evaluated to true.
678  */
679 #define wait_event_interruptible_exclusive_locked(wq, condition)        \
680         ((condition)                                                    \
681          ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
682
683 /**
684  * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
685  * @wq: the waitqueue to wait on
686  * @condition: a C expression for the event to wait for
687  *
688  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
689  * @condition evaluates to true or a signal is received.
690  * The @condition is checked each time the waitqueue @wq is woken up.
691  *
692  * It must be called with wq.lock held.  This spinlock is unlocked
693  * while sleeping, but @condition testing is done while the lock is
694  * held, and the lock is held again when this macro exits.
695  *
696  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
697  * functions which must match the way they are locked/unlocked outside
698  * of this macro.
699  *
700  * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
701  * set, so if this process is woken up while other processes are waiting
702  * on the list, the remaining processes are not considered.
703  *
704  * wake_up_locked() has to be called after changing any variable that could
705  * change the result of the wait condition.
706  *
707  * The function will return -ERESTARTSYS if it was interrupted by a
708  * signal and 0 if @condition evaluated to true.
709  */
710 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)    \
711         ((condition)                                                    \
712          ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
713
714
715 #define __wait_event_killable(wq, condition)                            \
716         ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
717
718 /**
719  * wait_event_killable - sleep until a condition gets true
720  * @wq: the waitqueue to wait on
721  * @condition: a C expression for the event to wait for
722  *
723  * The process is put to sleep (TASK_KILLABLE) until the
724  * @condition evaluates to true or a fatal signal is received.
725  * The @condition is checked each time the waitqueue @wq is woken up.
726  *
727  * wake_up() has to be called after changing any variable that could
728  * change the result of the wait condition.
729  *
730  * The function will return -ERESTARTSYS if it was interrupted by a
731  * fatal signal and 0 if @condition evaluated to true.
732  */
733 #define wait_event_killable(wq, condition)                              \
734 ({                                                                      \
735         int __ret = 0;                                                  \
736         might_sleep();                                                  \
737         if (!(condition))                                               \
738                 __ret = __wait_event_killable(wq, condition);           \
739         __ret;                                                          \
740 })
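/*
 * A wait_event_killable() sketch (illustrative; names made up).  Unlike
 * wait_event_interruptible(), only fatal signals wake the task early:
 *
 *      int ret = wait_event_killable(foo_wq, foo_done);
 *
 *      if (ret)
 *              return ret;
 */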
741
742
743 #define __wait_event_lock_irq(wq, condition, lock, cmd)                 \
744         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
745                             spin_unlock_irq(&lock);                     \
746                             cmd;                                        \
747                             schedule();                                 \
748                             spin_lock_irq(&lock))
749
750 /**
751  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
752  *                           condition is checked under the lock. This
753  *                           is expected to be called with the lock
754  *                           taken.
755  * @wq: the waitqueue to wait on
756  * @condition: a C expression for the event to wait for
757  * @lock: a locked spinlock_t, which will be released before cmd
758  *        and schedule() and reacquired afterwards.
759  * @cmd: a command which is invoked outside the critical section before
760  *       sleep
761  *
762  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
763  * @condition evaluates to true. The @condition is checked each time
764  * the waitqueue @wq is woken up.
765  *
766  * wake_up() has to be called after changing any variable that could
767  * change the result of the wait condition.
768  *
769  * This is supposed to be called while holding the lock. The lock is
770  * dropped before invoking the cmd and going to sleep and is reacquired
771  * afterwards.
772  */
773 #define wait_event_lock_irq_cmd(wq, condition, lock, cmd)               \
774 do {                                                                    \
775         if (condition)                                                  \
776                 break;                                                  \
777         __wait_event_lock_irq(wq, condition, lock, cmd);                \
778 } while (0)
779
780 /**
781  * wait_event_lock_irq - sleep until a condition gets true. The
782  *                       condition is checked under the lock. This
783  *                       is expected to be called with the lock
784  *                       taken.
785  * @wq: the waitqueue to wait on
786  * @condition: a C expression for the event to wait for
787  * @lock: a locked spinlock_t, which will be released before schedule()
788  *        and reacquired afterwards.
789  *
790  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
791  * @condition evaluates to true. The @condition is checked each time
792  * the waitqueue @wq is woken up.
793  *
794  * wake_up() has to be called after changing any variable that could
795  * change the result of the wait condition.
796  *
797  * This is supposed to be called while holding the lock. The lock is
798  * dropped before going to sleep and is reacquired afterwards.
799  */
800 #define wait_event_lock_irq(wq, condition, lock)                        \
801 do {                                                                    \
802         if (condition)                                                  \
803                 break;                                                  \
804         __wait_event_lock_irq(wq, condition, lock, );                   \
805 } while (0)
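/*
 * A wait_event_lock_irq() sketch (illustrative; 'foo_lock' and 'foo_count'
 * are made-up names, with foo_lock protecting foo_count):
 *
 *      spin_lock_irq(&foo_lock);
 *      wait_event_lock_irq(foo_wq, foo_count == 0, foo_lock);
 *      (foo_count is 0 and foo_lock is held again here)
 *      spin_unlock_irq(&foo_lock);
 */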
806
807
808 #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)   \
809         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
810                       spin_unlock_irq(&lock);                           \
811                       cmd;                                              \
812                       schedule();                                       \
813                       spin_lock_irq(&lock))
814
815 /**
816  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
817  *              The condition is checked under the lock. This is expected to
818  *              be called with the lock taken.
819  * @wq: the waitqueue to wait on
820  * @condition: a C expression for the event to wait for
821  * @lock: a locked spinlock_t, which will be released before cmd and
822  *        schedule() and reacquired afterwards.
823  * @cmd: a command which is invoked outside the critical section before
824  *       sleep
825  *
826  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
827  * @condition evaluates to true or a signal is received. The @condition is
828  * checked each time the waitqueue @wq is woken up.
829  *
830  * wake_up() has to be called after changing any variable that could
831  * change the result of the wait condition.
832  *
833  * This is supposed to be called while holding the lock. The lock is
834  * dropped before invoking the cmd and going to sleep and is reacquired
835  * afterwards.
836  *
837  * The macro will return -ERESTARTSYS if it was interrupted by a signal
838  * and 0 if @condition evaluated to true.
839  */
840 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
841 ({                                                                      \
842         int __ret = 0;                                                  \
843         if (!(condition))                                               \
844                 __ret = __wait_event_interruptible_lock_irq(wq,         \
845                                                 condition, lock, cmd);  \
846         __ret;                                                          \
847 })
848
849 /**
850  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
851  *              The condition is checked under the lock. This is expected
852  *              to be called with the lock taken.
853  * @wq: the waitqueue to wait on
854  * @condition: a C expression for the event to wait for
855  * @lock: a locked spinlock_t, which will be released before schedule()
856  *        and reacquired afterwards.
857  *
858  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
859  * @condition evaluates to true or a signal is received. The @condition is
860  * checked each time the waitqueue @wq is woken up.
861  *
862  * wake_up() has to be called after changing any variable that could
863  * change the result of the wait condition.
864  *
865  * This is supposed to be called while holding the lock. The lock is
866  * dropped before going to sleep and is reacquired afterwards.
867  *
868  * The macro will return -ERESTARTSYS if it was interrupted by a signal
869  * and 0 if @condition evaluated to true.
870  */
871 #define wait_event_interruptible_lock_irq(wq, condition, lock)          \
872 ({                                                                      \
873         int __ret = 0;                                                  \
874         if (!(condition))                                               \
875                 __ret = __wait_event_interruptible_lock_irq(wq,         \
876                                                 condition, lock,);      \
877         __ret;                                                          \
878 })
879
880 #define __wait_event_interruptible_lock_irq_timeout(wq, condition,      \
881                                                     lock, timeout)      \
882         ___wait_event(wq, ___wait_cond_timeout(condition),              \
883                       TASK_INTERRUPTIBLE, 0, timeout,                   \
884                       spin_unlock_irq(&lock);                           \
885                       __ret = schedule_timeout(__ret);                  \
886                       spin_lock_irq(&lock));
887
888 /**
889  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
890  *              true or a timeout elapses. The condition is checked under
891  *              the lock. This is expected to be called with the lock taken.
892  * @wq: the waitqueue to wait on
893  * @condition: a C expression for the event to wait for
894  * @lock: a locked spinlock_t, which will be released before schedule()
895  *        and reacquired afterwards.
896  * @timeout: timeout, in jiffies
897  *
898  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
899  * @condition evaluates to true or a signal is received. The @condition is
900  * checked each time the waitqueue @wq is woken up.
901  *
902  * wake_up() has to be called after changing any variable that could
903  * change the result of the wait condition.
904  *
905  * This is supposed to be called while holding the lock. The lock is
906  * dropped before going to sleep and is reacquired afterwards.
907  *
908  * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
909  * was interrupted by a signal, or the remaining jiffies if the
910  * @condition evaluated to true before the @timeout elapsed.
911  */
912 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,  \
913                                                   timeout)              \
914 ({                                                                      \
915         long __ret = timeout;                                           \
916         if (!___wait_cond_timeout(condition))                           \
917                 __ret = __wait_event_interruptible_lock_irq_timeout(    \
918                                         wq, condition, lock, timeout);  \
919         __ret;                                                          \
920 })
921
922 /*
923  * Waitqueues which are removed from the waitqueue_head at wakeup time
924  */
925 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
926 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
927 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
928 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
929 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
930 long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
931 int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
932 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
933 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
934
935 #define DEFINE_WAIT_FUNC(name, function)                                \
936         wait_queue_t name = {                                           \
937                 .private        = current,                              \
938                 .func           = function,                             \
939                 .task_list      = LIST_HEAD_INIT((name).task_list),     \
940         }
941
942 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
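/*
 * An open-coded waiting sketch using DEFINE_WAIT() (illustrative; names are
 * made up).  This is roughly what the wait_event*() macros expand to:
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&foo_wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (foo_done)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&foo_wq, &wait);
 */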
943
944 #define DEFINE_WAIT_BIT(name, word, bit)                                \
945         struct wait_bit_queue name = {                                  \
946                 .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
947                 .wait   = {                                             \
948                         .private        = current,                      \
949                         .func           = wake_bit_function,            \
950                         .task_list      =                               \
951                                 LIST_HEAD_INIT((name).wait.task_list),  \
952                 },                                                      \
953         }
954
955 #define init_wait(wait)                                                 \
956         do {                                                            \
957                 (wait)->private = current;                              \
958                 (wait)->func = autoremove_wake_function;                \
959                 INIT_LIST_HEAD(&(wait)->task_list);                     \
960                 (wait)->flags = 0;                                      \
961         } while (0)
962
963
964 extern int bit_wait(struct wait_bit_key *);
965 extern int bit_wait_io(struct wait_bit_key *);
966 extern int bit_wait_timeout(struct wait_bit_key *);
967 extern int bit_wait_io_timeout(struct wait_bit_key *);
968
969 /**
970  * wait_on_bit - wait for a bit to be cleared
971  * @word: the word being waited on, a kernel virtual address
972  * @bit: the bit of the word being waited on
973  * @mode: the task state to sleep in
974  *
975  * There is a standard hashed waitqueue table for generic use. This
976  * is the part of the hashtable's accessor API that waits on a bit.
977  * For instance, if one were to have waiters on a bitflag, one would
978  * call wait_on_bit() in threads waiting for the bit to clear.
979  * One uses wait_on_bit() where one is waiting for the bit to clear,
980  * but has no intention of setting it.
981  * Returned value will be zero if the bit was cleared, or non-zero
982  * if the process received a signal and the mode permitted wakeup
983  * on that signal.
984  */
985 static inline int
986 wait_on_bit(unsigned long *word, int bit, unsigned mode)
987 {
988         might_sleep();
989         if (!test_bit(bit, word))
990                 return 0;
991         return out_of_line_wait_on_bit(word, bit,
992                                        bit_wait,
993                                        mode);
994 }
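/*
 * A wait_on_bit() sketch (illustrative; FOO_BUSY and foo->flags are made-up
 * names).  Waiter:
 *
 *      wait_on_bit(&foo->flags, FOO_BUSY, TASK_UNINTERRUPTIBLE);
 *
 * The side clearing the bit must wake the hashed waitqueue explicitly:
 *
 *      clear_bit(FOO_BUSY, &foo->flags);
 *      smp_mb__after_atomic();
 *      wake_up_bit(&foo->flags, FOO_BUSY);
 */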
995
996 /**
997  * wait_on_bit_io - wait for a bit to be cleared
998  * @word: the word being waited on, a kernel virtual address
999  * @bit: the bit of the word being waited on
1000  * @mode: the task state to sleep in
1001  *
1002  * Use the standard hashed waitqueue table to wait for a bit
1003  * to be cleared.  This is similar to wait_on_bit(), but calls
1004  * io_schedule() instead of schedule() for the actual waiting.
1005  *
1006  * Returned value will be zero if the bit was cleared, or non-zero
1007  * if the process received a signal and the mode permitted wakeup
1008  * on that signal.
1009  */
1010 static inline int
1011 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1012 {
1013         might_sleep();
1014         if (!test_bit(bit, word))
1015                 return 0;
1016         return out_of_line_wait_on_bit(word, bit,
1017                                        bit_wait_io,
1018                                        mode);
1019 }
1020
1021 /**
1022  * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1023  * @word: the word being waited on, a kernel virtual address
1024  * @bit: the bit of the word being waited on
1025  * @mode: the task state to sleep in
1026  * @timeout: timeout, in jiffies
1027  *
1028  * Use the standard hashed waitqueue table to wait for a bit
1029  * to be cleared. This is similar to wait_on_bit(), except also takes a
1030  * timeout parameter.
1031  *
1032  * Returned value will be zero if the bit was cleared before the
1033  * @timeout elapsed, or non-zero if the @timeout elapsed or process
1034  * received a signal and the mode permitted wakeup on that signal.
1035  */
1036 static inline int
1037 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1038                     unsigned long timeout)
1039 {
1040         might_sleep();
1041         if (!test_bit(bit, word))
1042                 return 0;
1043         return out_of_line_wait_on_bit_timeout(word, bit,
1044                                                bit_wait_timeout,
1045                                                mode, timeout);
1046 }
1047
1048 /**
1049  * wait_on_bit_action - wait for a bit to be cleared
1050  * @word: the word being waited on, a kernel virtual address
1051  * @bit: the bit of the word being waited on
1052  * @action: the function used to sleep, which may take special actions
1053  * @mode: the task state to sleep in
1054  *
1055  * Use the standard hashed waitqueue table to wait for a bit
1056  * to be cleared, and allow the waiting action to be specified.
1057  * This is like wait_on_bit() but allows fine control of how the waiting
1058  * is done.
1059  *
1060  * Returned value will be zero if the bit was cleared, or non-zero
1061  * if the process received a signal and the mode permitted wakeup
1062  * on that signal.
1063  */
1064 static inline int
1065 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1066                    unsigned mode)
1067 {
1068         might_sleep();
1069         if (!test_bit(bit, word))
1070                 return 0;
1071         return out_of_line_wait_on_bit(word, bit, action, mode);
1072 }
1073
1074 /**
1075  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1076  * @word: the word being waited on, a kernel virtual address
1077  * @bit: the bit of the word being waited on
1078  * @mode: the task state to sleep in
1079  *
1080  * There is a standard hashed waitqueue table for generic use. This
1081  * is the part of the hashtable's accessor API that waits on a bit
1082  * when one intends to set it, for instance, trying to lock bitflags.
1083  * For instance, if one were to have waiters trying to set a bitflag
1084  * and waiting for it to clear before setting it, one would call
1085  * wait_on_bit_lock() in the threads waiting to be able to set the bit.
1086  * One uses wait_on_bit_lock() where one is waiting for the bit to
1087  * clear with the intention of setting it, and when done, clearing it.
1088  *
1089  * Returns zero if the bit was (eventually) found to be clear and was
1090  * set.  Returns non-zero if a signal was delivered to the process and
1091  * the @mode allows that signal to wake the process.
1092  */
1093 static inline int
1094 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1095 {
1096         might_sleep();
1097         if (!test_and_set_bit(bit, word))
1098                 return 0;
1099         return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1100 }
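/*
 * A wait_on_bit_lock() sketch (illustrative; names made up).  The bit acts
 * as a lock:
 *
 *      if (wait_on_bit_lock(&foo->flags, FOO_LOCK, TASK_KILLABLE))
 *              return -EINTR;
 *      (FOO_LOCK is now owned by this task)
 *      clear_bit_unlock(FOO_LOCK, &foo->flags);
 *      smp_mb__after_atomic();
 *      wake_up_bit(&foo->flags, FOO_LOCK);
 */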
1101
1102 /**
1103  * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1104  * @word: the word being waited on, a kernel virtual address
1105  * @bit: the bit of the word being waited on
1106  * @mode: the task state to sleep in
1107  *
1108  * Use the standard hashed waitqueue table to wait for a bit
1109  * to be cleared and then to atomically set it.  This is similar
1110  * to wait_on_bit(), but calls io_schedule() instead of schedule()
1111  * for the actual waiting.
1112  *
1113  * Returns zero if the bit was (eventually) found to be clear and was
1114  * set.  Returns non-zero if a signal was delivered to the process and
1115  * the @mode allows that signal to wake the process.
1116  */
1117 static inline int
1118 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1119 {
1120         might_sleep();
1121         if (!test_and_set_bit(bit, word))
1122                 return 0;
1123         return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1124 }
1125
1126 /**
1127  * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1128  * @word: the word being waited on, a kernel virtual address
1129  * @bit: the bit of the word being waited on
1130  * @action: the function used to sleep, which may take special actions
1131  * @mode: the task state to sleep in
1132  *
1133  * Use the standard hashed waitqueue table to wait for a bit
1134  * to be cleared and then to set it, and allow the waiting action
1135  * to be specified.
1136  * This is like wait_on_bit() but allows fine control of how the waiting
1137  * is done.
1138  *
1139  * Returns zero if the bit was (eventually) found to be clear and was
1140  * set.  Returns non-zero if a signal was delivered to the process and
1141  * the @mode allows that signal to wake the process.
1142  */
1143 static inline int
1144 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1145                         unsigned mode)
1146 {
1147         might_sleep();
1148         if (!test_and_set_bit(bit, word))
1149                 return 0;
1150         return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1151 }
1152
1153 /**
1154  * wait_on_atomic_t - Wait for an atomic_t to become 0
1155  * @val: The atomic value being waited on, a kernel virtual address
1156  * @action: the function used to sleep, which may take special actions
1157  * @mode: the task state to sleep in
1158  *
1159  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
1160  * the purpose of getting a waitqueue, but we set the key to a bit number
1161  * outside of the target 'word'.
1162  */
1163 static inline
1164 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1165 {
1166         might_sleep();
1167         if (atomic_read(val) == 0)
1168                 return 0;
1169         return out_of_line_wait_on_atomic_t(val, action, mode);
1170 }
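/*
 * A wait_on_atomic_t() sketch (illustrative; 'foo_refs' and the action
 * callback are made-up names).  The @action callback decides how to sleep:
 *
 *      static int foo_wait_atomic(atomic_t *p)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      wait_on_atomic_t(&foo_refs, foo_wait_atomic, TASK_UNINTERRUPTIBLE);
 *
 * Whoever drops the count to zero must call wake_up_atomic_t():
 *
 *      if (atomic_dec_and_test(&foo_refs))
 *              wake_up_atomic_t(&foo_refs);
 */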
1171
1172 #endif /* _LINUX_WAIT_H */