/* kernel/futex.c (karo-tx-linux.git, at commit "futex: Make unlock_pi more robust") */
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex,
 * taking the hb spinlock can be avoided and the syscall can simply
 * return. In order for this optimization to work, ordering guarantees
 * must exist so that the waiter being added to the list is acknowledged
 * when the list is concurrently being checked by the waker, avoiding
 * scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *     lock(hash_bucket(futex));
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   mb(); (A) <-- paired with -.
 *                              |
 *   lock(hash_bucket(futex));  |
 *                              |
 *   uval = *futex;             |
 *                              |        *futex = newval;
 *                              |        sys_futex(WAKE, futex);
 *                              |          futex_wake(futex);
 *                              |
 *                              `------->  mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers in
 * get_futex_key_refs(), through either ihold or atomic_inc, depending on the
 * futex type.
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *      X = Y = 0
 *
 *      w[X]=1          w[Y]=1
 *      MB              MB
 *      r[Y]=y          r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when it is possible that
 * the wait call can return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock, and decrement them again after releasing it - the
 * code that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done for
 * double_lock_hb() and double_unlock_hb(), respectively.
 */
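
/*
 * Illustrative only, not part of the original file: a minimal userspace
 * sketch of the wait/wake protocol described above, driven through the
 * raw futex(2) syscall. The helper names are made up for the example and
 * error handling is omitted.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void futex_wait_user(int *uaddr, int val)
 *	{
 *		// The kernel re-reads *uaddr under the hash bucket lock
 *		// (see futex_wait()) and only blocks if it still equals
 *		// val, which closes the lost-wakeup race shown above.
 *		syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
 *	}
 *
 *	static void futex_wake_user(int *uaddr, int nr)
 *	{
 *		// Wake up to nr tasks queued on uaddr; the waker must
 *		// have changed *uaddr first, as in the diagram above.
 *		syscall(SYS_futex, uaddr, FUTEX_WAKE, nr, NULL, NULL, 0);
 *	}
 */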

#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SHARED            0x01
#define FLAGS_CLOCKRT           0x02
#define FLAGS_HAS_TIMEOUT       0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
        /*
         * list of 'owned' pi_state instances - these have to be
         * cleaned up in do_exit() if the task exits prematurely:
         */
        struct list_head list;

        /*
         * The PI object:
         */
        struct rt_mutex pi_mutex;

        struct task_struct *owner;
        atomic_t refcount;

        union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:               priority-sorted list of tasks waiting on this futex
 * @task:               the task waiting on the futex
 * @lock_ptr:           the hash bucket lock
 * @key:                the key the futex is hashed on
 * @pi_state:           optional priority inheritance state
 * @rt_waiter:          rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:     the requeue_pi target futex key
 * @bitset:             bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
        struct plist_node list;

        struct task_struct *task;
        spinlock_t *lock_ptr;
        union futex_key key;
        struct futex_pi_state *pi_state;
        struct rt_mutex_waiter *rt_waiter;
        union futex_key *requeue_pi_key;
        u32 bitset;
};

static const struct futex_q futex_q_init = {
        /* list gets initialized in queue_me() */
        .key = FUTEX_KEY_INIT,
        .bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
        atomic_t waiters;
        spinlock_t lock;
        struct plist_head chain;
} ____cacheline_aligned_in_smp;

static unsigned long __read_mostly futex_hashsize;

static struct futex_hash_bucket *futex_queues;

static inline void futex_get_mm(union futex_key *key)
{
        atomic_inc(&key->private.mm->mm_count);
        /*
         * Ensure futex_get_mm() implies a full barrier such that
         * get_futex_key() implies a full barrier. This is relied upon
         * as full barrier (B), see the ordering comment above.
         */
        smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
        atomic_inc(&hb->waiters);
        /*
         * Full barrier (A), see the ordering comment above.
         */
        smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
        atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
        return atomic_read(&hb->waiters);
#else
        return 1;
#endif
}

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32*)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & (futex_hashsize - 1)];
}
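
/*
 * Note (added for clarity, an assumption based on the mask above):
 * indexing with "hash & (futex_hashsize - 1)" is only a modulo when
 * futex_hashsize is a power of two, which the boot-time sizing has to
 * guarantee. E.g. with futex_hashsize == 256, hash & 255 == hash % 256.
 */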

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1 && key2
                && key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr)
                return;

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                ihold(key->shared.inode); /* implies MB (B) */
                break;
        case FUT_OFF_MMSHARED:
                futex_get_mm(key); /* implies MB (B) */
                break;
        }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
        if (!key->both.ptr) {
                /* If we're here then we tried to put a key we failed to get */
                WARN_ON_ONCE(1);
                return;
        }

        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
                break;
        }
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:      virtual address of the futex
 * @fshared:    0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:        address where result is stored.
 * @rw:         mapping needs to be read/write (values: VERIFY_READ,
 *              VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct page *page, *page_head;
        int err, ro = 0;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((address % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
                return -EFAULT;

        /*
         * PROCESS_PRIVATE futexes are fast.
         * As the mm cannot disappear under us and the 'key' only needs
         * virtual address, we don't even have to find the underlying vma.
         * Note: We do have to check 'uaddr' is a valid user address,
         *       but access_ok() should be faster than find_vma().
         */
        if (!fshared) {
                key->private.mm = mm;
                key->private.address = address;
                get_futex_key_refs(key);  /* implies MB (B) */
                return 0;
        }

again:
        err = get_user_pages_fast(address, 1, 1, &page);
        /*
         * If write access is not required (eg. FUTEX_WAIT), try
         * and get read-only access.
         */
        if (err == -EFAULT && rw == VERIFY_READ) {
                err = get_user_pages_fast(address, 1, 0, &page);
                ro = 1;
        }
        if (err < 0)
                return err;
        else
                err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        page_head = page;
        if (unlikely(PageTail(page))) {
                put_page(page);
                /* serialize against __split_huge_page_splitting() */
                local_irq_disable();
                if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
                        page_head = compound_head(page);
                        /*
                         * page_head is valid pointer but we must pin
                         * it before taking the PG_lock and/or
                         * PG_compound_lock. The moment we re-enable
                         * irqs __split_huge_page_splitting() can
                         * return and the head page can be freed from
                         * under us. We can't take the PG_lock and/or
                         * PG_compound_lock on a page that could be
                         * freed from under us.
                         */
                        if (page != page_head) {
                                get_page(page_head);
                                put_page(page);
                        }
                        local_irq_enable();
                } else {
                        local_irq_enable();
                        goto again;
                }
        }
#else
        page_head = compound_head(page);
        if (page != page_head) {
                get_page(page_head);
                put_page(page);
        }
#endif

        lock_page(page_head);

        /*
         * If page_head->mapping is NULL, then it cannot be a PageAnon
         * page; but it might be the ZERO_PAGE or in the gate area or
         * in a special mapping (all cases which we are happy to fail);
         * or it may have been a good file page when get_user_pages_fast
         * found it, but truncated or holepunched or subjected to
         * invalidate_complete_page2 before we got the page lock (also
         * cases which we are happy to fail).  And we hold a reference,
         * so refcount care in invalidate_complete_page's remove_mapping
         * prevents drop_caches from setting mapping to NULL beneath us.
         *
         * The case we do have to guard against is when memory pressure made
         * shmem_writepage move it from filecache to swapcache beneath us:
         * an unlikely race, but we do need to retry for page_head->mapping.
         */
        if (!page_head->mapping) {
                int shmem_swizzled = PageSwapCache(page_head);
                unlock_page(page_head);
                put_page(page_head);
                if (shmem_swizzled)
                        goto again;
                return -EFAULT;
        }

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
         */
        if (PageAnon(page_head)) {
                /*
                 * A RO anonymous page will never change and thus doesn't make
                 * sense for futex operations.
                 */
                if (ro) {
                        err = -EFAULT;
                        goto out;
                }

                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
        } else {
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = page_head->mapping->host;
                key->shared.pgoff = basepage_index(page);
        }

        get_futex_key_refs(key); /* implies MB (B) */

out:
        unlock_page(page_head);
        put_page(page_head);
        return err;
}

static inline void put_futex_key(union futex_key *key)
{
        drop_futex_key_refs(key);
}
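
/*
 * Illustrative only: the usual pairing of get_futex_key() and
 * put_futex_key() in the syscall paths further down in this file
 * (compare futex_wake() below); every successfully obtained key must
 * be dropped again:
 *
 *	union futex_key key = FUTEX_KEY_INIT;
 *	int ret;
 *
 *	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	// ... hash the key, lock the bucket, operate on it ...
 *	put_futex_key(&key);
 */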

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:      pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_read(&mm->mmap_sem);
        ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
                               FAULT_FLAG_WRITE);
        up_read(&mm->mmap_sem);

        return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:         the hash bucket the futex_q's reside in
 * @key:        the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
                                        union futex_key *key)
{
        struct futex_q *this;

        plist_for_each_entry(this, &hb->chain, list) {
                if (match_futex(&this->key, key))
                        return this;
        }
        return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
                                      u32 uval, u32 newval)
{
        int ret;

        pagefault_disable();
        ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
        pagefault_enable();

        return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
        struct futex_pi_state *pi_state;

        if (likely(current->pi_state_cache))
                return 0;

        pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

        if (!pi_state)
                return -ENOMEM;

        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
        pi_state->key = FUTEX_KEY_INIT;

        current->pi_state_cache = pi_state;

        return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
        struct futex_pi_state *pi_state = current->pi_state_cache;

        WARN_ON(!pi_state);
        current->pi_state_cache = NULL;

        return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
        if (!atomic_dec_and_test(&pi_state->refcount))
                return;

        /*
         * If pi_state->owner is NULL, the owner is most probably dying
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
                raw_spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
                raw_spin_unlock_irq(&pi_state->owner->pi_lock);

                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }

        if (current->pi_state_cache)
                kfree(pi_state);
        else {
                /*
                 * pi_state->list is already empty.
                 * clear pi_state->owner.
                 * refcount is at 0 - put it back to 1.
                 */
                pi_state->owner = NULL;
                atomic_set(&pi_state->refcount, 1);
                current->pi_state_cache = pi_state;
        }
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p)
                get_task_struct(p);

        rcu_read_unlock();

        return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key = FUTEX_KEY_INIT;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        raw_spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {

                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
                raw_spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);

                raw_spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        spin_unlock(&hb->lock);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
                raw_spin_unlock_irq(&curr->pi_lock);

                rt_mutex_unlock(&pi_state->pi_mutex);

                spin_unlock(&hb->lock);

                raw_spin_lock_irq(&curr->pi_lock);
        }
        raw_spin_unlock_irq(&curr->pi_lock);
}

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]  Indicates that the kernel can acquire the futex atomically. We
 *      came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]  Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]  Invalid. The waiter is queued on a non PI futex
 *
 * [4]  Valid state after exit_robust_list(), which sets the user space
 *      value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]  The user space value got manipulated between exit_robust_list()
 *      and exit_pi_state_list()
 *
 * [6]  Valid state after exit_pi_state_list() which sets the new owner in
 *      the pi_state but cannot access the user space value.
 *
 * [7]  pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]  Owner and user space value match
 *
 * [9]  There is no transient state which sets the user space TID to 0
 *      except exit_robust_list(), but this is indicated by the
 *      FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *      TID out of sync.
 */
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                union futex_key *key, struct futex_pi_state **ps)
{
        struct futex_pi_state *pi_state = NULL;
        struct futex_q *this, *next;
        struct task_struct *p;
        pid_t pid = uval & FUTEX_TID_MASK;

        plist_for_each_entry_safe(this, next, &hb->chain, list) {
                if (match_futex(&this->key, key)) {
                        /*
                         * Sanity check the waiter before increasing
                         * the refcount and attaching to it.
                         */
                        pi_state = this->pi_state;
                        /*
                         * Userspace might have messed up non-PI and
                         * PI futexes [3]
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;

                        WARN_ON(!atomic_read(&pi_state->refcount));

                        /*
                         * Handle the owner died case:
                         */
                        if (uval & FUTEX_OWNER_DIED) {
                                /*
                                 * exit_pi_state_list sets owner to NULL and
                                 * wakes the topmost waiter. The task which
                                 * acquires the pi_state->rt_mutex will fixup
                                 * owner.
                                 */
                                if (!pi_state->owner) {
                                        /*
                                         * No pi state owner, but the user
                                         * space TID is not 0. Inconsistent
                                         * state. [5]
                                         */
                                        if (pid)
                                                return -EINVAL;
                                        /*
                                         * Take a ref on the state and
                                         * return. [4]
                                         */
                                        goto out_state;
                                }

                                /*
                                 * If TID is 0, then either the dying owner
                                 * has not yet executed exit_pi_state_list()
                                 * or some waiter acquired the rtmutex in the
                                 * pi state, but did not yet fixup the TID in
                                 * user space.
                                 *
                                 * Take a ref on the state and return. [6]
                                 */
                                if (!pid)
                                        goto out_state;
                        } else {
                                /*
                                 * If the owner died bit is not set,
                                 * then the pi_state must have an
                                 * owner. [7]
                                 */
                                if (!pi_state->owner)
                                        return -EINVAL;
                        }

                        /*
                         * Bail out if user space manipulated the
                         * futex value. If pi state exists then the
                         * owner TID must be the same as the user
                         * space TID. [9/10]
                         */
                        if (pid != task_pid_vnr(pi_state->owner))
                                return -EINVAL;

                out_state:
                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;
                        return 0;
                }
        }

        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0 [1]
         */
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
        if (!p)
                return -ESRCH;

        if (!p->mm) {
                put_task_struct(p);
                return -EPERM;
        }

        /*
         * We need to look at the task state flags to figure out
         * whether the task is exiting. To protect against the do_exit
         * change of the task flags, we do this protected by
         * p->pi_lock:
         */
        raw_spin_lock_irq(&p->pi_lock);
        if (unlikely(p->flags & PF_EXITING)) {
                /*
                 * The task is on the way out. When PF_EXITPIDONE is
                 * set, we know that the task has finished the
                 * cleanup:
                 */
                int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

                raw_spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
                return ret;
        }

        /*
         * No existing pi state. First waiter. [2]
         */
        pi_state = alloc_pi_state();

        /*
         * Initialize the pi_mutex in locked state and make 'p'
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

        /* Store the key for possible exit cleanups: */
        pi_state->key = *key;

        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
        raw_spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        *ps = pi_state;

        return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:              the pi futex user address
 * @hb:                 the pi futex hash bucket
 * @key:                the futex key associated with uaddr and hb
 * @ps:                 the pi_state pointer where we store the result of the
 *                      lookup
 * @task:               the task to perform the atomic lock work for.  This will
 *                      be "current" except in the case of requeue pi.
 * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                                union futex_key *key,
                                struct futex_pi_state **ps,
                                struct task_struct *task, int set_waiters)
{
        int lock_taken, ret, force_take = 0;
        u32 uval, newval, curval, vpid = task_pid_vnr(task);

retry:
        ret = lock_taken = 0;

        /*
         * To avoid races, we attempt to take the lock here again
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
        newval = vpid;
        if (set_waiters)
                newval |= FUTEX_WAITERS;

        if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
                return -EFAULT;

        /*
         * Detect deadlocks.
         */
        if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
                return -EDEADLK;

        /*
         * Surprise - we got the lock, but we do not trust user space at all.
         */
        if (unlikely(!curval)) {
                /*
                 * We verify whether there is kernel state for this
                 * futex. If not, we can safely assume, that the 0 ->
                 * TID transition is correct. If state exists, we do
                 * not bother to fixup the user space state as it was
                 * corrupted already.
                 */
                return futex_top_waiter(hb, key) ? -EINVAL : 1;
        }

        uval = curval;

        /*
         * Set the FUTEX_WAITERS flag, so the owner will know it has someone
         * to wake at the next unlock.
         */
        newval = curval | FUTEX_WAITERS;

        /*
         * Should we force take the futex? See below.
         */
        if (unlikely(force_take)) {
                /*
                 * Keep the OWNER_DIED and the WAITERS bit and set the
                 * new TID value.
                 */
                newval = (curval & ~FUTEX_TID_MASK) | vpid;
                force_take = 0;
                lock_taken = 1;
        }

        if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
                return -EFAULT;
        if (unlikely(curval != uval))
                goto retry;

        /*
         * We took the lock due to forced take over.
         */
        if (unlikely(lock_taken))
                return 1;

        /*
         * We don't have the lock. Look up the PI state (or create it if
         * we are the first waiter):
         */
        ret = lookup_pi_state(uval, hb, key, ps);

        if (unlikely(ret)) {
                switch (ret) {
                case -ESRCH:
                        /*
                         * We failed to find an owner for this
                         * futex. So we have no pi_state to block
                         * on. This can happen in two cases:
                         *
                         * 1) The owner died
                         * 2) A stale FUTEX_WAITERS bit
                         *
                         * Re-read the futex value.
                         */
                        if (get_futex_value_locked(&curval, uaddr))
                                return -EFAULT;

                        /*
                         * If the owner died or we have a stale
                         * WAITERS bit the owner TID in the user space
                         * futex is 0.
                         */
                        if (!(curval & FUTEX_TID_MASK)) {
                                force_take = 1;
                                goto retry;
                        }
                default:
                        break;
                }
        }

        return ret;
}
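
/*
 * Illustrative only: the userspace half of the PI protocol that
 * futex_lock_pi_atomic() completes in the kernel. The fast path is a
 * 0 -> TID cmpxchg done entirely in userspace; only on contention does
 * the locker enter the kernel, which queues it and priority-boosts the
 * owner. A sketch, assuming GCC atomic builtins and the headers from
 * the earlier example; error handling omitted:
 *
 *	static void pi_lock_user(int *uaddr)
 *	{
 *		int zero = 0;
 *		int tid = syscall(SYS_gettid);
 *
 *		// Uncontended: claim the lock by writing our TID.
 *		if (__atomic_compare_exchange_n(uaddr, &zero, tid, 0,
 *						__ATOMIC_ACQUIRE,
 *						__ATOMIC_RELAXED))
 *			return;
 *		// Contended: the kernel sets FUTEX_WAITERS and blocks us.
 *		syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 */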

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:  The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
        struct futex_hash_bucket *hb;

        if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
            || WARN_ON(plist_node_empty(&q->list)))
                return;

        hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
        plist_del(&q->list, &hb->chain);
        hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
        struct task_struct *p = q->task;

        if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
                return;

        /*
         * We set q->lock_ptr = NULL _before_ we wake up the task. If
         * a non-futex wake up happens on another CPU then the task
         * might exit and p would dereference a non-existing task
         * struct. Prevent this by holding a reference on p across the
         * wake up.
         */
        get_task_struct(p);

        __unqueue_futex(q);
        /*
         * The waiting task can free the futex_q as soon as
         * q->lock_ptr = NULL is written, without taking any locks. A
         * memory barrier is required here to prevent the following
         * store to lock_ptr from getting ahead of the plist_del.
         */
        smp_wmb();
        q->lock_ptr = NULL;

        wake_up_state(p, TASK_NORMAL);
        put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 uninitialized_var(curval), newval;
        int ret = 0;

        if (!pi_state)
                return -EINVAL;

        /*
         * If current does not own the pi_state then the futex is
         * inconsistent and user space fiddled with the futex value.
         */
        if (pi_state->owner != current)
                return -EINVAL;

        raw_spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

        /*
         * It is possible that the next waiter (the one that brought
         * this owner to the kernel) timed out and is no longer
         * waiting on the lock.
         */
        if (!new_owner)
                new_owner = this->task;

        /*
         * We pass it to the next owner. The WAITERS bit is always
         * kept enabled while there is PI state around. We cleanup the
         * owner died bit, because we are the owner.
         */
        newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

        if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
                ret = -EFAULT;
        else if (curval != uval)
                ret = -EINVAL;
        if (ret) {
                raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
                return ret;
        }

        raw_spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
        raw_spin_unlock_irq(&pi_state->owner->pi_lock);

        raw_spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
        raw_spin_unlock_irq(&new_owner->pi_lock);

        raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);

        return 0;
}
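
/*
 * Illustrative only: the matching userspace unlock. While the value is
 * exactly our TID there are no waiters and a plain cmpxchg back to 0
 * releases the lock; once FUTEX_WAITERS is set, wake_futex_pi() above
 * must hand the futex to the next waiter, so the kernel has to be
 * involved (sketch, same assumptions as the locking example):
 *
 *	static void pi_unlock_user(int *uaddr)
 *	{
 *		int tid = syscall(SYS_gettid);
 *
 *		if (__atomic_compare_exchange_n(uaddr, &tid, 0, 0,
 *						__ATOMIC_RELEASE,
 *						__ATOMIC_RELAXED))
 *			return;
 *		syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */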

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        if (hb1 <= hb2) {
                spin_lock(&hb1->lock);
                if (hb1 < hb2)
                        spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
        } else { /* hb1 > hb2 */
                spin_lock(&hb2->lock);
                spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
        }
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
        spin_unlock(&hb1->lock);
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        union futex_key key = FUTEX_KEY_INIT;
        int ret;

        if (!bitset)
                return -EINVAL;

        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;

        hb = hash_futex(&key);

        /* Make sure we really have tasks to wakeup */
        if (!hb_waiters_pending(hb))
                goto out_put_key;

        spin_lock(&hb->lock);

        plist_for_each_entry_safe(this, next, &hb->chain, list) {
                if (match_futex(&this->key, &key)) {
                        if (this->pi_state || this->rt_waiter) {
                                ret = -EINVAL;
                                break;
                        }

                        /* Check if one of the bits is set in both bitsets */
                        if (!(this->bitset & bitset))
                                continue;

                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&hb->lock);
out_put_key:
        put_futex_key(&key);
out:
        return ret;
}
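
/*
 * Illustrative only: the bitset matching in futex_wake() is what the
 * FUTEX_WAIT_BITSET/FUTEX_WAKE_BITSET operations expose. A waiter that
 * passed bitset 0x1 is woken by any wake whose mask has bit 0 set,
 * while plain FUTEX_WAIT/FUTEX_WAKE use FUTEX_BITSET_MATCH_ANY and
 * therefore always match:
 *
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x1);
 *	syscall(SYS_futex, uaddr, FUTEX_WAKE_BITSET, 1, NULL, NULL, 0x1);
 */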

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
{
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct futex_q *this, *next;
        int ret, op_ret;

retry:
        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out_put_key1;

        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);

retry_private:
        double_lock_hb(hb1, hb2);
        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {

                double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
                /*
                 * we don't get EFAULT from MMU faults if we don't have an MMU,
                 * but we might get them from range checking
                 */
                ret = op_ret;
                goto out_put_keys;
#endif

                if (unlikely(op_ret != -EFAULT)) {
                        ret = op_ret;
                        goto out_put_keys;
                }

                ret = fault_in_user_writeable(uaddr2);
                if (ret)
                        goto out_put_keys;

                if (!(flags & FLAGS_SHARED))
                        goto retry_private;

                put_futex_key(&key2);
                put_futex_key(&key1);
                goto retry;
        }

        plist_for_each_entry_safe(this, next, &hb1->chain, list) {
                if (match_futex(&this->key, &key1)) {
                        if (this->pi_state || this->rt_waiter) {
                                ret = -EINVAL;
                                goto out_unlock;
                        }
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        if (op_ret > 0) {
                op_ret = 0;
                plist_for_each_entry_safe(this, next, &hb2->chain, list) {
                        if (match_futex(&this->key, &key2)) {
                                if (this->pi_state || this->rt_waiter) {
                                        ret = -EINVAL;
                                        goto out_unlock;
                                }
                                wake_futex(this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
                }
                ret += op_ret;
        }

out_unlock:
        double_unlock_hb(hb1, hb2);
out_put_keys:
        put_futex_key(&key2);
out_put_key1:
        put_futex_key(&key1);
out:
        return ret;
}
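
/*
 * Illustrative only: userspace reaches the path above via FUTEX_WAKE_OP,
 * which atomically applies an encoded operation to *uaddr2 and wakes on
 * both addresses; the second wake count travels in the timeout slot. The
 * encoding below uses the FUTEX_OP() macro from <linux/futex.h>; glibc
 * historically used this operation for condition variable signalling:
 *
 *	// Set *uaddr2 = 1, wake one waiter on uaddr1, and wake one
 *	// waiter on uaddr2 if the old *uaddr2 was equal to 0.
 *	syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1, (void *)1UL, uaddr2,
 *		FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0));
 */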
1357
1358 /**
1359  * requeue_futex() - Requeue a futex_q from one hb to another
1360  * @q:          the futex_q to requeue
1361  * @hb1:        the source hash_bucket
1362  * @hb2:        the target hash_bucket
1363  * @key2:       the new key for the requeued futex_q
1364  */
1365 static inline
1366 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1367                    struct futex_hash_bucket *hb2, union futex_key *key2)
1368 {
1369
1370         /*
1371          * If key1 and key2 hash to the same bucket, no need to
1372          * requeue.
1373          */
1374         if (likely(&hb1->chain != &hb2->chain)) {
1375                 plist_del(&q->list, &hb1->chain);
1376                 hb_waiters_dec(hb1);
1377                 plist_add(&q->list, &hb2->chain);
1378                 hb_waiters_inc(hb2);
1379                 q->lock_ptr = &hb2->lock;
1380         }
1381         get_futex_key_refs(key2);
1382         q->key = *key2;
1383 }
1384
1385 /**
1386  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1387  * @q:          the futex_q
1388  * @key:        the key of the requeue target futex
1389  * @hb:         the hash_bucket of the requeue target futex
1390  *
1391  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1392  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1393  * to the requeue target futex so the waiter can detect the wakeup on the right
1394  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1395  * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
1396  * to protect access to the pi_state to fixup the owner later.  Must be called
1397  * with both q->lock_ptr and hb->lock held.
1398  */
1399 static inline
1400 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1401                            struct futex_hash_bucket *hb)
1402 {
1403         get_futex_key_refs(key);
1404         q->key = *key;
1405
1406         __unqueue_futex(q);
1407
1408         WARN_ON(!q->rt_waiter);
1409         q->rt_waiter = NULL;
1410
1411         q->lock_ptr = &hb->lock;
1412
1413         wake_up_state(q->task, TASK_NORMAL);
1414 }
1415
1416 /**
1417  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1418  * @pifutex:            the user address of the to futex
1419  * @hb1:                the from futex hash bucket, must be locked by the caller
1420  * @hb2:                the to futex hash bucket, must be locked by the caller
1421  * @key1:               the from futex key
1422  * @key2:               the to futex key
1423  * @ps:                 address to store the pi_state pointer
1424  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
1425  *
1426  * Try and get the lock on behalf of the top waiter if we can do it atomically.
1427  * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1428  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1429  * hb1 and hb2 must be held by the caller.
1430  *
1431  * Return:
1432  *  0 - failed to acquire the lock atomically;
1433  * >0 - acquired the lock, return value is vpid of the top_waiter
1434  * <0 - error
1435  */
1436 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1437                                  struct futex_hash_bucket *hb1,
1438                                  struct futex_hash_bucket *hb2,
1439                                  union futex_key *key1, union futex_key *key2,
1440                                  struct futex_pi_state **ps, int set_waiters)
1441 {
1442         struct futex_q *top_waiter = NULL;
1443         u32 curval;
1444         int ret, vpid;
1445
1446         if (get_futex_value_locked(&curval, pifutex))
1447                 return -EFAULT;
1448
1449         /*
1450          * Find the top_waiter and determine if there are additional waiters.
1451          * If the caller intends to requeue more than 1 waiter to pifutex,
1452          * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1453          * as we have means to handle the possible fault.  If not, don't set
1454          * the bit unecessarily as it will force the subsequent unlock to enter
1455          * the kernel.
1456          */
1457         top_waiter = futex_top_waiter(hb1, key1);
1458
1459         /* There are no waiters, nothing for us to do. */
1460         if (!top_waiter)
1461                 return 0;
1462
1463         /* Ensure we requeue to the expected futex. */
1464         if (!match_futex(top_waiter->requeue_pi_key, key2))
1465                 return -EINVAL;
1466
1467         /*
1468          * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1469          * the contended case or if set_waiters is 1.  The pi_state is returned
1470          * in ps in contended cases.
1471          */
1472         vpid = task_pid_vnr(top_waiter->task);
1473         ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1474                                    set_waiters);
1475         if (ret == 1) {
1476                 requeue_pi_wake_futex(top_waiter, key2, hb2);
1477                 return vpid;
1478         }
1479         return ret;
1480 }
1481
1482 /**
1483  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1484  * @uaddr1:     source futex user address
1485  * @flags:      futex flags (FLAGS_SHARED, etc.)
1486  * @uaddr2:     target futex user address
1487  * @nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
1488  * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1489  * @cmpval:     @uaddr1 expected value (or %NULL)
1490  * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1491  *              pi futex (pi to pi requeue is not supported)
1492  *
1493  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1494  * uaddr2 atomically on behalf of the top waiter.
1495  *
1496  * Return:
1497  * >=0 - on success, the number of tasks requeued or woken;
1498  *  <0 - on error
1499  */
1500 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1501                          u32 __user *uaddr2, int nr_wake, int nr_requeue,
1502                          u32 *cmpval, int requeue_pi)
1503 {
1504         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1505         int drop_count = 0, task_count = 0, ret;
1506         struct futex_pi_state *pi_state = NULL;
1507         struct futex_hash_bucket *hb1, *hb2;
1508         struct futex_q *this, *next;
1509
1510         if (requeue_pi) {
1511                 /*
1512                  * Requeue PI only works on two distinct uaddrs. This
1513                  * check is only valid for private futexes. See below.
1514                  */
1515                 if (uaddr1 == uaddr2)
1516                         return -EINVAL;
1517
1518                 /*
1519                  * requeue_pi requires a pi_state, try to allocate it now
1520                  * without any locks in case it fails.
1521                  */
1522                 if (refill_pi_state_cache())
1523                         return -ENOMEM;
1524                 /*
1525                  * requeue_pi must wake as many tasks as it can, up to nr_wake
1526                  * + nr_requeue, since it acquires the rt_mutex prior to
1527                  * returning to userspace, so as to not leave the rt_mutex with
1528                  * waiters and no owner.  However, second and third wake-ups
1529                  * cannot be predicted as they involve race conditions with the
1530                  * first wake and a fault while looking up the pi_state.  Both
1531                  * pthread_cond_signal() and pthread_cond_broadcast() should
1532                  * use nr_wake=1.
1533                  */
1534                 if (nr_wake != 1)
1535                         return -EINVAL;
1536         }
1537
1538 retry:
1539         if (pi_state != NULL) {
1540                 /*
1541                  * We will have to look up the pi_state again, so free this one
1542                  * to keep the accounting correct.
1543                  */
1544                 free_pi_state(pi_state);
1545                 pi_state = NULL;
1546         }
1547
1548         ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1549         if (unlikely(ret != 0))
1550                 goto out;
1551         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1552                             requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1553         if (unlikely(ret != 0))
1554                 goto out_put_key1;
1555
1556         /*
1557          * The check above which compares uaddrs is not sufficient for
1558          * shared futexes. We need to compare the keys:
1559          */
1560         if (requeue_pi && match_futex(&key1, &key2)) {
1561                 ret = -EINVAL;
1562                 goto out_put_keys;
1563         }
1564
1565         hb1 = hash_futex(&key1);
1566         hb2 = hash_futex(&key2);
1567
1568 retry_private:
1569         hb_waiters_inc(hb2);
1570         double_lock_hb(hb1, hb2);
1571
1572         if (likely(cmpval != NULL)) {
1573                 u32 curval;
1574
1575                 ret = get_futex_value_locked(&curval, uaddr1);
1576
1577                 if (unlikely(ret)) {
1578                         double_unlock_hb(hb1, hb2);
1579                         hb_waiters_dec(hb2);
1580
1581                         ret = get_user(curval, uaddr1);
1582                         if (ret)
1583                                 goto out_put_keys;
1584
1585                         if (!(flags & FLAGS_SHARED))
1586                                 goto retry_private;
1587
1588                         put_futex_key(&key2);
1589                         put_futex_key(&key1);
1590                         goto retry;
1591                 }
1592                 if (curval != *cmpval) {
1593                         ret = -EAGAIN;
1594                         goto out_unlock;
1595                 }
1596         }
1597
1598         if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1599                 /*
1600                  * Attempt to acquire uaddr2 and wake the top waiter. If we
1601                  * intend to requeue waiters, force setting the FUTEX_WAITERS
1602                  * bit.  We force this here where we are able to easily handle
1603          * faults rather than in the requeue loop below.
1604                  */
1605                 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1606                                                  &key2, &pi_state, nr_requeue);
1607
1608                 /*
1609                  * At this point the top_waiter has either taken uaddr2 or is
1610                  * waiting on it.  If the former, then the pi_state will not
1611                  * exist yet, look it up one more time to ensure we have a
1612                  * reference to it. If the lock was taken, ret contains the
1613                  * vpid of the top waiter task.
1614                  */
1615                 if (ret > 0) {
1616                         WARN_ON(pi_state);
1617                         drop_count++;
1618                         task_count++;
1619                         /*
1620                          * If we acquired the lock, then the user
1621                          * space value of uaddr2 should be vpid. It
1622                          * cannot be changed by the top waiter as it
1623                          * is blocked on hb2 lock if it tries to do
1624                          * so. If something fiddled with it behind our
1625                          * back the pi state lookup might unearth
1626          * it. So we use the known value rather than
1627                          * rereading and handing potential crap to
1628                          * lookup_pi_state.
1629                          */
1630                         ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1631                 }
1632
1633                 switch (ret) {
1634                 case 0:
1635                         break;
1636                 case -EFAULT:
1637                         double_unlock_hb(hb1, hb2);
1638                         hb_waiters_dec(hb2);
1639                         put_futex_key(&key2);
1640                         put_futex_key(&key1);
1641                         ret = fault_in_user_writeable(uaddr2);
1642                         if (!ret)
1643                                 goto retry;
1644                         goto out;
1645                 case -EAGAIN:
1646                         /* The owner was exiting, try again. */
1647                         double_unlock_hb(hb1, hb2);
1648                         hb_waiters_dec(hb2);
1649                         put_futex_key(&key2);
1650                         put_futex_key(&key1);
1651                         cond_resched();
1652                         goto retry;
1653                 default:
1654                         goto out_unlock;
1655                 }
1656         }
1657
1658         plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1659                 if (task_count - nr_wake >= nr_requeue)
1660                         break;
1661
1662                 if (!match_futex(&this->key, &key1))
1663                         continue;
1664
1665                 /*
1666          * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1667                  * be paired with each other and no other futex ops.
1668                  *
1669                  * We should never be requeueing a futex_q with a pi_state,
1670                  * which is awaiting a futex_unlock_pi().
1671                  */
1672                 if ((requeue_pi && !this->rt_waiter) ||
1673                     (!requeue_pi && this->rt_waiter) ||
1674                     this->pi_state) {
1675                         ret = -EINVAL;
1676                         break;
1677                 }
1678
1679                 /*
1680                  * Wake nr_wake waiters.  For requeue_pi, if we acquired the
1681                  * lock, we already woke the top_waiter.  If not, it will be
1682                  * woken by futex_unlock_pi().
1683                  */
1684                 if (++task_count <= nr_wake && !requeue_pi) {
1685                         wake_futex(this);
1686                         continue;
1687                 }
1688
1689                 /* Ensure we requeue to the expected futex for requeue_pi. */
1690                 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1691                         ret = -EINVAL;
1692                         break;
1693                 }
1694
1695                 /*
1696                  * Requeue nr_requeue waiters and possibly one more in the case
1697                  * of requeue_pi if we couldn't acquire the lock atomically.
1698                  */
1699                 if (requeue_pi) {
1700                         /* Prepare the waiter to take the rt_mutex. */
1701                         atomic_inc(&pi_state->refcount);
1702                         this->pi_state = pi_state;
1703                         ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1704                                                         this->rt_waiter,
1705                                                         this->task);
1706                         if (ret == 1) {
1707                                 /* We got the lock. */
1708                                 requeue_pi_wake_futex(this, &key2, hb2);
1709                                 drop_count++;
1710                                 continue;
1711                         } else if (ret) {
1712                                 /* -EDEADLK */
1713                                 this->pi_state = NULL;
1714                                 free_pi_state(pi_state);
1715                                 goto out_unlock;
1716                         }
1717                 }
1718                 requeue_futex(this, hb1, hb2, &key2);
1719                 drop_count++;
1720         }
1721
1722 out_unlock:
1723         double_unlock_hb(hb1, hb2);
1724         hb_waiters_dec(hb2);
1725
1726         /*
1727          * drop_futex_key_refs() must be called outside the spinlocks. During
1728          * the requeue we moved futex_q's from the hash bucket at key1 to the
1729          * one at key2 and updated their key pointer.  We no longer need to
1730          * hold the references to key1.
1731          */
1732         while (--drop_count >= 0)
1733                 drop_futex_key_refs(&key1);
1734
1735 out_put_keys:
1736         put_futex_key(&key2);
1737 out_put_key1:
1738         put_futex_key(&key1);
1739 out:
1740         if (pi_state != NULL)
1741                 free_pi_state(pi_state);
1742         return ret ? ret : task_count;
1743 }
1744
1745 /* The key must be already stored in q->key. */
1746 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1747         __acquires(&hb->lock)
1748 {
1749         struct futex_hash_bucket *hb;
1750
1751         hb = hash_futex(&q->key);
1752
1753         /*
1754          * Increment the counter before taking the lock so that
1755          * a potential waker won't miss a task about to sleep that is
1756          * waiting for the spinlock. This is safe as all queue_lock()
1757          * users end up calling queue_me(). Similarly, for housekeeping,
1758          * decrement the counter at queue_unlock() when some error has
1759          * occurred and we don't end up adding the task to the list.
1760          */
1761         hb_waiters_inc(hb);
1762
1763         q->lock_ptr = &hb->lock;
1764
1765         spin_lock(&hb->lock); /* implies MB (A) */
1766         return hb;
1767 }
1768
1769 static inline void
1770 queue_unlock(struct futex_hash_bucket *hb)
1771         __releases(&hb->lock)
1772 {
1773         spin_unlock(&hb->lock);
1774         hb_waiters_dec(hb);
1775 }
1776
1777 /**
1778  * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1779  * @q:  The futex_q to enqueue
1780  * @hb: The destination hash bucket
1781  *
1782  * The hb->lock must be held by the caller, and is released here. A call to
1783  * queue_me() is typically paired with exactly one call to unqueue_me().  The
1784  * exceptions involve the PI related operations, which may use unqueue_me_pi()
1785  * exceptions involve the PI-related operations, which may use unqueue_me_pi()
1786  * or nothing if the unqueue is done as part of the wake process and the
1787  * unqueue state is implicit in the state of the woken task (see
1788  * futex_wait_requeue_pi() for an example).
1789 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1790         __releases(&hb->lock)
1791 {
1792         int prio;
1793
1794         /*
1795          * The priority used to register this element is
1796          * - either the real thread-priority for the real-time threads
1797          * (i.e. threads with a priority lower than MAX_RT_PRIO)
1798          * - or MAX_RT_PRIO for non-RT threads.
1799          * Thus, all RT-threads are woken first in priority order, and
1800          * the others are woken last, in FIFO order.
1801          */
1802         prio = min(current->normal_prio, MAX_RT_PRIO);
1803
1804         plist_node_init(&q->list, prio);
1805         plist_add(&q->list, &hb->chain);
1806         q->task = current;
1807         spin_unlock(&hb->lock);
1808 }
1809
1810 /**
1811  * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1812  * @q:  The futex_q to unqueue
1813  *
1814  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1815  * be paired with exactly one earlier call to queue_me().
1816  *
1817  * Return:
1818  *   1 - if the futex_q was still queued (and we removed it);
1819  *   0 - if the futex_q was already removed by the waking thread
1820  */
1821 static int unqueue_me(struct futex_q *q)
1822 {
1823         spinlock_t *lock_ptr;
1824         int ret = 0;
1825
1826         /* In the common case we don't take the spinlock, which is nice. */
1827 retry:
1828         lock_ptr = q->lock_ptr;
1829         barrier();
1830         if (lock_ptr != NULL) {
1831                 spin_lock(lock_ptr);
1832                 /*
1833                  * q->lock_ptr can change between reading it and
1834                  * spin_lock(), causing us to take the wrong lock.  This
1835                  * corrects the race condition.
1836                  *
1837                  * Reasoning goes like this: if we have the wrong lock,
1838                  * q->lock_ptr must have changed (maybe several times)
1839                  * between reading it and the spin_lock().  It can
1840                  * change again after the spin_lock() but only if it was
1841                  * already changed before the spin_lock().  It cannot,
1842                  * however, change back to the original value.  Therefore
1843                  * we can detect whether we acquired the correct lock.
1844                  */
1845                 if (unlikely(lock_ptr != q->lock_ptr)) {
1846                         spin_unlock(lock_ptr);
1847                         goto retry;
1848                 }
1849                 __unqueue_futex(q);
1850
1851                 BUG_ON(q->pi_state);
1852
1853                 spin_unlock(lock_ptr);
1854                 ret = 1;
1855         }
1856
1857         drop_futex_key_refs(&q->key);
1858         return ret;
1859 }
1860
1861 /*
1862  * PI futexes cannot be requeued and must remove themselves from the
1863  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1864  * and dropped here.
1865  */
1866 static void unqueue_me_pi(struct futex_q *q)
1867         __releases(q->lock_ptr)
1868 {
1869         __unqueue_futex(q);
1870
1871         BUG_ON(!q->pi_state);
1872         free_pi_state(q->pi_state);
1873         q->pi_state = NULL;
1874
1875         spin_unlock(q->lock_ptr);
1876 }
1877
1878 /*
1879  * Fixup the pi_state owner with the new owner.
1880  *
1881  * Must be called with the hash bucket lock held, and with mm->sem held
1882  * for non-private futexes.
1883  */
1884 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1885                                 struct task_struct *newowner)
1886 {
1887         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1888         struct futex_pi_state *pi_state = q->pi_state;
1889         struct task_struct *oldowner = pi_state->owner;
1890         u32 uval, uninitialized_var(curval), newval;
1891         int ret;
1892
1893         /* Owner died? */
1894         if (!pi_state->owner)
1895                 newtid |= FUTEX_OWNER_DIED;
1896
1897         /*
1898          * We are here either because we stole the rtmutex from the
1899          * previous highest priority waiter or we are the highest priority
1900          * waiter but failed to get the rtmutex the first time.
1901          * We have to replace the newowner TID in the user space variable.
1902          * This must be atomic as we have to preserve the owner died bit here.
1903          *
1904          * Note: We write the user space value _before_ changing the pi_state
1905          * because we can fault here. Imagine swapped out pages or a fork
1906          * that marked all the anonymous memory readonly for cow.
1907          *
1908          * Modifying pi_state _before_ the user space value would
1909          * leave the pi_state in an inconsistent state when we fault
1910          * here, because we need to drop the hash bucket lock to
1911          * handle the fault. This might be observed in the PID check
1912          * in lookup_pi_state.
1913          */
1914 retry:
1915         if (get_futex_value_locked(&uval, uaddr))
1916                 goto handle_fault;
1917
1918         while (1) {
1919                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1920
1921                 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1922                         goto handle_fault;
1923                 if (curval == uval)
1924                         break;
1925                 uval = curval;
1926         }
1927
1928         /*
1929          * We fixed up user space. Now we need to fix the pi_state
1930          * itself.
1931          */
1932         if (pi_state->owner != NULL) {
1933                 raw_spin_lock_irq(&pi_state->owner->pi_lock);
1934                 WARN_ON(list_empty(&pi_state->list));
1935                 list_del_init(&pi_state->list);
1936                 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1937         }
1938
1939         pi_state->owner = newowner;
1940
1941         raw_spin_lock_irq(&newowner->pi_lock);
1942         WARN_ON(!list_empty(&pi_state->list));
1943         list_add(&pi_state->list, &newowner->pi_state_list);
1944         raw_spin_unlock_irq(&newowner->pi_lock);
1945         return 0;
1946
1947         /*
1948          * To handle the page fault we need to drop the hash bucket
1949          * lock here. That gives the other task (either the highest priority
1950          * waiter itself or the task which stole the rtmutex) the
1951          * chance to try the fixup of the pi_state. So once we are
1952          * back from handling the fault we need to check the pi_state
1953          * after reacquiring the hash bucket lock and before trying to
1954          * do another fixup. When the fixup has been done already we
1955          * simply return.
1956          */
1957 handle_fault:
1958         spin_unlock(q->lock_ptr);
1959
1960         ret = fault_in_user_writeable(uaddr);
1961
1962         spin_lock(q->lock_ptr);
1963
1964         /*
1965          * Check if someone else fixed it for us:
1966          */
1967         if (pi_state->owner != oldowner)
1968                 return 0;
1969
1970         if (ret)
1971                 return ret;
1972
1973         goto retry;
1974 }
1975
1976 static long futex_wait_restart(struct restart_block *restart);
1977
1978 /**
1979  * fixup_owner() - Post lock pi_state and corner case management
1980  * @uaddr:      user address of the futex
1981  * @q:          futex_q (contains pi_state and access to the rt_mutex)
1982  * @locked:     if the attempt to take the rt_mutex succeeded (1) or not (0)
1983  *
1984  * After attempting to lock an rt_mutex, this function is called to cleanup
1985  * the pi_state owner as well as handle race conditions that may allow us to
1986  * acquire the lock. Must be called with the hb lock held.
1987  *
1988  * Return:
1989  *  1 - success, lock taken;
1990  *  0 - success, lock not taken;
1991  * <0 - on error (-EFAULT)
1992  */
1993 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1994 {
1995         struct task_struct *owner;
1996         int ret = 0;
1997
1998         if (locked) {
1999                 /*
2000                  * Got the lock. We might not be the anticipated owner if we
2001                  * did a lock-steal - fix up the PI-state in that case:
2002                  */
2003                 if (q->pi_state->owner != current)
2004                         ret = fixup_pi_state_owner(uaddr, q, current);
2005                 goto out;
2006         }
2007
2008         /*
2009          * Catch the rare case where the lock was released while we were on the
2010          * way back before we locked the hash bucket.
2011          */
2012         if (q->pi_state->owner == current) {
2013                 /*
2014                  * Try to get the rt_mutex now. This might fail as some other
2015                  * task acquired the rt_mutex after we removed ourselves from the
2016                  * rt_mutex waiters list.
2017                  */
2018                 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2019                         locked = 1;
2020                         goto out;
2021                 }
2022
2023                 /*
2024                  * pi_state is incorrect, some other task did a lock steal and
2025                  * we returned due to timeout or signal without taking the
2026                  * rt_mutex. Too late.
2027                  */
2028                 raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
2029                 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2030                 if (!owner)
2031                         owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2032                 raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
2033                 ret = fixup_pi_state_owner(uaddr, q, owner);
2034                 goto out;
2035         }
2036
2037         /*
2038          * Paranoia check. If we did not take the lock, then we should not be
2039          * the owner of the rt_mutex.
2040          */
2041         if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2042                 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2043                                 "pi-state %p\n", ret,
2044                                 q->pi_state->pi_mutex.owner,
2045                                 q->pi_state->owner);
2046
2047 out:
2048         return ret ? ret : locked;
2049 }
2050
2051 /**
2052  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2053  * @hb:         the futex hash bucket, must be locked by the caller
2054  * @q:          the futex_q to queue up on
2055  * @timeout:    the prepared hrtimer_sleeper, or null for no timeout
2056  */
2057 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2058                                 struct hrtimer_sleeper *timeout)
2059 {
2060         /*
2061          * The task state is guaranteed to be set before another task can
2062          * wake it. set_current_state() is implemented using set_mb() and
2063          * queue_me() calls spin_unlock() upon completion, both serializing
2064          * access to the hash list and forcing another memory barrier.
2065          */
2066         set_current_state(TASK_INTERRUPTIBLE);
2067         queue_me(q, hb);
2068
2069         /* Arm the timer */
2070         if (timeout) {
2071                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2072                 if (!hrtimer_active(&timeout->timer))
2073                         timeout->task = NULL;
2074         }
2075
2076         /*
2077          * If we have been removed from the hash list, then another task
2078          * has tried to wake us, and we can skip the call to schedule().
2079          */
2080         if (likely(!plist_node_empty(&q->list))) {
2081                 /*
2082                  * If the timer has already expired, current will already be
2083                  * flagged for rescheduling. Only call schedule if there
2084                  * is no timeout, or if it has yet to expire.
2085                  */
2086                 if (!timeout || timeout->task)
2087                         freezable_schedule();
2088         }
2089         __set_current_state(TASK_RUNNING);
2090 }
2091
2092 /**
2093  * futex_wait_setup() - Prepare to wait on a futex
2094  * @uaddr:      the futex userspace address
2095  * @val:        the expected value
2096  * @flags:      futex flags (FLAGS_SHARED, etc.)
2097  * @q:          the associated futex_q
2098  * @hb:         storage for hash_bucket pointer to be returned to caller
2099  *
2100  * Setup the futex_q and locate the hash_bucket.  Get the futex value and
2101  * compare it with the expected value.  Handle atomic faults internally.
2102  * Return with the hb lock held and a q.key reference on success, and unlocked
2103  * with no q.key reference on failure.
2104  *
2105  * Return:
2106  *  0 - uaddr contains val and hb has been locked;
2107  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2108  */
2109 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2110                            struct futex_q *q, struct futex_hash_bucket **hb)
2111 {
2112         u32 uval;
2113         int ret;
2114
2115         /*
2116          * Access the page AFTER the hash-bucket is locked.
2117          * Order is important:
2118          *
2119          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2120          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
2121          *
2122          * The basic logical guarantee of a futex is that it blocks ONLY
2123          * if cond(var) is known to be true at the time of blocking, for
2124          * any cond.  If we locked the hash-bucket after testing *uaddr, that
2125          * would open a race condition where we could block indefinitely with
2126          * cond(var) false, which would violate the guarantee.
2127          *
2128          * On the other hand, we insert q and release the hash-bucket only
2129          * after testing *uaddr.  This guarantees that futex_wait() will NOT
2130          * absorb a wakeup if *uaddr does not match the desired value
2131          * while the syscall executes.
2132          */
2133 retry:
2134         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2135         if (unlikely(ret != 0))
2136                 return ret;
2137
2138 retry_private:
2139         *hb = queue_lock(q);
2140
2141         ret = get_futex_value_locked(&uval, uaddr);
2142
2143         if (ret) {
2144                 queue_unlock(*hb);
2145
2146                 ret = get_user(uval, uaddr);
2147                 if (ret)
2148                         goto out;
2149
2150                 if (!(flags & FLAGS_SHARED))
2151                         goto retry_private;
2152
2153                 put_futex_key(&q->key);
2154                 goto retry;
2155         }
2156
2157         if (uval != val) {
2158                 queue_unlock(*hb);
2159                 ret = -EWOULDBLOCK;
2160         }
2161
2162 out:
2163         if (ret)
2164                 put_futex_key(&q->key);
2165         return ret;
2166 }
2167
2168 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2169                       ktime_t *abs_time, u32 bitset)
2170 {
2171         struct hrtimer_sleeper timeout, *to = NULL;
2172         struct restart_block *restart;
2173         struct futex_hash_bucket *hb;
2174         struct futex_q q = futex_q_init;
2175         int ret;
2176
2177         if (!bitset)
2178                 return -EINVAL;
2179         q.bitset = bitset;
2180
2181         if (abs_time) {
2182                 to = &timeout;
2183
2184                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2185                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
2186                                       HRTIMER_MODE_ABS);
2187                 hrtimer_init_sleeper(to, current);
2188                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2189                                              current->timer_slack_ns);
2190         }
2191
2192 retry:
2193         /*
2194          * Prepare to wait on uaddr. On success, holds hb lock and increments
2195          * q.key refs.
2196          */
2197         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2198         if (ret)
2199                 goto out;
2200
2201         /* queue_me and wait for wakeup, timeout, or a signal. */
2202         futex_wait_queue_me(hb, &q, to);
2203
2204         /* If we were woken (and unqueued), we succeeded, whatever. */
2205         ret = 0;
2206         /* unqueue_me() drops q.key ref */
2207         if (!unqueue_me(&q))
2208                 goto out;
2209         ret = -ETIMEDOUT;
2210         if (to && !to->task)
2211                 goto out;
2212
2213         /*
2214          * We expect signal_pending(current), but we might be the
2215          * victim of a spurious wakeup as well.
2216          */
2217         if (!signal_pending(current))
2218                 goto retry;
2219
2220         ret = -ERESTARTSYS;
2221         if (!abs_time)
2222                 goto out;
2223
2224         restart = &current_thread_info()->restart_block;
2225         restart->fn = futex_wait_restart;
2226         restart->futex.uaddr = uaddr;
2227         restart->futex.val = val;
2228         restart->futex.time = abs_time->tv64;
2229         restart->futex.bitset = bitset;
2230         restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2231
2232         ret = -ERESTART_RESTARTBLOCK;
2233
2234 out:
2235         if (to) {
2236                 hrtimer_cancel(&to->timer);
2237                 destroy_hrtimer_on_stack(&to->timer);
2238         }
2239         return ret;
2240 }
2241
2242
2243 static long futex_wait_restart(struct restart_block *restart)
2244 {
2245         u32 __user *uaddr = restart->futex.uaddr;
2246         ktime_t t, *tp = NULL;
2247
2248         if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2249                 t.tv64 = restart->futex.time;
2250                 tp = &t;
2251         }
2252         restart->fn = do_no_restart_syscall;
2253
2254         return (long)futex_wait(uaddr, restart->futex.flags,
2255                                 restart->futex.val, tp, restart->futex.bitset);
2256 }
2257
2258
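/*
 * For orientation, a minimal sketch of the userspace fast path whose
 * failure lands us here. "futex_val" is an assumed aligned u32 and
 * gettid() stands in for the thread's TID; real implementations add
 * the robust-list protocol and error handling:
 *
 *	u32 expected = 0;
 *	if (!__atomic_compare_exchange_n(&futex_val, &expected, gettid(), 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &futex_val, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */
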
2259 /*
2260  * Userspace tried a 0 -> TID atomic transition of the futex value
2261  * and failed. The kernel side here does the whole locking operation:
2262  * if there are waiters then it will block, it does PI, etc. (Due to
2263  * races the kernel might see a 0 value of the futex too.)
2264  */
2265 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
2266                          ktime_t *time, int trylock)
2267 {
2268         struct hrtimer_sleeper timeout, *to = NULL;
2269         struct futex_hash_bucket *hb;
2270         struct futex_q q = futex_q_init;
2271         int res, ret;
2272
2273         if (refill_pi_state_cache())
2274                 return -ENOMEM;
2275
2276         if (time) {
2277                 to = &timeout;
2278                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2279                                       HRTIMER_MODE_ABS);
2280                 hrtimer_init_sleeper(to, current);
2281                 hrtimer_set_expires(&to->timer, *time);
2282         }
2283
2284 retry:
2285         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2286         if (unlikely(ret != 0))
2287                 goto out;
2288
2289 retry_private:
2290         hb = queue_lock(&q);
2291
2292         ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2293         if (unlikely(ret)) {
2294                 switch (ret) {
2295                 case 1:
2296                         /* We got the lock. */
2297                         ret = 0;
2298                         goto out_unlock_put_key;
2299                 case -EFAULT:
2300                         goto uaddr_faulted;
2301                 case -EAGAIN:
2302                         /*
2303                          * Task is exiting and we just wait for the
2304                          * exit to complete.
2305                          */
2306                         queue_unlock(hb);
2307                         put_futex_key(&q.key);
2308                         cond_resched();
2309                         goto retry;
2310                 default:
2311                         goto out_unlock_put_key;
2312                 }
2313         }
2314
2315         /*
2316          * Only actually queue now that the atomic ops are done:
2317          */
2318         queue_me(&q, hb);
2319
2320         WARN_ON(!q.pi_state);
2321         /*
2322          * Block on the PI mutex:
2323          */
2324         if (!trylock) {
2325                 ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
2326         } else {
2327                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2328                 /* Fixup the trylock return value: */
2329                 ret = ret ? 0 : -EWOULDBLOCK;
2330         }
2331
2332         spin_lock(q.lock_ptr);
2333         /*
2334          * Fixup the pi_state owner and possibly acquire the lock if we
2335          * haven't already.
2336          */
2337         res = fixup_owner(uaddr, &q, !ret);
2338         /*
2339          * If fixup_owner() returned an error, propagate that.  If it acquired
2340          * the lock, clear our -ETIMEDOUT or -EINTR.
2341          */
2342         if (res)
2343                 ret = (res < 0) ? res : 0;
2344
2345         /*
2346          * If fixup_owner() faulted and was unable to handle the fault, unlock
2347          * it and return the fault to userspace.
2348          */
2349         if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2350                 rt_mutex_unlock(&q.pi_state->pi_mutex);
2351
2352         /* Unqueue and drop the lock */
2353         unqueue_me_pi(&q);
2354
2355         goto out_put_key;
2356
2357 out_unlock_put_key:
2358         queue_unlock(hb);
2359
2360 out_put_key:
2361         put_futex_key(&q.key);
2362 out:
2363         if (to)
2364                 destroy_hrtimer_on_stack(&to->timer);
2365         return ret != -EINTR ? ret : -ERESTARTNOINTR;
2366
2367 uaddr_faulted:
2368         queue_unlock(hb);
2369
2370         ret = fault_in_user_writeable(uaddr);
2371         if (ret)
2372                 goto out_put_key;
2373
2374         if (!(flags & FLAGS_SHARED))
2375                 goto retry_private;
2376
2377         put_futex_key(&q.key);
2378         goto retry;
2379 }
2380
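/*
 * The matching userspace unlock fast path, a sketch under the same
 * assumptions as the lock sketch above: if the value is still exactly
 * our TID (no FUTEX_WAITERS, no FUTEX_OWNER_DIED), one cmpxchg
 * releases the lock without entering the kernel:
 *
 *	u32 tid = gettid();
 *	if (!__atomic_compare_exchange_n(&futex_val, &tid, 0, 0,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &futex_val, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */
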
2381 /*
2382  * Userspace attempted a TID -> 0 atomic transition, and failed.
2383  * This is the in-kernel slowpath: we look up the PI state (if any),
2384  * and do the rt-mutex unlock.
2385  */
2386 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2387 {
2388         u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2389         union futex_key key = FUTEX_KEY_INIT;
2390         struct futex_hash_bucket *hb;
2391         struct futex_q *match;
2392         int ret;
2393
2394 retry:
2395         if (get_user(uval, uaddr))
2396                 return -EFAULT;
2397         /*
2398          * We release only a lock we actually own:
2399          */
2400         if ((uval & FUTEX_TID_MASK) != vpid)
2401                 return -EPERM;
2402
2403         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2404         if (ret)
2405                 return ret;
2406
2407         hb = hash_futex(&key);
2408         spin_lock(&hb->lock);
2409
2410         /*
2411          * Check waiters first. We do not trust user space values at
2412          * all and we at least want to know if user space fiddled
2413          * with the futex value instead of blindly unlocking.
2414          */
2415         match = futex_top_waiter(hb, &key);
2416         if (match) {
2417                 ret = wake_futex_pi(uaddr, uval, match);
2418                 /*
2419                  * The atomic access to the futex value generated a
2420                  * pagefault, so retry the user-access and the wakeup:
2421                  */
2422                 if (ret == -EFAULT)
2423                         goto pi_faulted;
2424                 goto out_unlock;
2425         }
2426
2427         /*
2428          * We have no kernel internal state, i.e. no waiters in the
2429          * on hb->lock. So we can safely ignore them. We preserve
2430          * neither the WAITERS bit nor the OWNER_DIED one. We are the
2431          * preserve the WAITERS bit not the OWNER_DIED one. We are the
2432          * owner.
2433          */
2434         if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
2435                 goto pi_faulted;
2436
2437         /*
2438          * If uval has changed, let user space handle it.
2439          */
2440         ret = (curval == uval) ? 0 : -EAGAIN;
2441
2442 out_unlock:
2443         spin_unlock(&hb->lock);
2444         put_futex_key(&key);
2445         return ret;
2446
2447 pi_faulted:
2448         spin_unlock(&hb->lock);
2449         put_futex_key(&key);
2450
2451         ret = fault_in_user_writeable(uaddr);
2452         if (!ret)
2453                 goto retry;
2454
2455         return ret;
2456 }
2457
2458 /**
2459  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2460  * @hb:         the hash_bucket futex_q was originally enqueued on
2461  * @q:          the futex_q woken while waiting to be requeued
2462  * @key2:       the futex_key of the requeue target futex
2463  * @timeout:    the timeout associated with the wait (NULL if none)
2464  *
2465  * Detect if the task was woken on the initial futex as opposed to the requeue
2466  * target futex.  If so, determine if it was a timeout or a signal that caused
2467  * the wakeup and return the appropriate error code to the caller.  Must be
2468  * called with the hb lock held.
2469  *
2470  * Return:
2471  *  0 - no early wakeup detected;
2472  * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2473  */
2474 static inline
2475 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2476                                    struct futex_q *q, union futex_key *key2,
2477                                    struct hrtimer_sleeper *timeout)
2478 {
2479         int ret = 0;
2480
2481         /*
2482          * With the hb lock held, we avoid races while we process the wakeup.
2483          * We only need to hold hb (and not hb2) to ensure atomicity as the
2484          * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2485          * It can't be requeued from uaddr2 to something else since we don't
2486          * support a PI aware source futex for requeue.
2487          */
2488         if (!match_futex(&q->key, key2)) {
2489                 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2490                 /*
2491                  * We were woken prior to requeue by a timeout or a signal.
2492                  * Unqueue the futex_q and determine which it was.
2493                  */
2494                 plist_del(&q->list, &hb->chain);
2495                 hb_waiters_dec(hb);
2496
2497                 /* Handle spurious wakeups gracefully */
2498                 ret = -EWOULDBLOCK;
2499                 if (timeout && !timeout->task)
2500                         ret = -ETIMEDOUT;
2501                 else if (signal_pending(current))
2502                         ret = -ERESTARTNOINTR;
2503         }
2504         return ret;
2505 }
2506
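/*
 * A hedged sketch of the userspace pairing for FUTEX_WAIT_REQUEUE_PI
 * (handled below) and FUTEX_CMP_REQUEUE_PI (handled by futex_requeue()
 * above), modelled on a PI-aware condvar; names and layout are
 * hypothetical and error handling is omitted. As with plain requeue,
 * nr_requeue rides in the timeout slot of the raw syscall:
 *
 *	waiter:
 *		syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI,
 *			seq, NULL, &mutex->val, 0);
 *	signaller:
 *		syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI,
 *			1, 0, &mutex->val, seq);
 */
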
2507 /**
2508  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2509  * @uaddr:      the futex we initially wait on (non-pi)
2510  * @flags:      futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); both futexes
2511  *              must be of the same type, i.e. no requeueing from private to shared
2512  * @val:        the expected value of uaddr
2513  * @abs_time:   absolute timeout
2514  * @bitset:     32 bit wakeup bitset set by userspace, defaults to all
2515  * @uaddr2:     the pi futex we will take prior to returning to user-space
2516  *
2517  * The caller will wait on uaddr and will be requeued by futex_requeue() to
2518  * uaddr2 which must be PI aware and distinct from uaddr.  Normal wakeup will wake
2519  * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2520  * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
2521  * without one, the pi logic would not know which task to boost/deboost, if
2522  * there was a need to.
2523  *
2524  * We call schedule in futex_wait_queue_me() when we enqueue and return there
2525  * via one of the following:
2526  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2527  * 2) wakeup on uaddr2 after a requeue
2528  * 3) signal
2529  * 4) timeout
2530  *
2531  * If 3, cleanup and return -ERESTARTNOINTR.
2532  *
2533  * If 2, we may then block on trying to take the rt_mutex and return via:
2534  * 5) successful lock
2535  * 6) signal
2536  * 7) timeout
2537  * 8) other lock acquisition failure
2538  *
2539  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2540  *
2541  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2542  *
2543  * Return:
2544  *  0 - On success;
2545  * <0 - On error
2546  */
2547 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2548                                  u32 val, ktime_t *abs_time, u32 bitset,
2549                                  u32 __user *uaddr2)
2550 {
2551         struct hrtimer_sleeper timeout, *to = NULL;
2552         struct rt_mutex_waiter rt_waiter;
2553         struct rt_mutex *pi_mutex = NULL;
2554         struct futex_hash_bucket *hb;
2555         union futex_key key2 = FUTEX_KEY_INIT;
2556         struct futex_q q = futex_q_init;
2557         int res, ret;
2558
2559         if (uaddr == uaddr2)
2560                 return -EINVAL;
2561
2562         if (!bitset)
2563                 return -EINVAL;
2564
2565         if (abs_time) {
2566                 to = &timeout;
2567                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2568                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
2569                                       HRTIMER_MODE_ABS);
2570                 hrtimer_init_sleeper(to, current);
2571                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2572                                              current->timer_slack_ns);
2573         }
2574
2575         /*
2576          * The waiter is allocated on our stack, manipulated by the requeue
2577          * code while we sleep on uaddr.
2578          */
2579         debug_rt_mutex_init_waiter(&rt_waiter);
2580         RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2581         RB_CLEAR_NODE(&rt_waiter.tree_entry);
2582         rt_waiter.task = NULL;
2583
2584         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2585         if (unlikely(ret != 0))
2586                 goto out;
2587
2588         q.bitset = bitset;
2589         q.rt_waiter = &rt_waiter;
2590         q.requeue_pi_key = &key2;
2591
2592         /*
2593          * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2594          * count.
2595          */
2596         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2597         if (ret)
2598                 goto out_key2;
2599
2600         /*
2601          * The check above which compares uaddrs is not sufficient for
2602          * shared futexes. We need to compare the keys:
2603          */
2604         if (match_futex(&q.key, &key2)) {
2605                 ret = -EINVAL;
2606                 goto out_put_keys;
2607         }
2608
2609         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2610         futex_wait_queue_me(hb, &q, to);
2611
2612         spin_lock(&hb->lock);
2613         ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2614         spin_unlock(&hb->lock);
2615         if (ret)
2616                 goto out_put_keys;
2617
2618         /*
2619          * In order for us to be here, we know our q.key == key2, and since
2620          * we took the hb->lock above, we also know that futex_requeue() has
2621          * completed and we no longer have to concern ourselves with a wakeup
2622          * race with the atomic proxy lock acquisition by the requeue code. The
2623          * futex_requeue dropped our key1 reference and incremented our key2
2624          * reference count.
2625          */
2626
2627         /* Check if the requeue code acquired the second futex for us. */
2628         if (!q.rt_waiter) {
2629                 /*
2630                  * Got the lock. We might not be the anticipated owner if we
2631                  * did a lock-steal - fix up the PI-state in that case.
2632                  */
2633                 if (q.pi_state && (q.pi_state->owner != current)) {
2634                         spin_lock(q.lock_ptr);
2635                         ret = fixup_pi_state_owner(uaddr2, &q, current);
2636                         spin_unlock(q.lock_ptr);
2637                 }
2638         } else {
2639                 /*
2640                  * We have been woken up by futex_unlock_pi(), a timeout, or a
2641                  * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2642                  * the pi_state.
2643                  */
2644                 WARN_ON(!q.pi_state);
2645                 pi_mutex = &q.pi_state->pi_mutex;
2646                 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
2647                 debug_rt_mutex_free_waiter(&rt_waiter);
2648
2649                 spin_lock(q.lock_ptr);
2650                 /*
2651                  * Fixup the pi_state owner and possibly acquire the lock if we
2652                  * haven't already.
2653                  */
2654                 res = fixup_owner(uaddr2, &q, !ret);
2655                 /*
2656                  * If fixup_owner() returned an error, propagate that.  If it
2657                  * acquired the lock, clear -ETIMEDOUT or -EINTR.
2658                  */
2659                 if (res)
2660                         ret = (res < 0) ? res : 0;
2661
2662                 /* Unqueue and drop the lock. */
2663                 unqueue_me_pi(&q);
2664         }
2665
2666         /*
2667          * If fixup_pi_state_owner() faulted and was unable to handle the
2668          * fault, unlock the rt_mutex and return the fault to userspace.
2669          */
2670         if (ret == -EFAULT) {
2671                 if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2672                         rt_mutex_unlock(pi_mutex);
2673         } else if (ret == -EINTR) {
2674                 /*
2675                  * We've already been requeued, but cannot restart by calling
2676                  * futex_lock_pi() directly. We could restart this syscall, but
2677                  * it would detect that the user space "val" changed and return
2678                  * -EWOULDBLOCK.  Save the overhead of the restart and return
2679                  * -EWOULDBLOCK directly.
2680                  */
2681                 ret = -EWOULDBLOCK;
2682         }
2683
2684 out_put_keys:
2685         put_futex_key(&q.key);
2686 out_key2:
2687         put_futex_key(&key2);
2688
2689 out:
2690         if (to) {
2691                 hrtimer_cancel(&to->timer);
2692                 destroy_hrtimer_on_stack(&to->timer);
2693         }
2694         return ret;
2695 }
2696
2697 /*
2698  * Support for robust futexes: the kernel cleans up held futexes at
2699  * thread exit time.
2700  *
2701  * Implementation: user-space maintains a per-thread list of locks it
2702  * is holding. Upon do_exit(), the kernel carefully walks this list,
2703  * and marks all locks that are owned by this thread with the
2704  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2705  * always manipulated with the lock held, so the list is private and
2706  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2707  * field, to allow the kernel to clean up if the thread dies after
2708  * acquiring the lock, but just before it could have added itself to
2709  * the list. There can only be one such pending lock.
2710  */
2711
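/*
 * A minimal sketch of the userspace side of this protocol. The
 * robust_list_head layout is the real ABI; "struct my_lock" and its
 * members are hypothetical. Each thread registers one head, whose
 * list starts out as an empty circular list, and futex_offset gives
 * the distance from a list entry to its futex word:
 *
 *	static __thread struct robust_list_head head = {
 *		.list		= { &head.list },
 *		.futex_offset	= offsetof(struct my_lock, futex_val) -
 *				  offsetof(struct my_lock, list_entry),
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */
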
2712 /**
2713  * sys_set_robust_list() - Set the robust-futex list head of a task
2714  * @head:       pointer to the list-head
2715  * @len:        length of the list-head, as userspace expects
2716  */
2717 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2718                 size_t, len)
2719 {
2720         if (!futex_cmpxchg_enabled)
2721                 return -ENOSYS;
2722         /*
2723          * The kernel knows only one size for now:
2724          */
2725         if (unlikely(len != sizeof(*head)))
2726                 return -EINVAL;
2727
2728         current->robust_list = head;
2729
2730         return 0;
2731 }
2732
2733 /**
2734  * sys_get_robust_list() - Get the robust-futex list head of a task
2735  * @pid:        pid of the process [zero for current task]
2736  * @head_ptr:   pointer to a list-head pointer, the kernel fills it in
2737  * @len_ptr:    pointer to a length field, the kernel fills in the header size
2738  */
2739 SYSCALL_DEFINE3(get_robust_list, int, pid,
2740                 struct robust_list_head __user * __user *, head_ptr,
2741                 size_t __user *, len_ptr)
2742 {
2743         struct robust_list_head __user *head;
2744         unsigned long ret;
2745         struct task_struct *p;
2746
2747         if (!futex_cmpxchg_enabled)
2748                 return -ENOSYS;
2749
2750         rcu_read_lock();
2751
2752         ret = -ESRCH;
2753         if (!pid)
2754                 p = current;
2755         else {
2756                 p = find_task_by_vpid(pid);
2757                 if (!p)
2758                         goto err_unlock;
2759         }
2760
2761         ret = -EPERM;
2762         if (!ptrace_may_access(p, PTRACE_MODE_READ))
2763                 goto err_unlock;
2764
2765         head = p->robust_list;
2766         rcu_read_unlock();
2767
2768         if (put_user(sizeof(*head), len_ptr))
2769                 return -EFAULT;
2770         return put_user(head, head_ptr);
2771
2772 err_unlock:
2773         rcu_read_unlock();
2774
2775         return ret;
2776 }
2777
2778 /*
2779  * Process a futex-list entry, check whether it's owned by the
2780  * dying task, and do notification if so:
2781  */
2782 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2783 {
2784         u32 uval, uninitialized_var(nval), mval;
2785
2786 retry:
2787         if (get_user(uval, uaddr))
2788                 return -1;
2789
2790         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2791                 /*
2792                  * Ok, this dying thread is truly holding a futex
2793                  * of interest. Set the OWNER_DIED bit atomically
2794                  * via cmpxchg, and if the value had FUTEX_WAITERS
2795                  * set, wake up a waiter (if any). (We have to do a
2796                  * futex_wake() even if OWNER_DIED is already set -
2797                  * to handle the rare but possible case of recursive
2798                  * thread-death.) The rest of the cleanup is done in
2799                  * userspace.
2800                  */
2801                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2802                 /*
2803                  * We are not holding a lock here, but we want to have
2804                  * the pagefault_disable/enable() protection because
2805                  * we want to handle the fault gracefully. If the
2806                  * access fails we try to fault in the futex with R/W
2807                  * verification via get_user_pages. get_user() above
2808                  * does not guarantee R/W access. If that fails we
2809                  * give up and leave the futex locked.
2810                  */
2811                 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
2812                         if (fault_in_user_writeable(uaddr))
2813                                 return -1;
2814                         goto retry;
2815                 }
2816                 if (nval != uval)
2817                         goto retry;
2818
2819                 /*
2820                  * Wake robust non-PI futexes here. The wakeup of
2821                  * PI futexes happens in exit_pi_state():
2822                  */
2823                 if (!pi && (uval & FUTEX_WAITERS))
2824                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2825         }
2826         return 0;
2827 }
2828
2829 /*
2830  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2831  */
2832 static inline int fetch_robust_entry(struct robust_list __user **entry,
2833                                      struct robust_list __user * __user *head,
2834                                      unsigned int *pi)
2835 {
2836         unsigned long uentry;
2837
2838         if (get_user(uentry, (unsigned long __user *)head))
2839                 return -EFAULT;
2840
2841         *entry = (void __user *)(uentry & ~1UL);
2842         *pi = uentry & 1;
2843
2844         return 0;
2845 }
2846
2847 /*
2848  * Walk curr->robust_list (very carefully, it's a userspace list!)
2849  * and mark any locks found there dead, and notify any waiters.
2850  *
2851  * We silently return on any sign of list-walking problem.
2852  */
2853 void exit_robust_list(struct task_struct *curr)
2854 {
2855         struct robust_list_head __user *head = curr->robust_list;
2856         struct robust_list __user *entry, *next_entry, *pending;
2857         unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2858         unsigned int uninitialized_var(next_pi);
2859         unsigned long futex_offset;
2860         int rc;
2861
2862         if (!futex_cmpxchg_enabled)
2863                 return;
2864
2865         /*
2866          * Fetch the list head (which was registered earlier, via
2867          * sys_set_robust_list()):
2868          */
2869         if (fetch_robust_entry(&entry, &head->list.next, &pi))
2870                 return;
2871         /*
2872          * Fetch the relative futex offset:
2873          */
2874         if (get_user(futex_offset, &head->futex_offset))
2875                 return;
2876         /*
2877          * Fetch any possibly pending lock-add first, and handle it
2878          * if it exists:
2879          */
2880         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2881                 return;
2882
2883         next_entry = NULL;      /* avoid warning with gcc */
2884         while (entry != &head->list) {
2885                 /*
2886                  * Fetch the next entry in the list before calling
2887                  * handle_futex_death:
2888                  */
2889                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2890                 /*
2891                  * A pending lock might already be on the list, so
2892                  * don't process it twice:
2893                  */
2894                 if (entry != pending)
2895                         if (handle_futex_death((void __user *)entry + futex_offset,
2896                                                 curr, pi))
2897                                 return;
2898                 if (rc)
2899                         return;
2900                 entry = next_entry;
2901                 pi = next_pi;
2902                 /*
2903                  * Avoid excessively long or circular lists:
2904                  */
2905                 if (!--limit)
2906                         break;
2907
2908                 cond_resched();
2909         }
2910
2911         if (pending)
2912                 handle_futex_death((void __user *)pending + futex_offset,
2913                                    curr, pip);
2914 }
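
/*
 * Editor's note (a hedged userspace sketch, not kernel source): the
 * list walked above is registered once per thread, typically by the C
 * library, e.g.:
 *
 *      struct robust_list_head head = {
 *              .list            = { .next = &head.list }, // empty, circular
 *              .futex_offset    = offsetof(struct my_mutex, futex_word),
 *              .list_op_pending = NULL,
 *      };
 *      syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * 'struct my_mutex' and 'futex_word' are hypothetical example names;
 * futex_offset is what lets the kernel translate a list entry into the
 * address of the futex word, which is why the walk above adds it before
 * calling handle_futex_death().
 */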
2915
2916 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2917                 u32 __user *uaddr2, u32 val2, u32 val3)
2918 {
2919         int cmd = op & FUTEX_CMD_MASK;
2920         unsigned int flags = 0;
2921
2922         if (!(op & FUTEX_PRIVATE_FLAG))
2923                 flags |= FLAGS_SHARED;
2924
2925         if (op & FUTEX_CLOCK_REALTIME) {
2926                 flags |= FLAGS_CLOCKRT;
2927                 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2928                         return -ENOSYS;
2929         }
2930
2931         switch (cmd) {
2932         case FUTEX_LOCK_PI:
2933         case FUTEX_UNLOCK_PI:
2934         case FUTEX_TRYLOCK_PI:
2935         case FUTEX_WAIT_REQUEUE_PI:
2936         case FUTEX_CMP_REQUEUE_PI:
2937                 if (!futex_cmpxchg_enabled)
2938                         return -ENOSYS;
2939         }
2940
2941         switch (cmd) {
2942         case FUTEX_WAIT:
2943                 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
2944         case FUTEX_WAIT_BITSET:
2945                 return futex_wait(uaddr, flags, val, timeout, val3);
2946         case FUTEX_WAKE:
2947                 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
2948         case FUTEX_WAKE_BITSET:
2949                 return futex_wake(uaddr, flags, val, val3);
2950         case FUTEX_REQUEUE:
2951                 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2952         case FUTEX_CMP_REQUEUE:
2953                 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2954         case FUTEX_WAKE_OP:
2955                 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2956         case FUTEX_LOCK_PI:
2957                 return futex_lock_pi(uaddr, flags, val, timeout, 0);
2958         case FUTEX_UNLOCK_PI:
2959                 return futex_unlock_pi(uaddr, flags);
2960         case FUTEX_TRYLOCK_PI:
2961                 return futex_lock_pi(uaddr, flags, 0, timeout, 1);
2962         case FUTEX_WAIT_REQUEUE_PI:
2963                 val3 = FUTEX_BITSET_MATCH_ANY;
2964                 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2965                                              uaddr2);
2966         case FUTEX_CMP_REQUEUE_PI:
2967                 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
2968         }
2969         return -ENOSYS;
2970 }
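
/*
 * Editor's illustration (userspace sketch, not kernel source): the
 * dispatch above is what a raw futex(2) call lands in. A minimal
 * wait/wake pair, assuming two cooperating threads:
 *
 *      #include <linux/futex.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      uint32_t word = 0;
 *
 *      // Waiter: sleep while the word still reads 0; the kernel
 *      // re-checks the value under the hash bucket lock.
 *      syscall(SYS_futex, &word, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
 *
 *      // Waker: publish the new value, then wake one waiter.
 *      __atomic_store_n(&word, 1, __ATOMIC_SEQ_CST);
 *      syscall(SYS_futex, &word, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
 *
 * The *_PRIVATE opcodes carry FUTEX_PRIVATE_FLAG and therefore skip the
 * FLAGS_SHARED path selected at the top of do_futex().
 */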
2971
2972
2973 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2974                 struct timespec __user *, utime, u32 __user *, uaddr2,
2975                 u32, val3)
2976 {
2977         struct timespec ts;
2978         ktime_t t, *tp = NULL;
2979         u32 val2 = 0;
2980         int cmd = op & FUTEX_CMD_MASK;
2981
2982         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2983                       cmd == FUTEX_WAIT_BITSET ||
2984                       cmd == FUTEX_WAIT_REQUEUE_PI)) {
2985                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2986                         return -EFAULT;
2987                 if (!timespec_valid(&ts))
2988                         return -EINVAL;
2989
2990                 t = timespec_to_ktime(ts);
2991                 if (cmd == FUTEX_WAIT)
2992                         t = ktime_add_safe(ktime_get(), t);
2993                 tp = &t;
2994         }
2995         /*
2996          * The requeue parameter is passed in 'utime' if cmd == FUTEX_*_REQUEUE_*,
2997          * and the number of waiters to wake is passed in 'utime' if cmd == FUTEX_WAKE_OP.
2998          */
2999         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3000             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3001                 val2 = (u32) (unsigned long) utime;
3002
3003         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3004 }
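
/*
 * Editor's note on the timeout handling above: FUTEX_WAIT takes a
 * *relative* timeout, which is why it alone is converted with
 * ktime_add_safe(ktime_get(), t); FUTEX_WAIT_BITSET and
 * FUTEX_WAIT_REQUEUE_PI take *absolute* deadlines (CLOCK_MONOTONIC by
 * default, CLOCK_REALTIME when FUTEX_CLOCK_REALTIME is set). A userspace
 * sketch of the difference, reusing the hypothetical 'word' from the
 * sketch above:
 *
 *      struct timespec rel = { .tv_sec = 1 };  // one second from now
 *      syscall(SYS_futex, &word, FUTEX_WAIT, 0, &rel, NULL, 0);
 *
 *      struct timespec abs;
 *      clock_gettime(CLOCK_MONOTONIC, &abs);
 *      abs.tv_sec += 1;                        // absolute deadline
 *      syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, 0, &abs, NULL,
 *              FUTEX_BITSET_MATCH_ANY);
 */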
3005
3006 static void __init futex_detect_cmpxchg(void)
3007 {
3008 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3009         u32 curval;
3010
3011         /*
3012          * This call will fail, and that is intentional. Some arch
3013          * implementations do runtime detection of the
3014          * futex_atomic_cmpxchg_inatomic() functionality. We want to
3015          * know that before we call into any of the complex code
3016          * paths, and we also want to prevent registration of robust
3017          * lists in that case. NULL is guaranteed to fault: a
3018          * functional implementation returns -EFAULT, while the
3019          * non-functional ones return -ENOSYS.
3020          */
3021         if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3022                 futex_cmpxchg_enabled = 1;
3023 #endif
3024 }
3025
3026 static int __init futex_init(void)
3027 {
3028         unsigned int futex_shift;
3029         unsigned long i;
3030
3031 #if CONFIG_BASE_SMALL
3032         futex_hashsize = 16;
3033 #else
3034         futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3035 #endif
3036
3037         futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3038                                                futex_hashsize, 0,
3039                                                futex_hashsize < 256 ? HASH_SMALL : 0,
3040                                                &futex_shift, NULL,
3041                                                futex_hashsize, futex_hashsize);
3042         futex_hashsize = 1UL << futex_shift;
3043
3044         futex_detect_cmpxchg();
3045
3046         for (i = 0; i < futex_hashsize; i++) {
3047                 atomic_set(&futex_queues[i].waiters, 0);
3048                 plist_head_init(&futex_queues[i].chain);
3049                 spin_lock_init(&futex_queues[i].lock);
3050         }
3051
3052         return 0;
3053 }
3054 __initcall(futex_init);
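
/*
 * Editor's note on the sizing above: the hash table scales with the
 * possible CPU count and is rounded up to a power of two so lookups can
 * mask rather than divide. A worked example (illustrative, not measured):
 * with 6 possible CPUs, roundup_pow_of_two(256 * 6) =
 * roundup_pow_of_two(1536) = 2048 buckets. alloc_large_system_hash()
 * reports the shift it actually used, and futex_hashsize is then
 * recomputed as 1UL << futex_shift.
 */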