/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 * - all global variables are read-mostly.
 * - semop() calls and semctl(RMID) are synchronized by RCU.
 * - most operations do write operations (actually: spin_lock calls) to
 *   the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *   If multiple semaphores in one array are used, then cache line
 *   thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
 *   count_semzcnt().
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using a
 *   special wakeup scheme (queuewakeup_wait() and support functions).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows FIFO
 *   ordering to be achieved without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
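/*
 * Illustrative user-space sketch (not part of this file; identifiers and
 * values below are arbitrary examples) of the behavior documented above,
 * using the libc wrappers for the syscalls implemented here:
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	struct sembuf up   = { .sem_num = 0, .sem_op =  1, .sem_flg = SEM_UNDO };
 *	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *
 *	semop(id, &up, 1);	- increments semval, never sleeps
 *	semop(id, &down, 1);	- sleeps until semval allows the decrement,
 *				  FIFO with respect to other sleepers
 *	semctl(id, 0, IPC_RMID); - sleeping semop() calls fail with EIDRM
 *
 * At process exit, the accumulated SEM_UNDO adjustments are applied by
 * exit_sem(), clamped to 0..SEMVMX as described above.
 */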
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>
#include "util.h"
#ifdef CONFIG_PREEMPT_RT_BASE
#define SYSVSEM_COMPLETION 1
#else
#define SYSVSEM_CUSTOM 1
#endif

#ifdef SYSVSEM_COMPLETION
/* Using a completion causes some overhead, but avoids a busy loop
 * that increases the worst case latency.
 */
struct queue_done {
	struct completion done;
};

static void queuewakeup_prepare(void)
{
	/* no preparation necessary */
}

static void queuewakeup_completed(void)
{
	/* empty */
}

static void queuewakeup_block(struct queue_done *qd)
{
	/* empty */
}

static void queuewakeup_handsoff(struct queue_done *qd)
{
	complete_all(&qd->done);
}

static void queuewakeup_init(struct queue_done *qd)
{
	init_completion(&qd->done);
}

static void queuewakeup_wait(struct queue_done *qd)
{
	wait_for_completion(&qd->done);
}

#elif defined(SYSVSEM_SPINLOCK)
/* Note: Spinlocks do not work because:
 * - lockdep complains [could be fixed]
 * - only 255 concurrent spin_lock() calls are permitted, then the
 *   preempt-counter overflows
 */
#error SYSVSEM_SPINLOCK is a proof of concept, it does not work.
struct queue_done {
	spinlock_t done;
};

static void queuewakeup_prepare(void)
{
	/* empty */
}

static void queuewakeup_completed(void)
{
	/* empty */
}

static void queuewakeup_block(struct queue_done *qd)
{
	BUG_ON(spin_is_locked(&qd->done));
	spin_lock(&qd->done);
}

static void queuewakeup_handsoff(struct queue_done *qd)
{
	spin_unlock(&qd->done);
}

static void queuewakeup_init(struct queue_done *qd)
{
	spin_lock_init(&qd->done);
}

static void queuewakeup_wait(struct queue_done *qd)
{
	spin_unlock_wait(&qd->done);
}
#else
/* Custom busy-wait implementation: the waker marks the queue entry as
 * busy (done == 2) while it accesses it, the woken up task spins until
 * the waker has finished (done == 1 again).
 */
struct queue_done {
	atomic_t done;
};

static void queuewakeup_prepare(void)
{
	/* keep the busy-wait window short: don't get preempted in it */
	preempt_disable();
}

static void queuewakeup_completed(void)
{
	preempt_enable();
}

static void queuewakeup_block(struct queue_done *qd)
{
	BUG_ON(atomic_read(&qd->done) != 1);
	atomic_set(&qd->done, 2);
}

static void queuewakeup_handsoff(struct queue_done *qd)
{
	BUG_ON(atomic_read(&qd->done) != 2);
	/* make all previous writes visible before dropping the busy marker */
	smp_mb();
	atomic_set(&qd->done, 1);
}

static void queuewakeup_init(struct queue_done *qd)
{
	atomic_set(&qd->done, 1);
}

static void queuewakeup_wait(struct queue_done *qd)
{
	while (atomic_read(&qd->done) != 1)
		cpu_relax();
}
#endif
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	struct list_head sem_pending; /* pending single-sop operations */
};
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	simple_list; /* pending operations,
					      * per-semaphore list */
	struct list_head	list;	 /* pending operations,
					  * per-array list */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
	struct queue_done	done;	 /* completion synchronization */
};
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list:
						 * all undos from one process,
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};
/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init (void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
/*
 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
}

static inline void sem_getref_and_unlock(struct sem_array *sma)
{
	ipc_rcu_getref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to the actual result code
 *	  This is the notification for the blocked thread that someone
 *	  (usually: update_queue()) completed the semtimedop() operation.
 *	* call wake_up_process
 *	* queuewakeup_handsoff(&q->done);
 * - the previously blocked thread checks queue.status:
 *	* if it's not -EINTR, then someone completed the operation.
 *	  First, queuewakeup_wait() must be called. Afterwards,
 *	  semtimedop must return queue.status without performing any
 *	  operation on the sem array.
 *	* otherwise it must acquire the spinlock and repeat the test
 *	* If it is still -EINTR, then no update_queue() completed the
 *	  operation, thus semtimedop() can proceed normally.
 *
 * queuewakeup_wait() is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status.
 *   More importantly, it would mean that wake_up_process must be done
 *   while holding sma->lock, i.e. this would reduce the scalability.
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 */
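/*
 * The scheme above, condensed into a sketch (see semtimedop() and
 * wake_up_sem_queue_prepare()/wake_up_sem_queue_do() for the real code):
 *
 *	waker				sleeper
 *	-----				-------
 *	(holds sma->lock)		queue.status = -EINTR;
 *	queuewakeup_block(&q->done);	schedule();
 *	q->status = result;		...woken up...
 *	(drops sma->lock)		error = queue.status;
 *	wake_up_process(q->sleeper);	if (error != -EINTR) {
 *	queuewakeup_handsoff(&q->done);		queuewakeup_wait(&queue.done);
 *						return error; (no array access)
 *					}
 */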
/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer)
 */

static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma)
		return -ENOMEM;
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++)
		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->sem_pending);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}
/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if we need to sleep, else return a
 * negative error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
/**
 * wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 * @pt: list head for the tasks that must be woken up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt))
		queuewakeup_prepare();

	queuewakeup_block(&q->done);
	q->status = error;

	list_add_tail(&q->simple_list, pt);
}
/**
 * wake_up_sem_queue_do(pt) - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as
 * queuewakeup_handsoff() is called.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, simple_list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after completing q->done */
		queuewakeup_handsoff(&q->done);
	}
	if (did_something)
		queuewakeup_completed();
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops == 1)
		list_del(&q->simple_list);
	else
		sma->complex_count--;
}
/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	struct sem *curr;
	struct sem_queue *h;

	/* if the operation didn't modify the array, then no restart */
	if (q->alter == 0)
		return 0;

	/* pending complex operations are too difficult to analyse */
	if (sma->complex_count)
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	curr = sma->sem_base + q->sops[0].sem_num;

	/* No-one waits on this queue */
	if (list_empty(&curr->sem_pending))
		return 0;

	/* the new semaphore value */
	if (curr->semval) {
		/* It is impossible that someone waits for the new value:
		 * - q is a previously sleeping simple operation that
		 *   altered the array. It must be a decrement, because
		 *   simple increments never sleep.
		 * - The value is not 0, thus wait-for-zero won't proceed.
		 * - If there are older (higher priority) decrements
		 *   in the queue, then they have observed the original
		 *   semval value and couldn't proceed. The operation
		 *   decremented to this value - thus they won't proceed
		 *   either.
		 */
		BUG_ON(q->sops[0].sem_op >= 0);
		return 0;
	}
	/*
	 * semval is 0. Check if there are wait-for-zero semops.
	 * They must be the first entries in the per-semaphore simple queue.
	 */
	h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
	BUG_ON(h->nsops != 1);
	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);

	/* Yes, there is a wait-for-zero semop. Restart */
	if (h->sops[0].sem_op == 0)
		return 1;

	/* Again - no-one is waiting for the new value. */
	return 0;
}
/**
 * update_queue(sma, semnum): Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, then @semnum
 * must be set to -1.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->status.
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int offset;
	int semop_completed = 0;

	/* if there are complex operations around, then knowing the semaphore
	 * that was modified doesn't help us. Assume that multiple semaphores
	 * were modified.
	 */
	if (sma->complex_count)
		semnum = -1;

	if (semnum == -1) {
		pending_list = &sma->sem_pending;
		offset = offsetof(struct sem_queue, list);
	} else {
		pending_list = &sma->sem_base[semnum].sem_pending;
		offset = offsetof(struct sem_queue, simple_list);
	}

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = (struct sem_queue *)((char *)walk - offset);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan the "alter" entries: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
				q->alter)
			break;

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}
/**
 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() makes the required calls to update_queue, based on the
 * actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;

	if (sma->complex_count || sops == NULL) {
		if (update_queue(sma, -1, pt))
			otime = 1;
		goto done;
	}

	for (i = 0; i < nsops; i++) {
		if (sops[i].sem_op > 0 ||
			(sops[i].sem_op < 0 &&
				sma->sem_base[sops[i].sem_num].semval == 0))
			if (update_queue(sma, sops[i].sem_num, pt))
				otime = 1;
	}
done:
	if (otime)
		sma->sem_otime = get_seconds();
}
/* The following counts are associated with each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;

	/* Free the existing undo structures for this semaphore set. */
	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if (cmd == SEM_STAT) {
			sma = sem_lock(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = sma->sem_perm.id;
		} else {
			sma = sem_lock_check(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;
	struct list_head tasks;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	INIT_LIST_HEAD(&tasks);
	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm,
			(cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			sem_getref_and_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}

			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		sem_getref_and_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			sem_putref(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				sem_putref(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id)
			un->semadj[semnum] = 0;

		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
	wake_up_sem_queue_do(&tasks);

out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some semctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, arg.buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd,
			       &semid64.sem_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&semid64.sem_perm, ipcp);
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	sem_unlock(sma);
out_up:
	up_write(&sem_ids(ns).rw_mutex);
	return err;
}
SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		err = semctl_nolock(ns, semid, cmd, version, arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns,semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		err = semctl_down(ns, semid, cmd, version, arg);
		return err;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
{
	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
}
SYSCALL_ALIAS(sys_semctl, SyS_semctl);
#endif
/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list == NULL) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un!=NULL))
		goto out;
	rcu_read_unlock();

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_CAST(sma);

	nsems = sma->sem_nsems;
	sem_getref_and_unlock(sma);

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		sem_putref(sma);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	rcu_read_lock();
	sem_unlock(sma);
out:
	return un;
}
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	if (undos) {
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	INIT_LIST_HEAD(&tasks);

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		if (un)
			rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array received the same id. Check and fail.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	error = -EIDRM;
	if (un) {
		if (un->semid == -1) {
			rcu_read_unlock();
			goto out_unlock_free;
		} else {
			/*
			 * rcu lock can be released, "un" cannot disappear:
			 * - sem_lock is acquired, thus IPC_RMID is
			 *   impossible.
			 * - exit_sem is impossible, it always operates on
			 *   current (or a dead task).
			 */

			rcu_read_unlock();
		}
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			do_smart_update(sma, sops, nsops, 1, &tasks);

		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	if (alter)
		list_add_tail(&queue.list, &sma->sem_pending);
	else
		list_add(&queue.list, &sma->sem_pending);

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.simple_list, &curr->sem_pending);
		else
			list_add(&queue.simple_list, &curr->sem_pending);
	} else {
		INIT_LIST_HEAD(&queue.simple_list);
		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;
	queuewakeup_init(&queue.done);

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources. Just ensure that update_queue completed
		 * its access to &queue.
		 */
		queuewakeup_wait(&queue.done);

		goto out_free;
	}

	sma = sem_lock(ns, semid);

	/*
	 * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
	 */
	error = queue.status;
	if (error != -EINTR) {
		/* If there is a return code, then we can leave immediately. */
		if (!IS_ERR(sma)) {
			/* sem_lock() succeeded - then unlock */
			sem_unlock(sma);
		}
		/* Except that we must wait for the hands-off */
		queuewakeup_wait(&queue.done);
		goto out_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue.
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry.
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma);

	wake_up_sem_queue_do(&tasks);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid;
		int i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;
		else
			semid = un->semid;
		rcu_read_unlock();

		if (semid == -1)
			break;

		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma))
			continue;

		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma);
			continue;
		}

		/* remove un from the linked lists */
		assert_spin_locked(&sma->sem_perm.lock);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma);
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif