[PATCH] jiffies_64 cleanup
diff --git a/kernel/signal.c b/kernel/signal.c
index 4980a073237ff45ed28c39b38225c40923da0848..9d1512dcf17664e197352f498f95039abdba0184 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -262,7 +262,7 @@ next_signal(struct sigpending *pending, sigset_t *mask)
        return sig;
 }
 
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
                                         int override_rlimit)
 {
        struct sigqueue *q = NULL;
@@ -277,7 +277,6 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __n
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
-               q->lock = NULL;
                q->user = get_uid(t->user);
        }
        return(q);
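
For context: gfp_t is the annotated typedef for allocation flags, so the hunk above only changes the parameter's type. A minimal caller sketch, assuming the usual wrapper pattern around __sigqueue_alloc(); GFP_KERNEL and the SIGQUEUE_PREALLOC marking are assumptions, not lines taken from this diff:

	struct sigqueue *q;

	/* preallocate a queue entry; no rlimit override */
	q = __sigqueue_alloc(current, GFP_KERNEL, 0);
	if (q)
		q->flags |= SIGQUEUE_PREALLOC;
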
@@ -397,20 +396,8 @@ void __exit_signal(struct task_struct *tsk)
        flush_sigqueue(&tsk->pending);
        if (sig) {
                /*
-                * We are cleaning up the signal_struct here.  We delayed
-                * calling exit_itimers until after flush_sigqueue, just in
-                * case our thread-local pending queue contained a queued
-                * timer signal that would have been cleared in
-                * exit_itimers.  When that called sigqueue_free, it would
-                * attempt to re-take the tasklist_lock and deadlock.  This
-                * can never happen if we ensure that all queues the
-                * timer's signal might be queued on have been flushed
-                * first.  The shared_pending queue, and our own pending
-                * queue are the only queues the timer could be on, since
-                * there are no other threads left in the group and timer
-                * signals are constrained to threads inside the group.
+                * We are cleaning up the signal_struct here.
                 */
-               exit_itimers(sig);
                exit_thread_group_keys(sig);
                kmem_cache_free(signal_cachep, sig);
        }
@@ -418,6 +405,8 @@ void __exit_signal(struct task_struct *tsk)
 
 void exit_signal(struct task_struct *tsk)
 {
+       atomic_dec(&tsk->signal->live);
+
        write_lock_irq(&tasklist_lock);
        __exit_signal(tsk);
        write_unlock_irq(&tasklist_lock);
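
The atomic_dec() above pairs with the thread-creation side. A hedged sketch of the presumed counterpart in kernel/fork.c of this era (not shown in this diff): copy_signal() starts the live-thread count at one, and the CLONE_THREAD path of copy_process() bumps it for every extra thread:

	/* in copy_signal(): the new group begins with one live thread */
	atomic_set(&sig->live, 1);

	/* in copy_process(), CLONE_THREAD case: one more live thread */
	atomic_inc(&current->signal->live);
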
@@ -578,7 +567,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
-               tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+               if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+                       tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ( signr &&
             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
@@ -936,34 +926,31 @@ force_sig_specific(int sig, struct task_struct *t)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-#define wants_signal(sig, p, mask)                     \
-       (!sigismember(&(p)->blocked, sig)               \
-        && !((p)->state & mask)                        \
-        && !((p)->flags & PF_EXITING)                  \
-        && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+       if (sigismember(&p->blocked, sig))
+               return 0;
+       if (p->flags & PF_EXITING)
+               return 0;
+       if (sig == SIGKILL)
+               return 1;
+       if (p->state & (TASK_STOPPED | TASK_TRACED))
+               return 0;
+       return task_curr(p) || !signal_pending(p);
+}
 
 static void
 __group_complete_signal(int sig, struct task_struct *p)
 {
-       unsigned int mask;
        struct task_struct *t;
 
-       /*
-        * Don't bother traced and stopped tasks (but
-        * SIGKILL will punch through that).
-        */
-       mask = TASK_STOPPED | TASK_TRACED;
-       if (sig == SIGKILL)
-               mask = 0;
-
        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
-       if (wants_signal(sig, p, mask))
+       if (wants_signal(sig, p))
                t = p;
        else if (thread_group_empty(p))
                /*
@@ -981,7 +968,7 @@ __group_complete_signal(int sig, struct task_struct *p)
                        t = p->signal->curr_target = p;
                BUG_ON(t->tgid != p->tgid);
 
-               while (!wants_signal(sig, t, mask)) {
+               while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == p->signal->curr_target)
                                /*
@@ -1121,8 +1108,8 @@ void zap_other_threads(struct task_struct *p)
                if (t != p->group_leader)
                        t->exit_signal = -1;
 
+               /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                signal_wake_up(t, 1);
        }
 }
@@ -1195,6 +1182,40 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
        return error;
 }
 
+/* like kill_proc_info(), but doesn't use uid/euid of "current" */
+int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
+                     uid_t uid, uid_t euid)
+{
+       int ret = -EINVAL;
+       struct task_struct *p;
+
+       if (!valid_signal(sig))
+               return ret;
+
+       read_lock(&tasklist_lock);
+       p = find_task_by_pid(pid);
+       if (!p) {
+               ret = -ESRCH;
+               goto out_unlock;
+       }
+       if ((!info || ((unsigned long)info != 1 &&
+                       (unsigned long)info != 2 && SI_FROMUSER(info)))
+           && (euid != p->suid) && (euid != p->uid)
+           && (uid != p->suid) && (uid != p->uid)) {
+               ret = -EPERM;
+               goto out_unlock;
+       }
+       if (sig && p->sighand) {
+               unsigned long flags;
+               spin_lock_irqsave(&p->sighand->siglock, flags);
+               ret = __group_send_sig_info(sig, info, p);
+               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+       }
+out_unlock:
+       read_unlock(&tasklist_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
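
The kill_proc_info_as_uid() helper added above lets a driver deliver a signal on behalf of credentials saved earlier rather than those of current. A hedged usage sketch; SIGIO/SI_ASYNCIO and the owner_* variables are illustrative assumptions, not taken from this diff:

	struct siginfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	sinfo.si_signo = SIGIO;
	sinfo.si_errno = 0;
	sinfo.si_code = SI_ASYNCIO;

	/* owner_pid/owner_uid/owner_euid were recorded when the fd was set up */
	kill_proc_info_as_uid(SIGIO, &sinfo, owner_pid, owner_uid, owner_euid);
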
@@ -1349,11 +1370,12 @@ void sigqueue_free(struct sigqueue *q)
         * pending queue.
         */
        if (unlikely(!list_empty(&q->list))) {
-               read_lock(&tasklist_lock);  
-               spin_lock_irqsave(q->lock, flags);
+               spinlock_t *lock = &current->sighand->siglock;
+               read_lock(&tasklist_lock);
+               spin_lock_irqsave(lock, flags);
                if (!list_empty(&q->list))
                        list_del_init(&q->list);
-               spin_unlock_irqrestore(q->lock, flags);
+               spin_unlock_irqrestore(lock, flags);
                read_unlock(&tasklist_lock);
        }
        q->flags &= ~SIGQUEUE_PREALLOC;
@@ -1392,7 +1414,6 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                goto out;
        }
 
-       q->lock = &p->sighand->siglock;
        list_add_tail(&q->list, &p->pending.list);
        sigaddset(&p->pending.signal, sig);
        if (!sigismember(&p->blocked, sig))
@@ -1440,7 +1461,6 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
         * We always use the shared queue for process-wide signals,
         * to avoid several races.
         */
-       q->lock = &p->sighand->siglock;
        list_add_tail(&q->list, &p->signal->shared_pending.list);
        sigaddset(&p->signal->shared_pending.signal, sig);
 
@@ -1766,7 +1786,8 @@ do_signal_stop(int signr)
                                 * stop is always done with the siglock held,
                                 * so this check has no races.
                                 */
-                               if (t->state < TASK_STOPPED) {
+                               if (!t->exit_state &&
+                                   !(t->state & (TASK_STOPPED|TASK_TRACED))) {
                                        stop_count++;
                                        signal_wake_up(t, 0);
                                }
@@ -1858,9 +1879,9 @@ relock:
                        /* Let the debugger run.  */
                        ptrace_stop(signr, signr, info);
 
-                       /* We're back.  Did the debugger cancel the sig */
+                       /* We're back.  Did the debugger cancel the sig or group_exit? */
                        signr = current->exit_code;
-                       if (signr == 0)
+                       if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
                                continue;
 
                        current->exit_code = 0;
@@ -2221,8 +2242,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);
 
-                       current->state = TASK_INTERRUPTIBLE;
-                       timeout = schedule_timeout(timeout);
+                       timeout = schedule_timeout_interruptible(timeout);
 
                        try_to_freeze();
                        spin_lock_irq(&current->sighand->siglock);
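
For reference, the helper used above is expected to be shorthand for exactly the two lines it replaces; a sketch of the presumed kernel/timer.c definition (not part of this diff):

	signed long __sched schedule_timeout_interruptible(signed long timeout)
	{
		__set_current_state(TASK_INTERRUPTIBLE);
		return schedule_timeout(timeout);
	}
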
@@ -2263,26 +2283,13 @@ sys_kill(int pid, int sig)
        return kill_something_info(sig, &info, pid);
 }
 
-/**
- *  sys_tgkill - send signal to one specific thread
- *  @tgid: the thread group ID of the thread
- *  @pid: the PID of the thread
- *  @sig: signal to be sent
- *
- *  This syscall also checks the tgid and returns -ESRCH even if the PID
- *  exists but it's not belonging to the target process anymore. This
- *  method solves the problem of threads exiting and PIDs getting reused.
- */
-asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+static int do_tkill(int tgid, int pid, int sig)
 {
-       struct siginfo info;
        int error;
+       struct siginfo info;
        struct task_struct *p;
 
-       /* This is only valid for single tasks */
-       if (pid <= 0 || tgid <= 0)
-               return -EINVAL;
-
+       error = -ESRCH;
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
@@ -2291,8 +2298,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
 
        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
-       error = -ESRCH;
-       if (p && (p->tgid == tgid)) {
+       if (p && (tgid <= 0 || p->tgid == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permissions and process existence
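
The widened check above is what lets tkill() and tgkill() share do_tkill(): tkill passes tgid == 0, so any task with the given pid qualifies, while tgkill additionally requires the task to still belong to that thread group. A hedged userspace sketch of the two entry points; the wrapper names are hypothetical and the syscall numbers are assumed to come from <sys/syscall.h>:

	#include <signal.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* hypothetical wrappers: invoke the raw syscalls directly */
	static int my_tkill(pid_t tid, int sig)
	{
		return syscall(SYS_tkill, tid, sig);
	}

	static int my_tgkill(pid_t tgid, pid_t tid, int sig)
	{
		return syscall(SYS_tgkill, tgid, tid, sig);
	}
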
@@ -2306,47 +2312,40 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
                }
        }
        read_unlock(&tasklist_lock);
+
        return error;
 }
 
+/**
+ *  sys_tgkill - send signal to one specific thread
+ *  @tgid: the thread group ID of the thread
+ *  @pid: the PID of the thread
+ *  @sig: signal to be sent
+ *
+ *  This syscall also checks the tgid and returns -ESRCH even if the PID
+ *  exists but it's not belonging to the target process anymore. This
+ *  method solves the problem of threads exiting and PIDs getting reused.
+ */
+asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+{
+       /* This is only valid for single tasks */
+       if (pid <= 0 || tgid <= 0)
+               return -EINVAL;
+
+       return do_tkill(tgid, pid, sig);
+}
+
 /*
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
 asmlinkage long
 sys_tkill(int pid, int sig)
 {
-       struct siginfo info;
-       int error;
-       struct task_struct *p;
-
        /* This is only valid for single tasks */
        if (pid <= 0)
                return -EINVAL;
 
-       info.si_signo = sig;
-       info.si_errno = 0;
-       info.si_code = SI_TKILL;
-       info.si_pid = current->tgid;
-       info.si_uid = current->uid;
-
-       read_lock(&tasklist_lock);
-       p = find_task_by_pid(pid);
-       error = -ESRCH;
-       if (p) {
-               error = check_kill_permission(sig, &info, p);
-               /*
-                * The null signal is a permissions and process existence
-                * probe.  No signal is actually delivered.
-                */
-               if (!error && sig && p->sighand) {
-                       spin_lock_irq(&p->sighand->siglock);
-                       handle_stop_signal(sig, p);
-                       error = specific_send_sig_info(sig, &info, p);
-                       spin_unlock_irq(&p->sighand->siglock);
-               }
-       }
-       read_unlock(&tasklist_lock);
-       return error;
+       return do_tkill(0, pid, sig);
 }
 
 asmlinkage long