/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If the group stop is completed or in progress,
		 * this thread was already counted as stopped.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			__set_task_state(child, TASK_STOPPED);
		else
			signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	arch_ptrace_untrace(child);
	if (task_is_traced(child))
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	int dumpable = 0;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;

	rcu_read_lock();
	tcred = __task_cred(task);
	if ((cred->uid != tcred->euid ||
	     cred->uid != tcred->suid ||
	     cred->uid != tcred->uid  ||
	     cred->gid != tcred->egid ||
	     cred->gid != tcred->sgid ||
	     cred->gid != tcred->gid) &&
	    !capable(CAP_SYS_PTRACE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (!dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace_may_access(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;

	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);

	return !err;
}
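
/*
 * Illustrative userspace sketch (not part of this file): the credential
 * comparison above is why attaching to a process owned by another user
 * fails with -EPERM unless the tracer has CAP_SYS_PTRACE.  Assuming a
 * hypothetical target in "pid":
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1 && errno == EPERM)
 *		fprintf(stderr, "pid %d not traceable by this user\n", pid);
 */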

int ptrace_attach(struct task_struct *task)
{
	int retval;
	unsigned long flags;

	audit_ptrace(task);

	retval = -EPERM;
	if (same_thread_group(task, current))
		goto out;

	/* Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently under ptrace.
	 */
	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
	if (retval < 0)
		goto out;

	retval = -EPERM;
repeat:
	/*
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	if (!write_trylock_irqsave(&tasklist_lock, flags)) {
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	if (retval)
		goto bad;

	task->ptrace |= PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
	write_unlock_irqrestore(&tasklist_lock, flags);
	task_unlock(task);
	mutex_unlock(&current->cred_exec_mutex);
out:
	return retval;
}
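
/*
 * Illustrative userspace sketch (not part of this file): a successful
 * PTRACE_ATTACH sends the target SIGSTOP, so a tracer must wait for the
 * resulting stop before issuing further requests.  "pid" is an assumed
 * target:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == 0) {
 *		waitpid(pid, &status, 0);	// tracee stops with SIGSTOP
 *		// ... inspect the tracee, then PTRACE_CONT or PTRACE_DETACH
 *	}
 */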

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;

	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);

	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do.
 * But if our normal children self-reap, then this child
 * was prevented by ptrace and we must reap it now.
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	__ptrace_unlink(p);

	if (p->exit_state == EXIT_ZOMBIE) {
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, tracer))
				do_notify_parent(p, p->exit_signal);
			else if (ignoring_children(tracer->sighand))
				p->exit_signal = -1;
		}
		if (task_detached(p)) {
			/* Mark it as in the process of being reaped. */
			p->exit_state = EXIT_DEAD;
			return true;
		}
	}

	return false;
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed.  Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	write_unlock_irq(&tasklist_lock);

	if (unlikely(dead))
		release_task(child);

	return 0;
}
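
/*
 * Illustrative userspace sketch (not part of this file): the "data"
 * argument to PTRACE_DETACH becomes the child's exit_code above, i.e. a
 * signal delivered to the tracee as it resumes (0 means none).  For
 * example, detaching from an assumed traced "pid" while forwarding
 * SIGCONT:
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGCONT);
 */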

/*
 * Detach all tasks we were using ptrace on.
 */
void exit_ptrace(struct task_struct *tracer)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	write_lock_irq(&tasklist_lock);
	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
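
/*
 * Illustrative userspace sketch (not part of this file): these block
 * transfer helpers back architecture-specific requests; portable
 * userspace usually reads the tracee a word at a time instead.
 * Assuming a stopped tracee "pid" and a remote address "addr":
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 */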

static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
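
/*
 * Illustrative userspace sketch (not part of this file): the PT_* flags
 * above are set from the PTRACE_O_* bits a tracer passes in.  For an
 * assumed stopped tracee "pid", requesting syscall-stop marking and
 * fork tracing looks like:
 *
 *	#include <sys/ptrace.h>
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK));
 */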

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU_SINGLESTEP
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request, long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_process(child);

	return 0;
}
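
/*
 * Illustrative userspace sketch (not part of this file): PTRACE_SYSCALL
 * sets TIF_SYSCALL_TRACE above, so the tracee stops at each syscall
 * entry and exit.  A minimal trace loop over an assumed tracee "pid":
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	while (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == 0) {
 *		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
 *			break;
 *		// stopped at a syscall boundary; inspect registers here
 *	}
 */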

int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;
	siginfo_t siginfo;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user((siginfo_t __user *) data,
						   &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, (siginfo_t __user *) data,
				   sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

	default:
		break;
	}

	return ret;
}
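
/*
 * Illustrative userspace sketch (not part of this file): when the
 * tracee is in a signal stop, PTRACE_GETSIGINFO copies out the
 * siginfo_t captured by ptrace_getsiginfo() above.  Assuming a stopped
 * tracee "pid":
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	siginfo_t si;
 *	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == 0)
 *		printf("stop signal %d, code %d\n", si.si_signo, si.si_code);
 */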

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
	int ret = -EPERM;

	/*
	 * Are we already being traced?
	 */
repeat:
	task_lock(current);
	if (!(current->ptrace & PT_PTRACED)) {
		/*
		 * See ptrace_attach() comments about the locking here.
		 */
		unsigned long flags;
		if (!write_trylock_irqsave(&tasklist_lock, flags)) {
			task_unlock(current);
			do {
				cpu_relax();
			} while (!write_can_lock(&tasklist_lock));
			goto repeat;
		}

		ret = security_ptrace_traceme(current->parent);

		/*
		 * Set the ptrace bit in the process ptrace flags.
		 * Then link us on our parent's ptraced list.
		 */
		if (!ret) {
			current->ptrace |= PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}

		write_unlock_irqrestore(&tasklist_lock, flags);
	}
	task_unlock(current);
	return ret;
}
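
/*
 * Illustrative userspace sketch (not part of this file): the canonical
 * caller of PTRACE_TRACEME is a freshly forked child that wants its
 * parent to trace it across exec:
 *
 *	#include <sys/ptrace.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execl("/bin/true", "true", (char *)NULL);
 *		_exit(1);
 *	}
 *	// parent: waitpid() sees the child stop with SIGTRAP at exec
 */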

/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It looks up
 * the task by pid and grabs a reference for use by the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	read_lock(&tasklist_lock);
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
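
/*
 * Illustrative userspace sketch (not part of this file): the generic
 * peek/poke helpers above transfer exactly one word, which is why
 * userspace PTRACE_POKEDATA writes a single long.  Patching a word at
 * an assumed remote address "addr" in tracee "pid":
 *
 *	#include <sys/ptrace.h>
 *
 *	long old = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)(old | 1));
 */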

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}

#endif	/* CONFIG_COMPAT */