]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - arch/x86/kernel/ptrace.c
08fef04ac180fe1932d257ee1bfc2e3c6fc75707
[karo-tx-linux.git] / arch / x86 / kernel / ptrace.c
1 /* By Ross Biro 1/23/92 */
2 /*
3  * Pentium III FXSR, SSE support
4  *      Gareth Hughes <gareth@valinux.com>, May 2000
5  */
6
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/smp.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/ptrace.h>
14 #include <linux/regset.h>
15 #include <linux/tracehook.h>
16 #include <linux/user.h>
17 #include <linux/elf.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/seccomp.h>
21 #include <linux/signal.h>
22 #include <linux/perf_event.h>
23 #include <linux/hw_breakpoint.h>
24
25 #include <asm/uaccess.h>
26 #include <asm/pgtable.h>
27 #include <asm/system.h>
28 #include <asm/processor.h>
29 #include <asm/i387.h>
30 #include <asm/debugreg.h>
31 #include <asm/ldt.h>
32 #include <asm/desc.h>
33 #include <asm/prctl.h>
34 #include <asm/proto.h>
35 #include <asm/hw_breakpoint.h>
36
37 #include "tls.h"
38
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/syscalls.h>
41
/*
 * Indexes into the regset array exposed through task_user_regset_view()
 * (used by the REGSET_* arguments to copy_regset_to/from_user below).
 */
enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,	/* aliases REGSET_XFP; presumably the 64-bit view reuses this slot for ioperm — confirm against the regset tables */
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};
51
/* One table entry mapping a register's name to its byte offset in struct pt_regs. */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "ax"; NULL terminates the table */
	int offset;		/* offsetof(struct pt_regs, <name>) */
};

/* Build one table entry from a pt_regs member name (stringified via #r). */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* Sentinel terminating regoffset_table. */
#define REG_OFFSET_END {.name = NULL, .offset = 0}
59
/*
 * Name -> offset table for every pt_regs member, consulted by
 * regs_query_register_offset()/regs_query_register_name().
 * Terminated by REG_OFFSET_END (NULL name).
 */
static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	/* Data segment registers are only saved in pt_regs on 32-bit. */
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};
92
93 /**
94  * regs_query_register_offset() - query register offset from its name
95  * @name:       the name of a register
96  *
97  * regs_query_register_offset() returns the offset of a register in struct
98  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
99  */
100 int regs_query_register_offset(const char *name)
101 {
102         const struct pt_regs_offset *roff;
103         for (roff = regoffset_table; roff->name != NULL; roff++)
104                 if (!strcmp(roff->name, name))
105                         return roff->offset;
106         return -EINVAL;
107 }
108
109 /**
110  * regs_query_register_name() - query register name from its offset
111  * @offset:     the offset of a register in struct pt_regs.
112  *
113  * regs_query_register_name() returns the name of a register from its
114  * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
115  */
116 const char *regs_query_register_name(unsigned int offset)
117 {
118         const struct pt_regs_offset *roff;
119         for (roff = regoffset_table; roff->name != NULL; roff++)
120                 if (roff->offset == offset)
121                         return roff->name;
122         return NULL;
123 }
124
/*
 * pt_regs offsets of the registers that carry the first function
 * arguments, indexed by argument number.
 * NOTE(review): the 32-bit ax/dx/cx ordering looks like the -mregparm=3
 * internal calling convention; the 64-bit ordering matches the SysV ABI.
 * This table has no users in the visible part of this file.
 */
static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
	[0] = offsetof(struct pt_regs, ax),
	[1] = offsetof(struct pt_regs, dx),
	[2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
	[0] = offsetof(struct pt_regs, di),
	[1] = offsetof(struct pt_regs, si),
	[2] = offsetof(struct pt_regs, dx),
	[3] = offsetof(struct pt_regs, cx),
	[4] = offsetof(struct pt_regs, r8),
	[5] = offsetof(struct pt_regs, r9)
#endif
};
139
140 /*
141  * does not yet catch signals sent when the child dies.
142  * in exit.c or in signal.c.
143  */
144
/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Anything outside this mask (e.g. IOPL, VM, IF) is preserved from the
 * existing eflags value in set_flags().
 */
#define FLAG_MASK_32            ((unsigned long)                        \
                                 (X86_EFLAGS_CF | X86_EFLAGS_PF |       \
                                  X86_EFLAGS_AF | X86_EFLAGS_ZF |       \
                                  X86_EFLAGS_SF | X86_EFLAGS_TF |       \
                                  X86_EFLAGS_DF | X86_EFLAGS_OF |       \
                                  X86_EFLAGS_RF | X86_EFLAGS_AC))
154
155 /*
156  * Determines whether a value may be installed in a segment register.
157  */
158 static inline bool invalid_selector(u16 value)
159 {
160         return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
161 }
162
163 #ifdef CONFIG_X86_32
164
165 #define FLAG_MASK               FLAG_MASK_32
166
/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * Now, if the stack is empty, '&regs->sp' is out of range. In this
 * case we try to take the previous stack. To always return a non-null
 * stack pointer we fall back to regs as stack if no previous stack
 * exists.
 *
 * This is valid only for kernel mode traps.
 */
unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	/* Base of the THREAD_SIZE-aligned stack that regs lives on. */
	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
	unsigned long sp = (unsigned long)&regs->sp;
	struct thread_info *tinfo;

	/* &regs->sp still inside the same stack: it IS the previous sp. */
	if (context == (sp & ~(THREAD_SIZE - 1)))
		return sp;

	/* Otherwise consult thread_info for the stack we came from. */
	tinfo = (struct thread_info *)context;
	if (tinfo->previous_esp)
		return tinfo->previous_esp;

	/* Last resort: never return NULL; point at the register frame. */
	return (unsigned long)regs;
}
194
/*
 * Convert a byte offset into struct user_regs_struct (as used by the
 * PTRACE_PEEKUSR/POKEUSR interface) into a pointer into struct pt_regs.
 * regno is a byte offset; >> 2 divides by sizeof(u32) per slot.
 */
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	/* Relies on bx being the first member of pt_regs. */
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}
200
/*
 * Read a segment register for @task by user_regs_struct byte offset.
 * %gs is special on 32-bit: it is lazily switched and not read from
 * pt_regs like the other registers.
 */
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		/* %gs: read the live register for current, saved copy otherwise. */
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}
217
/*
 * Install @value into the segment register identified by its byte
 * offset in user_regs_struct.  Returns 0 or -EIO for an invalid selector.
 */
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* fallthrough: a non-null cs/ss is stored like any other register */

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		/* %gs is lazily switched on 32-bit; see get_segment_reg(). */
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}
255
256 #else  /* CONFIG_X86_64 */
257
258 #define FLAG_MASK               (FLAG_MASK_32 | X86_EFLAGS_NT)
259
/*
 * 64-bit counterpart of the 32-bit pt_regs_access(): map a byte offset
 * in user_regs_struct to a pointer into pt_regs, one register per
 * sizeof(long) slot.
 */
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	/* Relies on r15 being the first member of pt_regs. */
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}
265
/*
 * Read a segment register for @task on 64-bit.  fs/gs/ds/es are not in
 * pt_regs here: for current they are read from the live CPU registers,
 * for a stopped tracee from the saved thread_struct copies.  cs/ss come
 * from pt_regs via the fall-through at the bottom.
 */
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	/* cs/ss (and any other offset) are saved in pt_regs. */
	return *pt_regs_access(task_pt_regs(task), offset);
}
306
/*
 * Install @value into a segment register for @task on 64-bit.
 * fs/gs interact with the fs_base/gs_base handling in putreg(); cs/ss
 * cannot really be changed in 64-bit mode (only mirrored into pt_regs
 * for ia32 tasks).  Returns 0 or -EIO for an invalid selector.
 */
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		/* For current, make the CPU register match immediately. */
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		/* ia32 tasks may legitimately switch cs (e.g. to __USER32_CS). */
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}
379
380 #endif  /* CONFIG_X86_32 */
381
382 static unsigned long get_flags(struct task_struct *task)
383 {
384         unsigned long retval = task_pt_regs(task)->flags;
385
386         /*
387          * If the debugger set TF, hide it from the readout.
388          */
389         if (test_tsk_thread_flag(task, TIF_FORCED_TF))
390                 retval &= ~X86_EFLAGS_TF;
391
392         return retval;
393 }
394
395 static int set_flags(struct task_struct *task, unsigned long value)
396 {
397         struct pt_regs *regs = task_pt_regs(task);
398
399         /*
400          * If the user value contains TF, mark that
401          * it was not "us" (the debugger) that set it.
402          * If not, make sure it stays set if we had.
403          */
404         if (value & X86_EFLAGS_TF)
405                 clear_tsk_thread_flag(task, TIF_FORCED_TF);
406         else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
407                 value |= X86_EFLAGS_TF;
408
409         regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
410
411         return 0;
412 }
413
/*
 * Write one register of @child, identified by its byte offset in
 * user_regs_struct.  Segment registers, eflags and (on 64-bit) the
 * fs/gs base MSRs get special handling; everything else is stored
 * straight into pt_regs.  Returns 0 or a negative errno.
 */
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		/* Reject bases outside the tracee's address space. */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	/* Ordinary registers live directly in the saved pt_regs. */
	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}
456
/*
 * Read one register of @task, identified by its byte offset in
 * user_regs_struct.  Mirrors putreg(): segments, eflags and the
 * 64-bit fs/gs base values are special-cased, the rest comes from
 * pt_regs.
 */
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		/* Base lives in the TLS GDT entry, not the MSR. */
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}
505
506 static int genregs_get(struct task_struct *target,
507                        const struct user_regset *regset,
508                        unsigned int pos, unsigned int count,
509                        void *kbuf, void __user *ubuf)
510 {
511         if (kbuf) {
512                 unsigned long *k = kbuf;
513                 while (count >= sizeof(*k)) {
514                         *k++ = getreg(target, pos);
515                         count -= sizeof(*k);
516                         pos += sizeof(*k);
517                 }
518         } else {
519                 unsigned long __user *u = ubuf;
520                 while (count >= sizeof(*u)) {
521                         if (__put_user(getreg(target, pos), u++))
522                                 return -EFAULT;
523                         count -= sizeof(*u);
524                         pos += sizeof(*u);
525                 }
526         }
527
528         return 0;
529 }
530
531 static int genregs_set(struct task_struct *target,
532                        const struct user_regset *regset,
533                        unsigned int pos, unsigned int count,
534                        const void *kbuf, const void __user *ubuf)
535 {
536         int ret = 0;
537         if (kbuf) {
538                 const unsigned long *k = kbuf;
539                 while (count >= sizeof(*k) && !ret) {
540                         ret = putreg(target, pos, *k++);
541                         count -= sizeof(*k);
542                         pos += sizeof(*k);
543                 }
544         } else {
545                 const unsigned long  __user *u = ubuf;
546                 while (count >= sizeof(*u) && !ret) {
547                         unsigned long word;
548                         ret = __get_user(word, u++);
549                         if (ret)
550                                 break;
551                         ret = putreg(target, pos, word);
552                         count -= sizeof(*u);
553                         pos += sizeof(*u);
554                 }
555         }
556         return ret;
557 }
558
/*
 * perf callback invoked when a ptrace hardware breakpoint fires.
 * Records which slot hit in the thread's virtual DR6 so the debugger
 * can read it via PTRACE_PEEKUSR.
 */
static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	/*
	 * NOTE(review): if @bp is somehow not in ptrace_bps[], i == HBP_NUM
	 * here and a bit above DR_TRAP3 gets set.  Presumably this callback
	 * only ever fires for events registered in ptrace_bps[] — confirm.
	 */
	thread->debugreg6 |= (DR_TRAP0 << i);
}
577
578 /*
579  * Walk through every ptrace breakpoints for this thread and
580  * build the dr7 value on top of their attributes.
581  *
582  */
583 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
584 {
585         int i;
586         int dr7 = 0;
587         struct arch_hw_breakpoint *info;
588
589         for (i = 0; i < HBP_NUM; i++) {
590                 if (bp[i] && !bp[i]->attr.disabled) {
591                         info = counter_arch_bp(bp[i]);
592                         dr7 |= encode_dr7(i, info->len, info->type);
593                 }
594         }
595
596         return dr7;
597 }
598
/*
 * Reconfigure an existing ptrace breakpoint: translate the DR7 len/type
 * encoding into the generic perf attributes and apply them, optionally
 * disabling the event.  Returns 0 or a negative errno.
 */
static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
			 struct task_struct *tsk, int disabled)
{
	int err;
	int gen_len, gen_type;
	struct perf_event_attr attr;

	/*
	 * We should have at least an inactive breakpoint at this
	 * slot. It means the user is writing dr7 without having
	 * written the address register first
	 */
	if (!bp)
		return -EINVAL;

	/* Convert the DR7 len/type bits into HW_BREAKPOINT_* values. */
	err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
	if (err)
		return err;

	/* Start from the current attributes and patch in the changes. */
	attr = bp->attr;
	attr.bp_len = gen_len;
	attr.bp_type = gen_type;
	attr.disabled = disabled;

	return modify_user_hw_breakpoint(bp, &attr);
}
626
/*
 * Handle ptrace writes to debug register 7.
 *
 * Runs two passes over the breakpoint slots: the first pass only
 * enables/modifies breakpoints (never disables), so that no slot is
 * released before all enable requests have succeeded; the second pass
 * disables the remaining slots — or, if the first pass failed, replays
 * the old DR7 value to roll everything back.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long old_dr7;
	int i, orig_ret = 0, rc = 0;
	int enabled, second_pass = 0;
	unsigned len, type;
	struct perf_event *bp;

	/* Pin the breakpoint slots for the duration of the update. */
	if (ptrace_get_breakpoints(tsk) < 0)
		return -ESRCH;

	data &= ~DR_CONTROL_RESERVED;
	/* Snapshot the current configuration so we can roll back on error. */
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
	/*
	 * Loop through all the hardware breakpoints, making the
	 * appropriate changes to each.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		enabled = decode_dr7(data, i, &len, &type);
		bp = thread->ptrace_bps[i];

		if (!enabled) {
			if (bp) {
				/*
				 * Don't unregister the breakpoints right-away,
				 * unless all register_user_hw_breakpoint()
				 * requests have succeeded. This prevents
				 * any window of opportunity for debug
				 * register grabbing by other users.
				 */
				if (!second_pass)
					continue;

				rc = ptrace_modify_breakpoint(bp, len, type,
							      tsk, 1);
				if (rc)
					break;
			}
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
		if (rc)
			break;
	}
	/*
	 * Make a second pass to free the remaining unused breakpoints
	 * or to restore the original breakpoints if an error occurred.
	 */
	if (!second_pass) {
		second_pass = 1;
		if (rc < 0) {
			/* Remember the first error; replay the old DR7. */
			orig_ret = rc;
			data = old_dr7;
		}
		goto restore;
	}

	ptrace_put_breakpoints(tsk);

	/* Report the first-pass error if there was one, else the second's. */
	return ((orig_ret < 0) ? orig_ret : rc);
}
694
/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 * DR0-DR3 report the breakpoint address of the corresponding slot
 * (0 when empty), DR6 the virtual status copy, DR7 the value the
 * tracer last wrote.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long val = 0;

	if (n < HBP_NUM) {
		struct perf_event *bp;

		/* Pin the slots so the event can't be released under us. */
		if (ptrace_get_breakpoints(tsk) < 0)
			return -ESRCH;

		bp = thread->ptrace_bps[n];
		if (!bp)
			val = 0;
		else
			val = bp->hw.info.address;

		ptrace_put_breakpoints(tsk);
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}
723
/*
 * Set the address of breakpoint slot @nr for @tsk.  If the slot is
 * empty, reserve it by registering a disabled stub event at @addr;
 * otherwise just retarget the existing event.  Returns 0 or a
 * negative errno.
 */
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct perf_event *bp;
	struct thread_struct *t = &tsk->thread;
	struct perf_event_attr attr;
	int err = 0;

	if (ptrace_get_breakpoints(tsk) < 0)
		return -ESRCH;

	if (!t->ptrace_bps[nr]) {
		ptrace_breakpoint_init(&attr);
		/*
		 * Put stub len and type to register (reserve) an inactive but
		 * correct bp
		 */
		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_1;
		attr.bp_type = HW_BREAKPOINT_W;
		attr.disabled = 1;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);

		/*
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 *  this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto put;
		}

		t->ptrace_bps[nr] = bp;
	} else {
		/* Slot already reserved: just move the existing event. */
		bp = t->ptrace_bps[nr];

		attr = bp->attr;
		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

put:
	ptrace_put_breakpoints(tsk);
	return err;
}
776
/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 * DR0-DR3 set a breakpoint address, DR6 updates the virtual status
 * copy, DR7 reconfigures all slots via ptrace_write_dr7().  DR4/DR5
 * do not exist and yield -EIO.
 */
int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
{
	struct thread_struct *thread = &(tsk->thread);
	int rc = 0;

	/* There are no DR4 or DR5 registers */
	if (n == 4 || n == 5)
		return -EIO;

	if (n == 6) {
		thread->debugreg6 = val;
		goto ret_path;
	}
	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
		if (rc)
			return rc;
	}
	/* All that's left is DR7 */
	if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		/* Remember what the tracer wrote, for later PEEKUSR reads. */
		if (!rc)
			thread->ptrace_dr7 = val;
	}

ret_path:
	return rc;
}
808
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	/* Number of regset-sized units in use (0 when no bitmap is active). */
	return target->thread.io_bitmap_max / regset->size;
}
818
/*
 * user_regset get() handler for the I/O permission bitmap: copy it out
 * to the caller's buffer, or fail with -ENXIO if the task has none.
 */
static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}
831
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	/* Also drop any pending syscall-emulation request (PTRACE_SYSEMU). */
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}
844
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/* Forward declaration for use in arch_ptrace() below. */
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
848
/*
 * Native arch-specific ptrace requests.  Anything not handled here
 * falls through to the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/* addr must be word-aligned and inside struct user */
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			/* map the offset onto a debug-register index */
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		/* same alignment/range check as PTRACE_PEEKUSR above */
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		/* note: any regset error is collapsed to -EIO here */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		/* addr is the TLS slot index; reject negative values */
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					(struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					(struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
965
966 #ifdef CONFIG_IA32_EMULATION
967
968 #include <linux/compat.h>
969 #include <linux/syscalls.h>
970 #include <asm/ia32.h>
971 #include <asm/user32.h>
972
/*
 * Write-side helpers for putreg32(): map a byte offset in the 32-bit
 * 'struct user32' register image onto the corresponding pt_regs field
 * and store 'value' there.
 */
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

/*
 * Same for segment registers, which go through set_segment_reg() so
 * its validity checks apply; note this returns from the enclosing
 * function directly.
 */
#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break
983
/*
 * Store one 32-bit register-image word into @child's state.
 *
 * @regno is a byte offset into 'struct user32'; the R32/SEG32 macros
 * above map it to the matching pt_regs field or segment register.
 * Returns 0 on success, -EIO for an out-of-range or misaligned offset,
 * or whatever the segment/flags/debug-register setters return.
 */
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * A 32-bit debugger setting orig_eax means to restore
		 * the state of the task restarting a 32-bit syscall.
		 * Make sure we interpret the -ERESTART* codes correctly
		 * in case the task is not actually still sitting at the
		 * exit from a 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			task_thread_info(child)->status |= TS_COMPAT;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		/* debug registers: index is the offset in u32 units */
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}
1040
1041 #undef R32
1042 #undef SEG32
1043
/*
 * Read-side counterparts for getreg32(): fetch the pt_regs field (or
 * segment register, via get_segment_reg()) matching an offset in
 * 'struct user32' into *val.
 */
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break
1053
/*
 * Read one 32-bit register-image word from @child's state into *val.
 *
 * @regno is a byte offset into 'struct user32'; the R32/SEG32 macros
 * above map it to the matching pt_regs field or segment register.
 * Aligned offsets that fall on dummy fields read back as 0; anything
 * misaligned or out of range returns -EIO.
 */
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		/* debug registers: index is the offset in u32 units */
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}
1101
1102 #undef R32
1103 #undef SEG32
1104
1105 static int genregs32_get(struct task_struct *target,
1106                          const struct user_regset *regset,
1107                          unsigned int pos, unsigned int count,
1108                          void *kbuf, void __user *ubuf)
1109 {
1110         if (kbuf) {
1111                 compat_ulong_t *k = kbuf;
1112                 while (count >= sizeof(*k)) {
1113                         getreg32(target, pos, k++);
1114                         count -= sizeof(*k);
1115                         pos += sizeof(*k);
1116                 }
1117         } else {
1118                 compat_ulong_t __user *u = ubuf;
1119                 while (count >= sizeof(*u)) {
1120                         compat_ulong_t word;
1121                         getreg32(target, pos, &word);
1122                         if (__put_user(word, u++))
1123                                 return -EFAULT;
1124                         count -= sizeof(*u);
1125                         pos += sizeof(*u);
1126                 }
1127         }
1128
1129         return 0;
1130 }
1131
1132 static int genregs32_set(struct task_struct *target,
1133                          const struct user_regset *regset,
1134                          unsigned int pos, unsigned int count,
1135                          const void *kbuf, const void __user *ubuf)
1136 {
1137         int ret = 0;
1138         if (kbuf) {
1139                 const compat_ulong_t *k = kbuf;
1140                 while (count >= sizeof(*k) && !ret) {
1141                         ret = putreg32(target, pos, *k++);
1142                         count -= sizeof(*k);
1143                         pos += sizeof(*k);
1144                 }
1145         } else {
1146                 const compat_ulong_t __user *u = ubuf;
1147                 while (count >= sizeof(*u) && !ret) {
1148                         compat_ulong_t word;
1149                         ret = __get_user(word, u++);
1150                         if (ret)
1151                                 break;
1152                         ret = putreg32(target, pos, word);
1153                         count -= sizeof(*u);
1154                         pos += sizeof(*u);
1155                 }
1156         }
1157         return ret;
1158 }
1159
/*
 * ptrace entry point for 32-bit tracers under IA32 emulation.
 * Register accesses use the 'struct user32' layout via
 * getreg32()/putreg32(); regset requests use the i386 view.
 * Unhandled requests fall through to compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* bounds/alignment of addr are checked inside getreg32() */
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		/* TLS handling is identical to the native path */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
1225
1226 #endif  /* CONFIG_IA32_EMULATION */
1227
1228 #ifdef CONFIG_X86_64
1229
/*
 * Native 64-bit regset table, indexed by enum x86_regset.  Note that
 * REGSET_XSTATE.n is left zero here and is sized at runtime by
 * update_regset_xstate_info().
 */
static struct user_regset x86_64_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	/* REGSET_IOPERM64 aliases REGSET_XFP's slot; read-only (no .set) */
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

/* The view task_user_regset_view() returns for 64-bit tasks. */
static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};
1261
1262 #else  /* CONFIG_X86_32 */
1263
/*
 * On native 32-bit kernels the "32-bit" and native register/FP layouts
 * are one and the same, so alias the 32-bit names to the native ones.
 */
#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct
1270
1271 #endif  /* CONFIG_X86_64 */
1272
1273 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
 * i386 regset table, indexed by enum x86_regset.  Used both on native
 * 32-bit kernels and for 32-bit tasks under IA32 emulation.  As with
 * the 64-bit table, REGSET_XSTATE.n is sized at runtime by
 * update_regset_xstate_info().
 */
static struct user_regset x86_32_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	/* I/O permission bitmap; read-only (no .set) */
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

/* The view task_user_regset_view() returns for 32-bit tasks. */
static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
1319 #endif
1320
/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.  The XCR0 word within it is filled in
 * by update_regset_xstate_info() below.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
1326
1327 void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
1328 {
1329 #ifdef CONFIG_X86_64
1330         x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
1331 #endif
1332 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1333         x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
1334 #endif
1335         xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
1336 }
1337
/*
 * Pick the regset view matching @task's ABI: the i386 view on native
 * 32-bit kernels, and for TIF_IA32 tasks under CONFIG_IA32_EMULATION;
 * the x86_64 view otherwise.
 *
 * NOTE(review): the #ifdef layering is deliberate - under IA32
 * emulation the 'if' guards the return below it, while on native
 * 32-bit that return is unconditional.  Do not "simplify".
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
1350
1351 static void fill_sigtrap_info(struct task_struct *tsk,
1352                                 struct pt_regs *regs,
1353                                 int error_code, int si_code,
1354                                 struct siginfo *info)
1355 {
1356         tsk->thread.trap_no = 1;
1357         tsk->thread.error_code = error_code;
1358
1359         memset(info, 0, sizeof(*info));
1360         info->si_signo = SIGTRAP;
1361         info->si_code = si_code;
1362         info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
1363 }
1364
/*
 * Fill in @info for the SIGTRAP reported after a single step:
 * TRAP_BRKPT with no error code.
 */
void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs,
				struct siginfo *info)
{
	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}
1371
1372 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1373                                          int error_code, int si_code)
1374 {
1375         struct siginfo info;
1376
1377         fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
1378         /* Send us the fake SIGTRAP */
1379         force_sig_info(SIGTRAP, &info, tsk);
1380 }
1381
1382
/*
 * IS_IA32 is true when the task entered via the 32-bit syscall ABI:
 * always on native 32-bit kernels, per-task (is_compat_task()) under
 * IA32 emulation, and never otherwise.  Used to pick the audit arch
 * and argument registers below.
 */
#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif
1390
/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	/* PTRACE_SYSEMU: skip running the syscall entirely */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	/* report to the tracer; it may also ask us to skip the syscall */
	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	/* audit the syscall with the ABI-appropriate argument registers */
	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}
1439
/*
 * Slow-path syscall-exit work: audit, the exit tracepoint, and the
 * ptrace syscall-exit / single-step report.
 */
void syscall_trace_leave(struct pt_regs *regs)
{
	bool step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
			!test_thread_flag(TIF_SYSCALL_EMU);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}