arch/powerpc/kernel/ptrace.c
1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Derived from "arch/m68k/kernel/ptrace.c"
6  *  Copyright (C) 1994 by Hamish Macdonald
7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11  * and Paul Mackerras (paulus@samba.org).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License.  See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
36
37 #include <linux/uaccess.h>
38 #include <asm/page.h>
39 #include <asm/pgtable.h>
40 #include <asm/switch_to.h>
41 #include <asm/tm.h>
42 #include <asm/asm-prototypes.h>
43
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/syscalls.h>
46
47 /*
48  * The parameter save area on the stack is used to store arguments being passed
49  * to the callee function and is located at a fixed offset from the stack pointer.
50  */
51 #ifdef CONFIG_PPC32
52 #define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
53 #else /* CONFIG_PPC32 */
54 #define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
55 #endif
56
57 struct pt_regs_offset {
58         const char *name;
59         int offset;
60 };
61
62 #define STR(s)  #s                      /* convert to string */
63 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
64 #define GPR_OFFSET_NAME(num)    \
65         {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
66         {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
67 #define REG_OFFSET_END {.name = NULL, .offset = 0}
68
69 #define TVSO(f) (offsetof(struct thread_vr_state, f))
70 #define TFSO(f) (offsetof(struct thread_fp_state, f))
71 #define TSO(f)  (offsetof(struct thread_struct, f))
72
73 static const struct pt_regs_offset regoffset_table[] = {
74         GPR_OFFSET_NAME(0),
75         GPR_OFFSET_NAME(1),
76         GPR_OFFSET_NAME(2),
77         GPR_OFFSET_NAME(3),
78         GPR_OFFSET_NAME(4),
79         GPR_OFFSET_NAME(5),
80         GPR_OFFSET_NAME(6),
81         GPR_OFFSET_NAME(7),
82         GPR_OFFSET_NAME(8),
83         GPR_OFFSET_NAME(9),
84         GPR_OFFSET_NAME(10),
85         GPR_OFFSET_NAME(11),
86         GPR_OFFSET_NAME(12),
87         GPR_OFFSET_NAME(13),
88         GPR_OFFSET_NAME(14),
89         GPR_OFFSET_NAME(15),
90         GPR_OFFSET_NAME(16),
91         GPR_OFFSET_NAME(17),
92         GPR_OFFSET_NAME(18),
93         GPR_OFFSET_NAME(19),
94         GPR_OFFSET_NAME(20),
95         GPR_OFFSET_NAME(21),
96         GPR_OFFSET_NAME(22),
97         GPR_OFFSET_NAME(23),
98         GPR_OFFSET_NAME(24),
99         GPR_OFFSET_NAME(25),
100         GPR_OFFSET_NAME(26),
101         GPR_OFFSET_NAME(27),
102         GPR_OFFSET_NAME(28),
103         GPR_OFFSET_NAME(29),
104         GPR_OFFSET_NAME(30),
105         GPR_OFFSET_NAME(31),
106         REG_OFFSET_NAME(nip),
107         REG_OFFSET_NAME(msr),
108         REG_OFFSET_NAME(ctr),
109         REG_OFFSET_NAME(link),
110         REG_OFFSET_NAME(xer),
111         REG_OFFSET_NAME(ccr),
112 #ifdef CONFIG_PPC64
113         REG_OFFSET_NAME(softe),
114 #else
115         REG_OFFSET_NAME(mq),
116 #endif
117         REG_OFFSET_NAME(trap),
118         REG_OFFSET_NAME(dar),
119         REG_OFFSET_NAME(dsisr),
120         REG_OFFSET_END,
121 };
122
123 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
124 static void flush_tmregs_to_thread(struct task_struct *tsk)
125 {
126         /*
127          * If task is not current, it will have been flushed already to
128          * its thread_struct during __switch_to().
129          *
130          * A reclaim flushes ALL the state.
131          */
132
133         if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
134                 tm_reclaim_current(TM_CAUSE_SIGNAL);
135
136 }
137 #else
138 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
139 #endif
140
141 /**
142  * regs_query_register_offset() - query register offset from its name
143  * @name:       the name of a register
144  *
145  * regs_query_register_offset() returns the offset of a register in struct
146  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
147  */
148 int regs_query_register_offset(const char *name)
149 {
150         const struct pt_regs_offset *roff;
151         for (roff = regoffset_table; roff->name != NULL; roff++)
152                 if (!strcmp(roff->name, name))
153                         return roff->offset;
154         return -EINVAL;
155 }
156
157 /**
158  * regs_query_register_name() - query register name from its offset
159  * @offset:     the offset of a register in struct pt_regs.
160  *
161  * regs_query_register_name() returns the name of a register from its
162  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
163  */
164 const char *regs_query_register_name(unsigned int offset)
165 {
166         const struct pt_regs_offset *roff;
167         for (roff = regoffset_table; roff->name != NULL; roff++)
168                 if (roff->offset == offset)
169                         return roff->name;
170         return NULL;
171 }
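/*
 * Illustrative sketch: kprobe-based event tracing fetches a register by
 * name through these helpers, using the powerpc regs_get_register()
 * accessor from asm/ptrace.h (assumed here):
 *
 *	int offset = regs_query_register_offset("r3");
 *	unsigned long val = 0;
 *
 *	if (offset >= 0)
 *		val = regs_get_register(regs, offset);
 */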
172
173 /*
174  * This does not yet catch signals sent when the child dies;
175  * see exit.c and signal.c.
176  */
177
178 /*
179  * Set of msr bits that gdb can change on behalf of a process.
180  */
181 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
182 #define MSR_DEBUGCHANGE 0
183 #else
184 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
185 #endif
186
187 /*
188  * Max register writeable via put_reg
189  */
190 #ifdef CONFIG_PPC32
191 #define PT_MAX_PUT_REG  PT_MQ
192 #else
193 #define PT_MAX_PUT_REG  PT_CCR
194 #endif
195
196 static unsigned long get_user_msr(struct task_struct *task)
197 {
198         return task->thread.regs->msr | task->thread.fpexc_mode;
199 }
200
201 static int set_user_msr(struct task_struct *task, unsigned long msr)
202 {
203         task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
204         task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
205         return 0;
206 }
207
208 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
209 static unsigned long get_user_ckpt_msr(struct task_struct *task)
210 {
211         return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
212 }
213
214 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
215 {
216         task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
217         task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
218         return 0;
219 }
220
221 static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
222 {
223         task->thread.ckpt_regs.trap = trap & 0xfff0;
224         return 0;
225 }
226 #endif
227
228 #ifdef CONFIG_PPC64
229 static int get_user_dscr(struct task_struct *task, unsigned long *data)
230 {
231         *data = task->thread.dscr;
232         return 0;
233 }
234
235 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
236 {
237         task->thread.dscr = dscr;
238         task->thread.dscr_inherit = 1;
239         return 0;
240 }
241 #else
242 static int get_user_dscr(struct task_struct *task, unsigned long *data)
243 {
244         return -EIO;
245 }
246
247 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
248 {
249         return -EIO;
250 }
251 #endif
252
253 /*
254  * We prevent mucking around with the reserved bits of trap,
255  * which are used internally by the kernel.
256  */
257 static int set_user_trap(struct task_struct *task, unsigned long trap)
258 {
259         task->thread.regs->trap = trap & 0xfff0;
260         return 0;
261 }
262
263 /*
264  * Get contents of register REGNO in task TASK.
265  */
266 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
267 {
268         if ((task->thread.regs == NULL) || !data)
269                 return -EIO;
270
271         if (regno == PT_MSR) {
272                 *data = get_user_msr(task);
273                 return 0;
274         }
275
276         if (regno == PT_DSCR)
277                 return get_user_dscr(task, data);
278
279         if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
280                 *data = ((unsigned long *)task->thread.regs)[regno];
281                 return 0;
282         }
283
284         return -EIO;
285 }
286
287 /*
288  * Write contents of register REGNO in task TASK.
289  */
290 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
291 {
292         if (task->thread.regs == NULL)
293                 return -EIO;
294
295         if (regno == PT_MSR)
296                 return set_user_msr(task, data);
297         if (regno == PT_TRAP)
298                 return set_user_trap(task, data);
299         if (regno == PT_DSCR)
300                 return set_user_dscr(task, data);
301
302         if (regno <= PT_MAX_PUT_REG) {
303                 ((unsigned long *)task->thread.regs)[regno] = data;
304                 return 0;
305         }
306         return -EIO;
307 }
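/*
 * Note: ptrace_get_reg()/ptrace_put_reg() back the one-register-at-a-time
 * PTRACE_PEEKUSR/PTRACE_POKEUSR style requests (indexed by the PT_*
 * constants), while the regset functions below back PTRACE_GETREGSET/
 * PTRACE_SETREGSET.  A debugger-side read of the MSR might look like
 * this (userspace sketch, PTRACE_PEEKUSER being the glibc spelling):
 *
 *	errno = 0;
 *	long msr = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)(PT_MSR * sizeof(long)), NULL);
 */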
308
309 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
310                    unsigned int pos, unsigned int count,
311                    void *kbuf, void __user *ubuf)
312 {
313         int i, ret;
314
315         if (target->thread.regs == NULL)
316                 return -EIO;
317
318         if (!FULL_REGS(target->thread.regs)) {
319                 /* We have a partial register set.  Fill 14-31 with bogus values */
320                 for (i = 14; i < 32; i++)
321                         target->thread.regs->gpr[i] = NV_REG_POISON;
322         }
323
324         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
325                                   target->thread.regs,
326                                   0, offsetof(struct pt_regs, msr));
327         if (!ret) {
328                 unsigned long msr = get_user_msr(target);
329                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
330                                           offsetof(struct pt_regs, msr),
331                                           offsetof(struct pt_regs, msr) +
332                                           sizeof(msr));
333         }
334
335         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
336                      offsetof(struct pt_regs, msr) + sizeof(long));
337
338         if (!ret)
339                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
340                                           &target->thread.regs->orig_gpr3,
341                                           offsetof(struct pt_regs, orig_gpr3),
342                                           sizeof(struct pt_regs));
343         if (!ret)
344                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
345                                                sizeof(struct pt_regs), -1);
346
347         return ret;
348 }
349
350 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
351                    unsigned int pos, unsigned int count,
352                    const void *kbuf, const void __user *ubuf)
353 {
354         unsigned long reg;
355         int ret;
356
357         if (target->thread.regs == NULL)
358                 return -EIO;
359
360         CHECK_FULL_REGS(target->thread.regs);
361
362         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
363                                  target->thread.regs,
364                                  0, PT_MSR * sizeof(reg));
365
366         if (!ret && count > 0) {
367                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
368                                          PT_MSR * sizeof(reg),
369                                          (PT_MSR + 1) * sizeof(reg));
370                 if (!ret)
371                         ret = set_user_msr(target, reg);
372         }
373
374         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
375                      offsetof(struct pt_regs, msr) + sizeof(long));
376
377         if (!ret)
378                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
379                                          &target->thread.regs->orig_gpr3,
380                                          PT_ORIG_R3 * sizeof(reg),
381                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
382
383         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
384                 ret = user_regset_copyin_ignore(
385                         &pos, &count, &kbuf, &ubuf,
386                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
387                         PT_TRAP * sizeof(reg));
388
389         if (!ret && count > 0) {
390                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
391                                          PT_TRAP * sizeof(reg),
392                                          (PT_TRAP + 1) * sizeof(reg));
393                 if (!ret)
394                         ret = set_user_trap(target, reg);
395         }
396
397         if (!ret)
398                 ret = user_regset_copyin_ignore(
399                         &pos, &count, &kbuf, &ubuf,
400                         (PT_TRAP + 1) * sizeof(reg), -1);
401
402         return ret;
403 }
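/*
 * Userspace-side sketch (not kernel code): gpr_get()/gpr_set() are
 * normally reached through PTRACE_GETREGSET/PTRACE_SETREGSET with the
 * NT_PRSTATUS note type, e.g.
 *
 *	struct pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */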
404
405 /*
406  * Regardless of transactions, 'fp_state' holds the current running
407  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
408  * value of all FPR registers for the current transaction.
409  *
410  * Userspace interface buffer layout:
411  *
412  * struct data {
413  *      u64     fpr[32];
414  *      u64     fpscr;
415  * };
416  */
417 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
418                    unsigned int pos, unsigned int count,
419                    void *kbuf, void __user *ubuf)
420 {
421 #ifdef CONFIG_VSX
422         u64 buf[33];
423         int i;
424
425         flush_fp_to_thread(target);
426
427         /* copy to local buffer then write that out */
428         for (i = 0; i < 32 ; i++)
429                 buf[i] = target->thread.TS_FPR(i);
430         buf[32] = target->thread.fp_state.fpscr;
431         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
432 #else
433         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
434                      offsetof(struct thread_fp_state, fpr[32]));
435
436         flush_fp_to_thread(target);
437
438         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
439                                    &target->thread.fp_state, 0, -1);
440 #endif
441 }
442
443 /*
444  * Regardless of transactions, 'fp_state' holds the current running
445  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
446  * value of all FPR registers for the current transaction.
447  *
448  * Userspace interface buffer layout:
449  *
450  * struct data {
451  *      u64     fpr[32];
452  *      u64     fpscr;
453  * };
454  *
455  */
456 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
457                    unsigned int pos, unsigned int count,
458                    const void *kbuf, const void __user *ubuf)
459 {
460 #ifdef CONFIG_VSX
461         u64 buf[33];
462         int i;
463
464         flush_fp_to_thread(target);
465
466         for (i = 0; i < 32 ; i++)
467                 buf[i] = target->thread.TS_FPR(i);
468         buf[32] = target->thread.fp_state.fpscr;
469
470         /* copy to local buffer then write that out */
471         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
472         if (i)
473                 return i;
474
475         for (i = 0; i < 32 ; i++)
476                 target->thread.TS_FPR(i) = buf[i];
477         target->thread.fp_state.fpscr = buf[32];
478         return 0;
479 #else
480         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
481                      offsetof(struct thread_fp_state, fpr[32]));
482
483         flush_fp_to_thread(target);
484
485         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
486                                   &target->thread.fp_state, 0, -1);
487 #endif
488 }
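/*
 * The FP regset above is exported to userspace as NT_PRFPREG; reading it
 * with PTRACE_GETREGSET yields the u64 fpr[32] + fpscr layout described
 * in the comment above, whether or not the kernel is built with
 * CONFIG_VSX.
 */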
489
490 #ifdef CONFIG_ALTIVEC
491 /*
492  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
493  * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
494  * corresponding vector registers.  Quadword 32 contains the vscr as the
495  * last word (offset 12) within that quadword.  Quadword 33 contains the
496  * vrsave as the first word (offset 0) within the quadword.
497  *
498  * This definition of the VMX state is compatible with the current PPC32
499  * ptrace interface.  This allows signal handling and ptrace to use the
500  * same structures.  This also simplifies the implementation of a bi-arch
501  * (combined 32- and 64-bit) gdb.
502  */
503
504 static int vr_active(struct task_struct *target,
505                      const struct user_regset *regset)
506 {
507         flush_altivec_to_thread(target);
508         return target->thread.used_vr ? regset->n : 0;
509 }
510
511 /*
512  * Regardless of transactions, 'vr_state' holds the current running
513  * value of all the VMX registers and 'ckvr_state' holds the last
514  * checkpointed value of all the VMX registers for the current
515  * transaction to fall back on in case it aborts.
516  *
517  * Userspace interface buffer layout:
518  *
519  * struct data {
520  *      vector128       vr[32];
521  *      vector128       vscr;
522  *      vector128       vrsave;
523  * };
524  */
525 static int vr_get(struct task_struct *target, const struct user_regset *regset,
526                   unsigned int pos, unsigned int count,
527                   void *kbuf, void __user *ubuf)
528 {
529         int ret;
530
531         flush_altivec_to_thread(target);
532
533         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
534                      offsetof(struct thread_vr_state, vr[32]));
535
536         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
537                                   &target->thread.vr_state, 0,
538                                   33 * sizeof(vector128));
539         if (!ret) {
540                 /*
541                  * Copy out only the low-order word of vrsave.
542                  */
543                 union {
544                         elf_vrreg_t reg;
545                         u32 word;
546                 } vrsave;
547                 memset(&vrsave, 0, sizeof(vrsave));
548
549                 vrsave.word = target->thread.vrsave;
550
551                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
552                                           33 * sizeof(vector128), -1);
553         }
554
555         return ret;
556 }
557
558 /*
559  * Regardless of transactions, 'vr_state' holds the current running
560  * value of all the VMX registers and 'ckvr_state' holds the last
561  * checkpointed value of all the VMX registers for the current
562  * transaction to fall back on in case it aborts.
563  *
564  * Userspace interface buffer layout:
565  *
566  * struct data {
567  *      vector128       vr[32];
568  *      vector128       vscr;
569  *      vector128       vrsave;
570  * };
571  */
572 static int vr_set(struct task_struct *target, const struct user_regset *regset,
573                   unsigned int pos, unsigned int count,
574                   const void *kbuf, const void __user *ubuf)
575 {
576         int ret;
577
578         flush_altivec_to_thread(target);
579
580         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
581                      offsetof(struct thread_vr_state, vr[32]));
582
583         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
584                                  &target->thread.vr_state, 0,
585                                  33 * sizeof(vector128));
586         if (!ret && count > 0) {
587                 /*
588                  * We use only the first word of vrsave.
589                  */
590                 union {
591                         elf_vrreg_t reg;
592                         u32 word;
593                 } vrsave;
594                 memset(&vrsave, 0, sizeof(vrsave));
595
596                 vrsave.word = target->thread.vrsave;
597
598                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
599                                          33 * sizeof(vector128), -1);
600                 if (!ret)
601                         target->thread.vrsave = vrsave.word;
602         }
603
604         return ret;
605 }
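/*
 * Userspace-side sketch (assuming the NT_PPC_VMX note type wired up in
 * the regset view later in this file): the 34-quadword layout described
 * above is fetched in one go, e.g.
 *
 *	unsigned char vmx[34 * 16];
 *	struct iovec iov = { .iov_base = vmx, .iov_len = sizeof(vmx) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_VMX, &iov);
 */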
606 #endif /* CONFIG_ALTIVEC */
607
608 #ifdef CONFIG_VSX
609 /*
610  * Currently, to set and get all the VSX state, you need to call
611  * the FP and VMX calls as well.  This only gets/sets the lower 32
612  * 128-bit VSX registers.
613  */
614
615 static int vsr_active(struct task_struct *target,
616                       const struct user_regset *regset)
617 {
618         flush_vsx_to_thread(target);
619         return target->thread.used_vsr ? regset->n : 0;
620 }
621
622 /*
623  * Regardless of transactions, 'fp_state' holds the current running
624  * value of all FPR registers and 'ckfp_state' holds the last
625  * checkpointed value of all FPR registers for the current
626  * transaction.
627  *
628  * Userspace interface buffer layout:
629  *
630  * struct data {
631  *      u64     vsx[32];
632  * };
633  */
634 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
635                    unsigned int pos, unsigned int count,
636                    void *kbuf, void __user *ubuf)
637 {
638         u64 buf[32];
639         int ret, i;
640
641         flush_tmregs_to_thread(target);
642         flush_fp_to_thread(target);
643         flush_altivec_to_thread(target);
644         flush_vsx_to_thread(target);
645
646         for (i = 0; i < 32 ; i++)
647                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
648
649         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
650                                   buf, 0, 32 * sizeof(double));
651
652         return ret;
653 }
654
655 /*
656  * Regardless of transactions, 'fp_state' holds the current running
657  * value of all FPR registers and 'ckfp_state' holds the last
658  * checkpointed value of all FPR registers for the current
659  * transaction.
660  *
661  * Userspace interface buffer layout:
662  *
663  * struct data {
664  *      u64     vsx[32];
665  * };
666  */
667 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
668                    unsigned int pos, unsigned int count,
669                    const void *kbuf, const void __user *ubuf)
670 {
671         u64 buf[32];
672         int ret, i;
673
674         flush_tmregs_to_thread(target);
675         flush_fp_to_thread(target);
676         flush_altivec_to_thread(target);
677         flush_vsx_to_thread(target);
678
679         for (i = 0; i < 32 ; i++)
680                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
681
682         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
683                                  buf, 0, 32 * sizeof(double));
684         if (!ret)
685                 for (i = 0; i < 32 ; i++)
686                         target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
687
688         return ret;
689 }
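/*
 * Note on the split: a debugger that wants the full 64 x 128-bit VSX
 * register file combines three regsets - the FP regset (VSR0-31
 * doubleword 0, i.e. the FPRs), this VSX regset (VSR0-31 doubleword 1,
 * NT_PPC_VSX) and the VMX regset (VSR32-63).  This is how gdb and
 * similar tools typically reconstruct VSX state.
 */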
690 #endif /* CONFIG_VSX */
691
692 #ifdef CONFIG_SPE
693
694 /*
695  * For get_evrregs/set_evrregs functions 'data' has the following layout:
696  *
697  * struct {
698  *   u32 evr[32];
699  *   u64 acc;
700  *   u32 spefscr;
701  * }
702  */
703
704 static int evr_active(struct task_struct *target,
705                       const struct user_regset *regset)
706 {
707         flush_spe_to_thread(target);
708         return target->thread.used_spe ? regset->n : 0;
709 }
710
711 static int evr_get(struct task_struct *target, const struct user_regset *regset,
712                    unsigned int pos, unsigned int count,
713                    void *kbuf, void __user *ubuf)
714 {
715         int ret;
716
717         flush_spe_to_thread(target);
718
719         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
720                                   &target->thread.evr,
721                                   0, sizeof(target->thread.evr));
722
723         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
724                      offsetof(struct thread_struct, spefscr));
725
726         if (!ret)
727                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
728                                           &target->thread.acc,
729                                           sizeof(target->thread.evr), -1);
730
731         return ret;
732 }
733
734 static int evr_set(struct task_struct *target, const struct user_regset *regset,
735                    unsigned int pos, unsigned int count,
736                    const void *kbuf, const void __user *ubuf)
737 {
738         int ret;
739
740         flush_spe_to_thread(target);
741
742         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
743                                  &target->thread.evr,
744                                  0, sizeof(target->thread.evr));
745
746         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
747                      offsetof(struct thread_struct, spefscr));
748
749         if (!ret)
750                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
751                                          &target->thread.acc,
752                                          sizeof(target->thread.evr), -1);
753
754         return ret;
755 }
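/*
 * The SPE regset above is exposed as NT_PPC_SPE; its 35-word layout
 * (32 x u32 evr, one u64 acc, one u32 spefscr) matches the struct shown
 * in the comment at the top of this section.
 */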
756 #endif /* CONFIG_SPE */
757
758 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
759 /**
760  * tm_cgpr_active - get active number of registers in CGPR
761  * @target:     The target task.
762  * @regset:     The user regset structure.
763  *
764  * This function checks for the active number of available
765  * registers in the transaction checkpointed GPR category.
766  */
767 static int tm_cgpr_active(struct task_struct *target,
768                           const struct user_regset *regset)
769 {
770         if (!cpu_has_feature(CPU_FTR_TM))
771                 return -ENODEV;
772
773         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
774                 return 0;
775
776         return regset->n;
777 }
778
779 /**
780  * tm_cgpr_get - get CGPR registers
781  * @target:     The target task.
782  * @regset:     The user regset structure.
783  * @pos:        The buffer position.
784  * @count:      Number of bytes to copy.
785  * @kbuf:       Kernel buffer to copy from.
786  * @ubuf:       User buffer to copy into.
787  *
788  * This function gets transaction checkpointed GPR registers.
789  *
790  * When the transaction is active, 'ckpt_regs' holds all the checkpointed
791  * GPR register values for the current transaction to fall back on if it
792  * aborts in between. This function gets those checkpointed GPR registers.
793  * The userspace interface buffer layout is as follows.
794  *
795  * struct data {
796  *      struct pt_regs ckpt_regs;
797  * };
798  */
799 static int tm_cgpr_get(struct task_struct *target,
800                         const struct user_regset *regset,
801                         unsigned int pos, unsigned int count,
802                         void *kbuf, void __user *ubuf)
803 {
804         int ret;
805
806         if (!cpu_has_feature(CPU_FTR_TM))
807                 return -ENODEV;
808
809         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
810                 return -ENODATA;
811
812         flush_tmregs_to_thread(target);
813         flush_fp_to_thread(target);
814         flush_altivec_to_thread(target);
815
816         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
817                                   &target->thread.ckpt_regs,
818                                   0, offsetof(struct pt_regs, msr));
819         if (!ret) {
820                 unsigned long msr = get_user_ckpt_msr(target);
821
822                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
823                                           offsetof(struct pt_regs, msr),
824                                           offsetof(struct pt_regs, msr) +
825                                           sizeof(msr));
826         }
827
828         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
829                      offsetof(struct pt_regs, msr) + sizeof(long));
830
831         if (!ret)
832                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
833                                           &target->thread.ckpt_regs.orig_gpr3,
834                                           offsetof(struct pt_regs, orig_gpr3),
835                                           sizeof(struct pt_regs));
836         if (!ret)
837                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
838                                                sizeof(struct pt_regs), -1);
839
840         return ret;
841 }
842
843 /**
844  * tm_cgpr_set - set the CGPR registers
845  * @target:     The target task.
846  * @regset:     The user regset structure.
847  * @pos:        The buffer position.
848  * @count:      Number of bytes to copy.
849  * @kbuf:       Kernel buffer to copy into.
850  * @ubuf:       User buffer to copy from.
851  *
852  * This function sets the in-transaction checkpointed GPR registers.
853  *
854  * When the transaction is active, 'ckpt_regs' holds the checkpointed
855  * GPR register values for the current transaction to fall back on if it
856  * aborts in between. This function sets those checkpointed GPR registers.
857  * The userspace interface buffer layout is as follows.
858  *
859  * struct data {
860  *      struct pt_regs ckpt_regs;
861  * };
862  */
863 static int tm_cgpr_set(struct task_struct *target,
864                         const struct user_regset *regset,
865                         unsigned int pos, unsigned int count,
866                         const void *kbuf, const void __user *ubuf)
867 {
868         unsigned long reg;
869         int ret;
870
871         if (!cpu_has_feature(CPU_FTR_TM))
872                 return -ENODEV;
873
874         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
875                 return -ENODATA;
876
877         flush_tmregs_to_thread(target);
878         flush_fp_to_thread(target);
879         flush_altivec_to_thread(target);
880
881         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
882                                  &target->thread.ckpt_regs,
883                                  0, PT_MSR * sizeof(reg));
884
885         if (!ret && count > 0) {
886                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
887                                          PT_MSR * sizeof(reg),
888                                          (PT_MSR + 1) * sizeof(reg));
889                 if (!ret)
890                         ret = set_user_ckpt_msr(target, reg);
891         }
892
893         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
894                      offsetof(struct pt_regs, msr) + sizeof(long));
895
896         if (!ret)
897                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
898                                          &target->thread.ckpt_regs.orig_gpr3,
899                                          PT_ORIG_R3 * sizeof(reg),
900                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
901
902         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
903                 ret = user_regset_copyin_ignore(
904                         &pos, &count, &kbuf, &ubuf,
905                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
906                         PT_TRAP * sizeof(reg));
907
908         if (!ret && count > 0) {
909                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
910                                          PT_TRAP * sizeof(reg),
911                                          (PT_TRAP + 1) * sizeof(reg));
912                 if (!ret)
913                         ret = set_user_ckpt_trap(target, reg);
914         }
915
916         if (!ret)
917                 ret = user_regset_copyin_ignore(
918                         &pos, &count, &kbuf, &ubuf,
919                         (PT_TRAP + 1) * sizeof(reg), -1);
920
921         return ret;
922 }
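/*
 * The checkpointed-GPR regset pair above only carries data while a
 * transaction is active (tm_cgpr_active() reports 0 registers
 * otherwise).  A debugger would typically probe it with PTRACE_GETREGSET
 * and the NT_PPC_TM_CGPR note type, treating -ENODATA as "no transaction
 * in progress".
 */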
923
924 /**
925  * tm_cfpr_active - get active number of registers in CFPR
926  * @target:     The target task.
927  * @regset:     The user regset structure.
928  *
929  * This function checks for the active number of available
930  * registers in the transaction checkpointed FPR category.
931  */
932 static int tm_cfpr_active(struct task_struct *target,
933                                 const struct user_regset *regset)
934 {
935         if (!cpu_has_feature(CPU_FTR_TM))
936                 return -ENODEV;
937
938         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
939                 return 0;
940
941         return regset->n;
942 }
943
944 /**
945  * tm_cfpr_get - get CFPR registers
946  * @target:     The target task.
947  * @regset:     The user regset structure.
948  * @pos:        The buffer position.
949  * @count:      Number of bytes to copy.
950  * @kbuf:       Kernel buffer to copy from.
951  * @ubuf:       User buffer to copy into.
952  *
953  * This function gets the in-transaction checkpointed FPR registers.
954  *
955  * When the transaction is active 'ckfp_state' holds the checkpointed
956  * values for the current transaction to fall back on if it aborts
957  * in between. This function gets those checkpointed FPR registers.
958  * The userspace interface buffer layout is as follows.
959  *
960  * struct data {
961  *      u64     fpr[32];
962  *      u64     fpscr;
963  * };
964  */
965 static int tm_cfpr_get(struct task_struct *target,
966                         const struct user_regset *regset,
967                         unsigned int pos, unsigned int count,
968                         void *kbuf, void __user *ubuf)
969 {
970         u64 buf[33];
971         int i;
972
973         if (!cpu_has_feature(CPU_FTR_TM))
974                 return -ENODEV;
975
976         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
977                 return -ENODATA;
978
979         flush_tmregs_to_thread(target);
980         flush_fp_to_thread(target);
981         flush_altivec_to_thread(target);
982
983         /* copy to local buffer then write that out */
984         for (i = 0; i < 32 ; i++)
985                 buf[i] = target->thread.TS_CKFPR(i);
986         buf[32] = target->thread.ckfp_state.fpscr;
987         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
988 }
989
990 /**
991  * tm_cfpr_set - set CFPR registers
992  * @target:     The target task.
993  * @regset:     The user regset structure.
994  * @pos:        The buffer position.
995  * @count:      Number of bytes to copy.
996  * @kbuf:       Kernel buffer to copy into.
997  * @ubuf:       User buffer to copy from.
998  *
999  * This function sets the in-transaction checkpointed FPR registers.
1000  *
1001  * When the transaction is active 'ckfp_state' holds the checkpointed
1002  * FPR register values for the current transaction to fall back on
1003  * if it aborts in between. This function sets these checkpointed
1004  * FPR registers. The userspace interface buffer layout is as follows.
1005  *
1006  * struct data {
1007  *      u64     fpr[32];
1008  *      u64     fpscr;
1009  * };
1010  */
1011 static int tm_cfpr_set(struct task_struct *target,
1012                         const struct user_regset *regset,
1013                         unsigned int pos, unsigned int count,
1014                         const void *kbuf, const void __user *ubuf)
1015 {
1016         u64 buf[33];
1017         int i;
1018
1019         if (!cpu_has_feature(CPU_FTR_TM))
1020                 return -ENODEV;
1021
1022         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1023                 return -ENODATA;
1024
1025         flush_tmregs_to_thread(target);
1026         flush_fp_to_thread(target);
1027         flush_altivec_to_thread(target);
1028
1029         for (i = 0; i < 32; i++)
1030                 buf[i] = target->thread.TS_CKFPR(i);
1031         buf[32] = target->thread.ckfp_state.fpscr;
1032
1033         /* copy to local buffer then write that out */
1034         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1035         if (i)
1036                 return i;
1037         for (i = 0; i < 32 ; i++)
1038                 target->thread.TS_CKFPR(i) = buf[i];
1039         target->thread.ckfp_state.fpscr = buf[32];
1040         return 0;
1041 }
1042
1043 /**
1044  * tm_cvmx_active - get active number of registers in CVMX
1045  * @target:     The target task.
1046  * @regset:     The user regset structure.
1047  *
1048  * This function checks for the active number of available
1049  * registers in the checkpointed VMX category.
1050  */
1051 static int tm_cvmx_active(struct task_struct *target,
1052                                 const struct user_regset *regset)
1053 {
1054         if (!cpu_has_feature(CPU_FTR_TM))
1055                 return -ENODEV;
1056
1057         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1058                 return 0;
1059
1060         return regset->n;
1061 }
1062
1063 /**
1064  * tm_cvmx_get - get CVMX registers
1065  * @target:     The target task.
1066  * @regset:     The user regset structure.
1067  * @pos:        The buffer position.
1068  * @count:      Number of bytes to copy.
1069  * @kbuf:       Kernel buffer to copy from.
1070  * @ubuf:       User buffer to copy into.
1071  *
1072  * This function gets the in-transaction checkpointed VMX registers.
1073  *
1074  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1075  * the checkpointed values for the current transaction to fall
1076  * back on if it aborts in between. The userspace interface buffer
1077  * layout is as follows.
1078  *
1079  * struct data {
1080  *      vector128       vr[32];
1081  *      vector128       vscr;
1082  *      vector128       vrsave;
1083  * };
1084  */
1085 static int tm_cvmx_get(struct task_struct *target,
1086                         const struct user_regset *regset,
1087                         unsigned int pos, unsigned int count,
1088                         void *kbuf, void __user *ubuf)
1089 {
1090         int ret;
1091
1092         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1093
1094         if (!cpu_has_feature(CPU_FTR_TM))
1095                 return -ENODEV;
1096
1097         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1098                 return -ENODATA;
1099
1100         /* Flush the state */
1101         flush_tmregs_to_thread(target);
1102         flush_fp_to_thread(target);
1103         flush_altivec_to_thread(target);
1104
1105         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1106                                         &target->thread.ckvr_state, 0,
1107                                         33 * sizeof(vector128));
1108         if (!ret) {
1109                 /*
1110                  * Copy out only the low-order word of vrsave.
1111                  */
1112                 union {
1113                         elf_vrreg_t reg;
1114                         u32 word;
1115                 } vrsave;
1116                 memset(&vrsave, 0, sizeof(vrsave));
1117                 vrsave.word = target->thread.ckvrsave;
1118                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1119                                                 33 * sizeof(vector128), -1);
1120         }
1121
1122         return ret;
1123 }
1124
1125 /**
1126  * tm_cvmx_set - set CVMX registers
1127  * @target:     The target task.
1128  * @regset:     The user regset structure.
1129  * @pos:        The buffer position.
1130  * @count:      Number of bytes to copy.
1131  * @kbuf:       Kernel buffer to copy into.
1132  * @ubuf:       User buffer to copy from.
1133  *
1134  * This function sets the in-transaction checkpointed VMX registers.
1135  *
1136  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1137  * the checkpointed values for the current transaction to fall
1138  * back on if it aborts in between. The userspace interface buffer
1139  * layout is as follows.
1140  *
1141  * struct data {
1142  *      vector128       vr[32];
1143  *      vector128       vscr;
1144  *      vector128       vrsave;
1145  * };
1146  */
1147 static int tm_cvmx_set(struct task_struct *target,
1148                         const struct user_regset *regset,
1149                         unsigned int pos, unsigned int count,
1150                         const void *kbuf, const void __user *ubuf)
1151 {
1152         int ret;
1153
1154         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1155
1156         if (!cpu_has_feature(CPU_FTR_TM))
1157                 return -ENODEV;
1158
1159         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1160                 return -ENODATA;
1161
1162         flush_tmregs_to_thread(target);
1163         flush_fp_to_thread(target);
1164         flush_altivec_to_thread(target);
1165
1166         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1167                                         &target->thread.ckvr_state, 0,
1168                                         33 * sizeof(vector128));
1169         if (!ret && count > 0) {
1170                 /*
1171                  * We use only the low-order word of vrsave.
1172                  */
1173                 union {
1174                         elf_vrreg_t reg;
1175                         u32 word;
1176                 } vrsave;
1177                 memset(&vrsave, 0, sizeof(vrsave));
1178                 vrsave.word = target->thread.ckvrsave;
1179                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1180                                                 33 * sizeof(vector128), -1);
1181                 if (!ret)
1182                         target->thread.ckvrsave = vrsave.word;
1183         }
1184
1185         return ret;
1186 }
1187
1188 /**
1189  * tm_cvsx_active - get active number of registers in CVSX
1190  * @target:     The target task.
1191  * @regset:     The user regset structure.
1192  *
1193  * This function checks for the active number of available
1194  * registers in the transaction checkpointed VSX category.
1195  */
1196 static int tm_cvsx_active(struct task_struct *target,
1197                                 const struct user_regset *regset)
1198 {
1199         if (!cpu_has_feature(CPU_FTR_TM))
1200                 return -ENODEV;
1201
1202         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1203                 return 0;
1204
1205         flush_vsx_to_thread(target);
1206         return target->thread.used_vsr ? regset->n : 0;
1207 }
1208
1209 /**
1210  * tm_cvsx_get - get CVSX registers
1211  * @target:     The target task.
1212  * @regset:     The user regset structure.
1213  * @pos:        The buffer position.
1214  * @count:      Number of bytes to copy.
1215  * @kbuf:       Kernel buffer to copy from.
1216  * @ubuf:       User buffer to copy into.
1217  *
1218  * This function gets the in-transaction checkpointed VSX registers.
1219  *
1220  * When the transaction is active 'ckfp_state' holds the checkpointed
1221  * values for the current transaction to fall back on if it aborts
1222  * in between. This function gets those checkpointed VSX registers.
1223  * The userspace interface buffer layout is as follows.
1224  *
1225  * struct data {
1226  *      u64     vsx[32];
1227  * };
1228  */
1229 static int tm_cvsx_get(struct task_struct *target,
1230                         const struct user_regset *regset,
1231                         unsigned int pos, unsigned int count,
1232                         void *kbuf, void __user *ubuf)
1233 {
1234         u64 buf[32];
1235         int ret, i;
1236
1237         if (!cpu_has_feature(CPU_FTR_TM))
1238                 return -ENODEV;
1239
1240         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1241                 return -ENODATA;
1242
1243         /* Flush the state */
1244         flush_tmregs_to_thread(target);
1245         flush_fp_to_thread(target);
1246         flush_altivec_to_thread(target);
1247         flush_vsx_to_thread(target);
1248
1249         for (i = 0; i < 32 ; i++)
1250                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1251         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1252                                   buf, 0, 32 * sizeof(double));
1253
1254         return ret;
1255 }
1256
1257 /**
1258  * tm_cvsx_set - set CVSX registers
1259  * @target:     The target task.
1260  * @regset:     The user regset structure.
1261  * @pos:        The buffer position.
1262  * @count:      Number of bytes to copy.
1263  * @kbuf:       Kernel buffer to copy into.
1264  * @ubuf:       User buffer to copy from.
1265  *
1266  * This function sets the in-transaction checkpointed VSX registers.
1267  *
1268  * When the transaction is active 'ckfp_state' holds the checkpointed
1269  * VSX register values for the current transaction to fall back on
1270  * if it aborts in between. This function sets these checkpointed
1271  * VSX registers. The userspace interface buffer layout is as follows.
1272  *
1273  * struct data {
1274  *      u64     vsx[32];
1275  * };
1276  */
1277 static int tm_cvsx_set(struct task_struct *target,
1278                         const struct user_regset *regset,
1279                         unsigned int pos, unsigned int count,
1280                         const void *kbuf, const void __user *ubuf)
1281 {
1282         u64 buf[32];
1283         int ret, i;
1284
1285         if (!cpu_has_feature(CPU_FTR_TM))
1286                 return -ENODEV;
1287
1288         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1289                 return -ENODATA;
1290
1291         /* Flush the state */
1292         flush_tmregs_to_thread(target);
1293         flush_fp_to_thread(target);
1294         flush_altivec_to_thread(target);
1295         flush_vsx_to_thread(target);
1296
1297         for (i = 0; i < 32 ; i++)
1298                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1299
1300         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1301                                  buf, 0, 32 * sizeof(double));
1302         if (!ret)
1303                 for (i = 0; i < 32 ; i++)
1304                         target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1305
1306         return ret;
1307 }
1308
1309 /**
1310  * tm_spr_active - get active number of registers in TM SPR
1311  * @target:     The target task.
1312  * @regset:     The user regset structure.
1313  *
1314  * This function checks the active number of available
1315  * registers in the transactional memory SPR category.
1316  */
1317 static int tm_spr_active(struct task_struct *target,
1318                          const struct user_regset *regset)
1319 {
1320         if (!cpu_has_feature(CPU_FTR_TM))
1321                 return -ENODEV;
1322
1323         return regset->n;
1324 }
1325
1326 /**
1327  * tm_spr_get - get the TM related SPR registers
1328  * @target:     The target task.
1329  * @regset:     The user regset structure.
1330  * @pos:        The buffer position.
1331  * @count:      Number of bytes to copy.
1332  * @kbuf:       Kernel buffer to copy from.
1333  * @ubuf:       User buffer to copy into.
1334  *
1335  * This function gets transactional memory related SPR registers.
1336  * The userspace interface buffer layout is as follows.
1337  *
1338  * struct {
1339  *      u64             tm_tfhar;
1340  *      u64             tm_texasr;
1341  *      u64             tm_tfiar;
1342  * };
1343  */
1344 static int tm_spr_get(struct task_struct *target,
1345                       const struct user_regset *regset,
1346                       unsigned int pos, unsigned int count,
1347                       void *kbuf, void __user *ubuf)
1348 {
1349         int ret;
1350
1351         /* Build tests */
1352         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1353         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1354         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1355
1356         if (!cpu_has_feature(CPU_FTR_TM))
1357                 return -ENODEV;
1358
1359         /* Flush the states */
1360         flush_tmregs_to_thread(target);
1361         flush_fp_to_thread(target);
1362         flush_altivec_to_thread(target);
1363
1364         /* TFHAR register */
1365         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1366                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1367
1368         /* TEXASR register */
1369         if (!ret)
1370                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1371                                 &target->thread.tm_texasr, sizeof(u64),
1372                                 2 * sizeof(u64));
1373
1374         /* TFIAR register */
1375         if (!ret)
1376                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1377                                 &target->thread.tm_tfiar,
1378                                 2 * sizeof(u64), 3 * sizeof(u64));
1379         return ret;
1380 }
1381
1382 /**
1383  * tm_spr_set - set the TM related SPR registers
1384  * @target:     The target task.
1385  * @regset:     The user regset structure.
1386  * @pos:        The buffer position.
1387  * @count:      Number of bytes to copy.
1388  * @kbuf:       Kernel buffer to copy into.
1389  * @ubuf:       User buffer to copy from.
1390  *
1391  * This function sets transactional memory related SPR registers.
1392  * The userspace interface buffer layout is as follows.
1393  *
1394  * struct {
1395  *      u64             tm_tfhar;
1396  *      u64             tm_texasr;
1397  *      u64             tm_tfiar;
1398  * };
1399  */
1400 static int tm_spr_set(struct task_struct *target,
1401                       const struct user_regset *regset,
1402                       unsigned int pos, unsigned int count,
1403                       const void *kbuf, const void __user *ubuf)
1404 {
1405         int ret;
1406
1407         /* Build tests */
1408         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1409         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1410         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1411
1412         if (!cpu_has_feature(CPU_FTR_TM))
1413                 return -ENODEV;
1414
1415         /* Flush the states */
1416         flush_tmregs_to_thread(target);
1417         flush_fp_to_thread(target);
1418         flush_altivec_to_thread(target);
1419
1420         /* TFHAR register */
1421         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1422                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1423
1424         /* TEXASR register */
1425         if (!ret)
1426                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1427                                 &target->thread.tm_texasr, sizeof(u64),
1428                                 2 * sizeof(u64));
1429
1430         /* TFIAR register */
1431         if (!ret)
1432                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1433                                 &target->thread.tm_tfiar,
1434                                  2 * sizeof(u64), 3 * sizeof(u64));
1435         return ret;
1436 }
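/*
 * Unlike the checkpointed-state regsets above, the TM SPR regset
 * (TFHAR/TEXASR/TFIAR, exposed as NT_PPC_TM_SPR) is accessible whenever
 * the CPU supports TM, even with no transaction active - note that
 * tm_spr_active() and tm_spr_get() only check CPU_FTR_TM.
 */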
1437
1438 static int tm_tar_active(struct task_struct *target,
1439                          const struct user_regset *regset)
1440 {
1441         if (!cpu_has_feature(CPU_FTR_TM))
1442                 return -ENODEV;
1443
1444         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1445                 return regset->n;
1446
1447         return 0;
1448 }
1449
1450 static int tm_tar_get(struct task_struct *target,
1451                       const struct user_regset *regset,
1452                       unsigned int pos, unsigned int count,
1453                       void *kbuf, void __user *ubuf)
1454 {
1455         int ret;
1456
1457         if (!cpu_has_feature(CPU_FTR_TM))
1458                 return -ENODEV;
1459
1460         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1461                 return -ENODATA;
1462
1463         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1464                                 &target->thread.tm_tar, 0, sizeof(u64));
1465         return ret;
1466 }
1467
1468 static int tm_tar_set(struct task_struct *target,
1469                       const struct user_regset *regset,
1470                       unsigned int pos, unsigned int count,
1471                       const void *kbuf, const void __user *ubuf)
1472 {
1473         int ret;
1474
1475         if (!cpu_has_feature(CPU_FTR_TM))
1476                 return -ENODEV;
1477
1478         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1479                 return -ENODATA;
1480
1481         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1482                                 &target->thread.tm_tar, 0, sizeof(u64));
1483         return ret;
1484 }
1485
1486 static int tm_ppr_active(struct task_struct *target,
1487                          const struct user_regset *regset)
1488 {
1489         if (!cpu_has_feature(CPU_FTR_TM))
1490                 return -ENODEV;
1491
1492         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1493                 return regset->n;
1494
1495         return 0;
1496 }
1497
1498
1499 static int tm_ppr_get(struct task_struct *target,
1500                       const struct user_regset *regset,
1501                       unsigned int pos, unsigned int count,
1502                       void *kbuf, void __user *ubuf)
1503 {
1504         int ret;
1505
1506         if (!cpu_has_feature(CPU_FTR_TM))
1507                 return -ENODEV;
1508
1509         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1510                 return -ENODATA;
1511
1512         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1513                                 &target->thread.tm_ppr, 0, sizeof(u64));
1514         return ret;
1515 }
1516
1517 static int tm_ppr_set(struct task_struct *target,
1518                       const struct user_regset *regset,
1519                       unsigned int pos, unsigned int count,
1520                       const void *kbuf, const void __user *ubuf)
1521 {
1522         int ret;
1523
1524         if (!cpu_has_feature(CPU_FTR_TM))
1525                 return -ENODEV;
1526
1527         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1528                 return -ENODATA;
1529
1530         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1531                                 &target->thread.tm_ppr, 0, sizeof(u64));
1532         return ret;
1533 }
1534
1535 static int tm_dscr_active(struct task_struct *target,
1536                          const struct user_regset *regset)
1537 {
1538         if (!cpu_has_feature(CPU_FTR_TM))
1539                 return -ENODEV;
1540
1541         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1542                 return regset->n;
1543
1544         return 0;
1545 }
1546
1547 static int tm_dscr_get(struct task_struct *target,
1548                       const struct user_regset *regset,
1549                       unsigned int pos, unsigned int count,
1550                       void *kbuf, void __user *ubuf)
1551 {
1552         int ret;
1553
1554         if (!cpu_has_feature(CPU_FTR_TM))
1555                 return -ENODEV;
1556
1557         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1558                 return -ENODATA;
1559
1560         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1561                                 &target->thread.tm_dscr, 0, sizeof(u64));
1562         return ret;
1563 }
1564
1565 static int tm_dscr_set(struct task_struct *target,
1566                       const struct user_regset *regset,
1567                       unsigned int pos, unsigned int count,
1568                       const void *kbuf, const void __user *ubuf)
1569 {
1570         int ret;
1571
1572         if (!cpu_has_feature(CPU_FTR_TM))
1573                 return -ENODEV;
1574
1575         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1576                 return -ENODATA;
1577
1578         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1579                                 &target->thread.tm_dscr, 0, sizeof(u64));
1580         return ret;
1581 }
1582 #endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */
1583
1584 #ifdef CONFIG_PPC64
1585 static int ppr_get(struct task_struct *target,
1586                       const struct user_regset *regset,
1587                       unsigned int pos, unsigned int count,
1588                       void *kbuf, void __user *ubuf)
1589 {
1590         int ret;
1591
1592         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1593                                 &target->thread.ppr, 0, sizeof(u64));
1594         return ret;
1595 }
1596
1597 static int ppr_set(struct task_struct *target,
1598                       const struct user_regset *regset,
1599                       unsigned int pos, unsigned int count,
1600                       const void *kbuf, const void __user *ubuf)
1601 {
1602         int ret;
1603
1604         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1605                                 &target->thread.ppr, 0, sizeof(u64));
1606         return ret;
1607 }
1608
1609 static int dscr_get(struct task_struct *target,
1610                       const struct user_regset *regset,
1611                       unsigned int pos, unsigned int count,
1612                       void *kbuf, void __user *ubuf)
1613 {
1614         int ret;
1615
1616         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1617                                 &target->thread.dscr, 0, sizeof(u64));
1618         return ret;
1619 }
1620 static int dscr_set(struct task_struct *target,
1621                       const struct user_regset *regset,
1622                       unsigned int pos, unsigned int count,
1623                       const void *kbuf, const void __user *ubuf)
1624 {
1625         int ret;
1626
1627         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1628                                 &target->thread.dscr, 0, sizeof(u64));
1629         return ret;
1630 }
1631 #endif
1632 #ifdef CONFIG_PPC_BOOK3S_64
1633 static int tar_get(struct task_struct *target,
1634                       const struct user_regset *regset,
1635                       unsigned int pos, unsigned int count,
1636                       void *kbuf, void __user *ubuf)
1637 {
1638         int ret;
1639
1640         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1641                                 &target->thread.tar, 0, sizeof(u64));
1642         return ret;
1643 }
1644 static int tar_set(struct task_struct *target,
1645                       const struct user_regset *regset,
1646                       unsigned int pos, unsigned int count,
1647                       const void *kbuf, const void __user *ubuf)
1648 {
1649         int ret;
1650
1651         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1652                                 &target->thread.tar, 0, sizeof(u64));
1653         return ret;
1654 }
1655
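/*
 * The EBB regset exposes EBBRR, EBBHR and BESCR as one block.  The
 * BUILD_BUG_ON()s in ebb_get()/ebb_set() verify that the three fields stay
 * contiguous in thread_struct, since the copy helpers treat them as a
 * single three-word array starting at thread.ebbrr.
 */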
1656 static int ebb_active(struct task_struct *target,
1657                          const struct user_regset *regset)
1658 {
1659         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1660                 return -ENODEV;
1661
1662         if (target->thread.used_ebb)
1663                 return regset->n;
1664
1665         return 0;
1666 }
1667
1668 static int ebb_get(struct task_struct *target,
1669                       const struct user_regset *regset,
1670                       unsigned int pos, unsigned int count,
1671                       void *kbuf, void __user *ubuf)
1672 {
1673         /* Build tests */
1674         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1675         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1676
1677         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1678                 return -ENODEV;
1679
1680         if (!target->thread.used_ebb)
1681                 return -ENODATA;
1682
1683         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1684                         &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1685 }
1686
1687 static int ebb_set(struct task_struct *target,
1688                       const struct user_regset *regset,
1689                       unsigned int pos, unsigned int count,
1690                       const void *kbuf, const void __user *ubuf)
1691 {
1692         int ret = 0;
1693
1694         /* Build tests */
1695         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1696         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1697
1698         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1699                 return -ENODEV;
1700
1701         if (target->thread.used_ebb)
1702                 return -ENODATA;
1703
1704         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1705                         &target->thread.ebbrr, 0, sizeof(unsigned long));
1706
1707         if (!ret)
1708                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1709                         &target->thread.ebbhr, sizeof(unsigned long),
1710                         2 * sizeof(unsigned long));
1711
1712         if (!ret)
1713                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1714                         &target->thread.bescr,
1715                         2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1716
1717         return ret;
1718 }
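
/*
 * The PMU regset covers SIAR, SDAR, SIER, MMCR2 and MMCR0.  As with EBB,
 * the BUILD_BUG_ON()s below rely on the five fields being laid out back to
 * back in thread_struct so they can be copied as one five-word block
 * starting at thread.siar.
 */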
1719 static int pmu_active(struct task_struct *target,
1720                          const struct user_regset *regset)
1721 {
1722         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1723                 return -ENODEV;
1724
1725         return regset->n;
1726 }
1727
1728 static int pmu_get(struct task_struct *target,
1729                       const struct user_regset *regset,
1730                       unsigned int pos, unsigned int count,
1731                       void *kbuf, void __user *ubuf)
1732 {
1733         /* Build tests */
1734         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1735         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1736         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1737         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1738
1739         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1740                 return -ENODEV;
1741
1742         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1743                         &target->thread.siar, 0,
1744                         5 * sizeof(unsigned long));
1745 }
1746
1747 static int pmu_set(struct task_struct *target,
1748                       const struct user_regset *regset,
1749                       unsigned int pos, unsigned int count,
1750                       const void *kbuf, const void __user *ubuf)
1751 {
1752         int ret = 0;
1753
1754         /* Build tests */
1755         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1756         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1757         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1758         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1759
1760         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1761                 return -ENODEV;
1762
1763         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1764                         &target->thread.siar, 0,
1765                         sizeof(unsigned long));
1766
1767         if (!ret)
1768                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1769                         &target->thread.sdar, sizeof(unsigned long),
1770                         2 * sizeof(unsigned long));
1771
1772         if (!ret)
1773                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1774                         &target->thread.sier, 2 * sizeof(unsigned long),
1775                         3 * sizeof(unsigned long));
1776
1777         if (!ret)
1778                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1779                         &target->thread.mmcr2, 3 * sizeof(unsigned long),
1780                         4 * sizeof(unsigned long));
1781
1782         if (!ret)
1783                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1784                         &target->thread.mmcr0, 4 * sizeof(unsigned long),
1785                         5 * sizeof(unsigned long));
1786         return ret;
1787 }
1788 #endif
1789 /*
1790  * These are our native regset flavors.
1791  */
1792 enum powerpc_regset {
1793         REGSET_GPR,
1794         REGSET_FPR,
1795 #ifdef CONFIG_ALTIVEC
1796         REGSET_VMX,
1797 #endif
1798 #ifdef CONFIG_VSX
1799         REGSET_VSX,
1800 #endif
1801 #ifdef CONFIG_SPE
1802         REGSET_SPE,
1803 #endif
1804 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1805         REGSET_TM_CGPR,         /* TM checkpointed GPR registers */
1806         REGSET_TM_CFPR,         /* TM checkpointed FPR registers */
1807         REGSET_TM_CVMX,         /* TM checkpointed VMX registers */
1808         REGSET_TM_CVSX,         /* TM checkpointed VSX registers */
1809         REGSET_TM_SPR,          /* TM specific SPR registers */
1810         REGSET_TM_CTAR,         /* TM checkpointed TAR register */
1811         REGSET_TM_CPPR,         /* TM checkpointed PPR register */
1812         REGSET_TM_CDSCR,        /* TM checkpointed DSCR register */
1813 #endif
1814 #ifdef CONFIG_PPC64
1815         REGSET_PPR,             /* PPR register */
1816         REGSET_DSCR,            /* DSCR register */
1817 #endif
1818 #ifdef CONFIG_PPC_BOOK3S_64
1819         REGSET_TAR,             /* TAR register */
1820         REGSET_EBB,             /* EBB registers */
1821         REGSET_PMR,             /* Performance Monitor Registers */
1822 #endif
1823 };
1824
1825 static const struct user_regset native_regsets[] = {
1826         [REGSET_GPR] = {
1827                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1828                 .size = sizeof(long), .align = sizeof(long),
1829                 .get = gpr_get, .set = gpr_set
1830         },
1831         [REGSET_FPR] = {
1832                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1833                 .size = sizeof(double), .align = sizeof(double),
1834                 .get = fpr_get, .set = fpr_set
1835         },
1836 #ifdef CONFIG_ALTIVEC
1837         [REGSET_VMX] = {
1838                 .core_note_type = NT_PPC_VMX, .n = 34,
1839                 .size = sizeof(vector128), .align = sizeof(vector128),
1840                 .active = vr_active, .get = vr_get, .set = vr_set
1841         },
1842 #endif
1843 #ifdef CONFIG_VSX
1844         [REGSET_VSX] = {
1845                 .core_note_type = NT_PPC_VSX, .n = 32,
1846                 .size = sizeof(double), .align = sizeof(double),
1847                 .active = vsr_active, .get = vsr_get, .set = vsr_set
1848         },
1849 #endif
1850 #ifdef CONFIG_SPE
1851         [REGSET_SPE] = {
1852                 .core_note_type = NT_PPC_SPE, .n = 35,
1853                 .size = sizeof(u32), .align = sizeof(u32),
1854                 .active = evr_active, .get = evr_get, .set = evr_set
1855         },
1856 #endif
1857 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1858         [REGSET_TM_CGPR] = {
1859                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1860                 .size = sizeof(long), .align = sizeof(long),
1861                 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1862         },
1863         [REGSET_TM_CFPR] = {
1864                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1865                 .size = sizeof(double), .align = sizeof(double),
1866                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1867         },
1868         [REGSET_TM_CVMX] = {
1869                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1870                 .size = sizeof(vector128), .align = sizeof(vector128),
1871                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1872         },
1873         [REGSET_TM_CVSX] = {
1874                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1875                 .size = sizeof(double), .align = sizeof(double),
1876                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1877         },
1878         [REGSET_TM_SPR] = {
1879                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1880                 .size = sizeof(u64), .align = sizeof(u64),
1881                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1882         },
1883         [REGSET_TM_CTAR] = {
1884                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1885                 .size = sizeof(u64), .align = sizeof(u64),
1886                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1887         },
1888         [REGSET_TM_CPPR] = {
1889                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1890                 .size = sizeof(u64), .align = sizeof(u64),
1891                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1892         },
1893         [REGSET_TM_CDSCR] = {
1894                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1895                 .size = sizeof(u64), .align = sizeof(u64),
1896                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1897         },
1898 #endif
1899 #ifdef CONFIG_PPC64
1900         [REGSET_PPR] = {
1901                 .core_note_type = NT_PPC_PPR, .n = 1,
1902                 .size = sizeof(u64), .align = sizeof(u64),
1903                 .get = ppr_get, .set = ppr_set
1904         },
1905         [REGSET_DSCR] = {
1906                 .core_note_type = NT_PPC_DSCR, .n = 1,
1907                 .size = sizeof(u64), .align = sizeof(u64),
1908                 .get = dscr_get, .set = dscr_set
1909         },
1910 #endif
1911 #ifdef CONFIG_PPC_BOOK3S_64
1912         [REGSET_TAR] = {
1913                 .core_note_type = NT_PPC_TAR, .n = 1,
1914                 .size = sizeof(u64), .align = sizeof(u64),
1915                 .get = tar_get, .set = tar_set
1916         },
1917         [REGSET_EBB] = {
1918                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1919                 .size = sizeof(u64), .align = sizeof(u64),
1920                 .active = ebb_active, .get = ebb_get, .set = ebb_set
1921         },
1922         [REGSET_PMR] = {
1923                 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1924                 .size = sizeof(u64), .align = sizeof(u64),
1925                 .active = pmu_active, .get = pmu_get, .set = pmu_set
1926         },
1927 #endif
1928 };
1929
1930 static const struct user_regset_view user_ppc_native_view = {
1931         .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
1932         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
1933 };
1934
1935 #ifdef CONFIG_PPC64
1936 #include <linux/compat.h>
1937
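/*
 * gpr32_get_common() exports a tracee's 64-bit pt_regs in the 32-bit
 * (compat) GPR layout: each slot up to PT_MSR is narrowed to a
 * compat_ulong_t, the MSR slot is generated with get_user_msr() rather
 * than copied raw, the remaining slots up to PT_REGS_COUNT follow, and the
 * tail of the regset is zero-filled.
 */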
1938 static int gpr32_get_common(struct task_struct *target,
1939                      const struct user_regset *regset,
1940                      unsigned int pos, unsigned int count,
1941                             void *kbuf, void __user *ubuf,
1942                             unsigned long *regs)
1943 {
1944         compat_ulong_t *k = kbuf;
1945         compat_ulong_t __user *u = ubuf;
1946         compat_ulong_t reg;
1947
1948         pos /= sizeof(reg);
1949         count /= sizeof(reg);
1950
1951         if (kbuf)
1952                 for (; count > 0 && pos < PT_MSR; --count)
1953                         *k++ = regs[pos++];
1954         else
1955                 for (; count > 0 && pos < PT_MSR; --count)
1956                         if (__put_user((compat_ulong_t) regs[pos++], u++))
1957                                 return -EFAULT;
1958
1959         if (count > 0 && pos == PT_MSR) {
1960                 reg = get_user_msr(target);
1961                 if (kbuf)
1962                         *k++ = reg;
1963                 else if (__put_user(reg, u++))
1964                         return -EFAULT;
1965                 ++pos;
1966                 --count;
1967         }
1968
1969         if (kbuf)
1970                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
1971                         *k++ = regs[pos++];
1972         else
1973                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
1974                         if (__put_user((compat_ulong_t) regs[pos++], u++))
1975                                 return -EFAULT;
1976
1977         kbuf = k;
1978         ubuf = u;
1979         pos *= sizeof(reg);
1980         count *= sizeof(reg);
1981         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
1982                                         PT_REGS_COUNT * sizeof(reg), -1);
1983 }
1984
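/*
 * gpr32_set_common() is the converse: compat_ulong_t values are widened
 * back into the 64-bit pt_regs.  MSR and TRAP writes go through
 * set_user_msr() and set_user_trap() so only the user-modifiable bits
 * change, slots between PT_MAX_PUT_REG and PT_TRAP are read but discarded,
 * and anything past PT_TRAP is ignored.
 */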
1985 static int gpr32_set_common(struct task_struct *target,
1986                      const struct user_regset *regset,
1987                      unsigned int pos, unsigned int count,
1988                      const void *kbuf, const void __user *ubuf,
1989                      unsigned long *regs)
1990 {
1991         const compat_ulong_t *k = kbuf;
1992         const compat_ulong_t __user *u = ubuf;
1993         compat_ulong_t reg;
1994
1995         pos /= sizeof(reg);
1996         count /= sizeof(reg);
1997
1998         if (kbuf)
1999                 for (; count > 0 && pos < PT_MSR; --count)
2000                         regs[pos++] = *k++;
2001         else
2002                 for (; count > 0 && pos < PT_MSR; --count) {
2003                         if (__get_user(reg, u++))
2004                                 return -EFAULT;
2005                         regs[pos++] = reg;
2006                 }
2007
2008
2009         if (count > 0 && pos == PT_MSR) {
2010                 if (kbuf)
2011                         reg = *k++;
2012                 else if (__get_user(reg, u++))
2013                         return -EFAULT;
2014                 set_user_msr(target, reg);
2015                 ++pos;
2016                 --count;
2017         }
2018
2019         if (kbuf) {
2020                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2021                         regs[pos++] = *k++;
2022                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2023                         ++k;
2024         } else {
2025                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2026                         if (__get_user(reg, u++))
2027                                 return -EFAULT;
2028                         regs[pos++] = reg;
2029                 }
2030                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2031                         if (__get_user(reg, u++))
2032                                 return -EFAULT;
2033         }
2034
2035         if (count > 0 && pos == PT_TRAP) {
2036                 if (kbuf)
2037                         reg = *k++;
2038                 else if (__get_user(reg, u++))
2039                         return -EFAULT;
2040                 set_user_trap(target, reg);
2041                 ++pos;
2042                 --count;
2043         }
2044
2045         kbuf = k;
2046         ubuf = u;
2047         pos *= sizeof(reg);
2048         count *= sizeof(reg);
2049         return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2050                                          (PT_TRAP + 1) * sizeof(reg), -1);
2051 }
2052
2053 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2054 static int tm_cgpr32_get(struct task_struct *target,
2055                      const struct user_regset *regset,
2056                      unsigned int pos, unsigned int count,
2057                      void *kbuf, void __user *ubuf)
2058 {
2059         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2060                         &target->thread.ckpt_regs.gpr[0]);
2061 }
2062
2063 static int tm_cgpr32_set(struct task_struct *target,
2064                      const struct user_regset *regset,
2065                      unsigned int pos, unsigned int count,
2066                      const void *kbuf, const void __user *ubuf)
2067 {
2068         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2069                         &target->thread.ckpt_regs.gpr[0]);
2070 }
2071 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2072
2073 static int gpr32_get(struct task_struct *target,
2074                      const struct user_regset *regset,
2075                      unsigned int pos, unsigned int count,
2076                      void *kbuf, void __user *ubuf)
2077 {
2078         int i;
2079
2080         if (target->thread.regs == NULL)
2081                 return -EIO;
2082
2083         if (!FULL_REGS(target->thread.regs)) {
2084                 /*
2085                  * We have a partial register set.
2086                  * Fill 14-31 with bogus values.
2087                  */
2088                 for (i = 14; i < 32; i++)
2089                         target->thread.regs->gpr[i] = NV_REG_POISON;
2090         }
2091         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2092                         &target->thread.regs->gpr[0]);
2093 }
2094
2095 static int gpr32_set(struct task_struct *target,
2096                      const struct user_regset *regset,
2097                      unsigned int pos, unsigned int count,
2098                      const void *kbuf, const void __user *ubuf)
2099 {
2100         if (target->thread.regs == NULL)
2101                 return -EIO;
2102
2103         CHECK_FULL_REGS(target->thread.regs);
2104         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2105                         &target->thread.regs->gpr[0]);
2106 }
2107
2108 /*
2109  * These are the regset flavors matching the CONFIG_PPC32 native set.
2110  */
2111 static const struct user_regset compat_regsets[] = {
2112         [REGSET_GPR] = {
2113                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2114                 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2115                 .get = gpr32_get, .set = gpr32_set
2116         },
2117         [REGSET_FPR] = {
2118                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2119                 .size = sizeof(double), .align = sizeof(double),
2120                 .get = fpr_get, .set = fpr_set
2121         },
2122 #ifdef CONFIG_ALTIVEC
2123         [REGSET_VMX] = {
2124                 .core_note_type = NT_PPC_VMX, .n = 34,
2125                 .size = sizeof(vector128), .align = sizeof(vector128),
2126                 .active = vr_active, .get = vr_get, .set = vr_set
2127         },
2128 #endif
2129 #ifdef CONFIG_SPE
2130         [REGSET_SPE] = {
2131                 .core_note_type = NT_PPC_SPE, .n = 35,
2132                 .size = sizeof(u32), .align = sizeof(u32),
2133                 .active = evr_active, .get = evr_get, .set = evr_set
2134         },
2135 #endif
2136 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2137         [REGSET_TM_CGPR] = {
2138                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2139                 .size = sizeof(long), .align = sizeof(long),
2140                 .active = tm_cgpr_active,
2141                 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2142         },
2143         [REGSET_TM_CFPR] = {
2144                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2145                 .size = sizeof(double), .align = sizeof(double),
2146                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2147         },
2148         [REGSET_TM_CVMX] = {
2149                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2150                 .size = sizeof(vector128), .align = sizeof(vector128),
2151                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2152         },
2153         [REGSET_TM_CVSX] = {
2154                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2155                 .size = sizeof(double), .align = sizeof(double),
2156                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2157         },
2158         [REGSET_TM_SPR] = {
2159                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2160                 .size = sizeof(u64), .align = sizeof(u64),
2161                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2162         },
2163         [REGSET_TM_CTAR] = {
2164                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2165                 .size = sizeof(u64), .align = sizeof(u64),
2166                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2167         },
2168         [REGSET_TM_CPPR] = {
2169                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2170                 .size = sizeof(u64), .align = sizeof(u64),
2171                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2172         },
2173         [REGSET_TM_CDSCR] = {
2174                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2175                 .size = sizeof(u64), .align = sizeof(u64),
2176                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2177         },
2178 #endif
2179 #ifdef CONFIG_PPC64
2180         [REGSET_PPR] = {
2181                 .core_note_type = NT_PPC_PPR, .n = 1,
2182                 .size = sizeof(u64), .align = sizeof(u64),
2183                 .get = ppr_get, .set = ppr_set
2184         },
2185         [REGSET_DSCR] = {
2186                 .core_note_type = NT_PPC_DSCR, .n = 1,
2187                 .size = sizeof(u64), .align = sizeof(u64),
2188                 .get = dscr_get, .set = dscr_set
2189         },
2190 #endif
2191 #ifdef CONFIG_PPC_BOOK3S_64
2192         [REGSET_TAR] = {
2193                 .core_note_type = NT_PPC_TAR, .n = 1,
2194                 .size = sizeof(u64), .align = sizeof(u64),
2195                 .get = tar_get, .set = tar_set
2196         },
2197         [REGSET_EBB] = {
2198                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2199                 .size = sizeof(u64), .align = sizeof(u64),
2200                 .active = ebb_active, .get = ebb_get, .set = ebb_set
2201         },
2202 #endif
2203 };
2204
2205 static const struct user_regset_view user_ppc_compat_view = {
2206         .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2207         .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2208 };
2209 #endif  /* CONFIG_PPC64 */
2210
2211 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2212 {
2213 #ifdef CONFIG_PPC64
2214         if (test_tsk_thread_flag(task, TIF_32BIT))
2215                 return &user_ppc_compat_view;
2216 #endif
2217         return &user_ppc_native_view;
2218 }
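
/*
 * Illustrative tracer-side sketch (not part of this file, only a sketch):
 * reading the GPR regset of a stopped tracee of the same bitness with
 * PTRACE_GETREGSET.  NT_PRSTATUS selects REGSET_GPR from whichever view
 * task_user_regset_view() picked above; struct pt_regs here is the one
 * exported by the powerpc uapi <asm/ptrace.h>.  Error handling is omitted.
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *	#include <asm/ptrace.h>
 *
 *	static long read_gprs(pid_t pid, struct pt_regs *regs)
 *	{
 *		struct iovec iov = {
 *			.iov_base = regs,
 *			.iov_len  = sizeof(*regs),
 *		};
 *
 *		return ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	}
 */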
2219
2220
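/*
 * Single stepping and branch ("block") stepping are driven through
 * MSR[SE]/MSR[BE] on classic processors, and through DBCR0[IC]/DBCR0[BT]
 * plus MSR[DE] on CONFIG_PPC_ADV_DEBUG_REGS (BookE-style) parts.  The
 * helpers below pick the right mechanism at compile time.
 */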
2221 void user_enable_single_step(struct task_struct *task)
2222 {
2223         struct pt_regs *regs = task->thread.regs;
2224
2225         if (regs != NULL) {
2226 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2227                 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2228                 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2229                 regs->msr |= MSR_DE;
2230 #else
2231                 regs->msr &= ~MSR_BE;
2232                 regs->msr |= MSR_SE;
2233 #endif
2234         }
2235         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2236 }
2237
2238 void user_enable_block_step(struct task_struct *task)
2239 {
2240         struct pt_regs *regs = task->thread.regs;
2241
2242         if (regs != NULL) {
2243 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2244                 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2245                 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2246                 regs->msr |= MSR_DE;
2247 #else
2248                 regs->msr &= ~MSR_SE;
2249                 regs->msr |= MSR_BE;
2250 #endif
2251         }
2252         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2253 }
2254
2255 void user_disable_single_step(struct task_struct *task)
2256 {
2257         struct pt_regs *regs = task->thread.regs;
2258
2259         if (regs != NULL) {
2260 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2261                 /*
2262                  * The logic to disable single stepping should be as
2263                  * simple as turning off the Instruction Complete flag.
2264                  * And, after doing so, if all debug flags are off, turn
2265                  * off DBCR0(IDM) and MSR(DE) .... Torez
2266                  */
2267                 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2268                 /*
2269                  * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2270                  */
2271                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2272                                         task->thread.debug.dbcr1)) {
2273                         /*
2274                          * All debug events were off.
2275                          */
2276                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2277                         regs->msr &= ~MSR_DE;
2278                 }
2279 #else
2280                 regs->msr &= ~(MSR_SE | MSR_BE);
2281 #endif
2282         }
2283         clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2284 }
2285
2286 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2287 void ptrace_triggered(struct perf_event *bp,
2288                       struct perf_sample_data *data, struct pt_regs *regs)
2289 {
2290         struct perf_event_attr attr;
2291
2292         /*
2293          * Disable the breakpoint request here since ptrace has defined a
2294          * one-shot behaviour for breakpoint exceptions in PPC64.
2295          * The SIGTRAP signal is generated automatically for us in do_dabr().
2296          * We don't have to do anything about that here.
2297          */
2298         attr = bp->attr;
2299         attr.disabled = true;
2300         modify_user_hw_breakpoint(bp, &attr);
2301 }
2302 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2303
2304 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2305                                unsigned long data)
2306 {
2307 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2308         int ret;
2309         struct thread_struct *thread = &(task->thread);
2310         struct perf_event *bp;
2311         struct perf_event_attr attr;
2312 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2313 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2314         struct arch_hw_breakpoint hw_brk;
2315 #endif
2316
2317         /* For ppc64 we support one DABR and no IABRs at the moment.
2318          *  For embedded processors we support one DAC and no IACs at the
2319          *  moment.
2320          */
2321         if (addr > 0)
2322                 return -EINVAL;
2323
2324         /* The bottom 3 bits in dabr are flags */
2325         if ((data & ~0x7UL) >= TASK_SIZE)
2326                 return -EIO;
2327
2328 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2329         /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2330          *  It was assumed, on previous implementations, that 3 bits were
2331          *  passed together with the data address, fitting the design of the
2332          *  DABR register, as follows:
2333          *
2334          *  bit 0: Read flag
2335          *  bit 1: Write flag
2336          *  bit 2: Breakpoint translation
2337          *
2338          *  Thus, we use them here as such.
2339          */
2340
2341         /* Ensure breakpoint translation bit is set */
2342         if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2343                 return -EIO;
2344         hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2345         hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2346         hw_brk.len = 8;
2347 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2348         bp = thread->ptrace_bps[0];
2349         if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
2350                 if (bp) {
2351                         unregister_hw_breakpoint(bp);
2352                         thread->ptrace_bps[0] = NULL;
2353                 }
2354                 return 0;
2355         }
2356         if (bp) {
2357                 attr = bp->attr;
2358                 attr.bp_addr = hw_brk.address;
2359                 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2360
2361                 /* Enable breakpoint */
2362                 attr.disabled = false;
2363
2364                 ret =  modify_user_hw_breakpoint(bp, &attr);
2365                 if (ret) {
2366                         return ret;
2367                 }
2368                 thread->ptrace_bps[0] = bp;
2369                 thread->hw_brk = hw_brk;
2370                 return 0;
2371         }
2372
2373         /* Create a new breakpoint request if one doesn't exist already */
2374         hw_breakpoint_init(&attr);
2375         attr.bp_addr = hw_brk.address;
2376         arch_bp_generic_fields(hw_brk.type,
2377                                &attr.bp_type);
2378
2379         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2380                                                ptrace_triggered, NULL, task);
2381         if (IS_ERR(bp)) {
2382                 thread->ptrace_bps[0] = NULL;
2383                 return PTR_ERR(bp);
2384         }
2385
2386 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2387         task->thread.hw_brk = hw_brk;
2388 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2389         /* As described above, it was assumed 3 bits were passed with the data
2390          *  address, but we will assume only the mode bits will be passed,
2391          *  so as not to cause alignment restrictions for DAC-based processors.
2392          */
2393
2394         /* DAC's hold the whole address without any mode flags */
2395         task->thread.debug.dac1 = data & ~0x3UL;
2396
2397         if (task->thread.debug.dac1 == 0) {
2398                 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2399                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2400                                         task->thread.debug.dbcr1)) {
2401                         task->thread.regs->msr &= ~MSR_DE;
2402                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2403                 }
2404                 return 0;
2405         }
2406
2407         /* Read or Write bits must be set */
2408
2409         if (!(data & 0x3UL))
2410                 return -EINVAL;
2411
2412         /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2413            register */
2414         task->thread.debug.dbcr0 |= DBCR0_IDM;
2415
2416         /* Check for write and read flags and set DBCR0
2417            accordingly */
2418         dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2419         if (data & 0x1UL)
2420                 dbcr_dac(task) |= DBCR_DAC1R;
2421         if (data & 0x2UL)
2422                 dbcr_dac(task) |= DBCR_DAC1W;
2423         task->thread.regs->msr |= MSR_DE;
2424 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2425         return 0;
2426 }
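
/*
 * Illustrative tracer-side sketch (not part of this file): on a DABR-style
 * (server) processor, the legacy PTRACE_SET_DEBUGREG request takes
 * addr == 0 and a data word whose low three bits are the flags described
 * in the comment above (read = 0x1, write = 0x2, translate = 0x4).  A
 * write watchpoint on `where` would look roughly like this;
 * PTRACE_SET_DEBUGREG comes from the powerpc uapi <asm/ptrace.h>.
 *
 *	unsigned long data = ((unsigned long)where & ~7UL) | 0x4 | 0x2;
 *
 *	ptrace(PTRACE_SET_DEBUGREG, pid, 0, data);
 */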
2427
2428 /*
2429  * Called by kernel/ptrace.c when detaching..
2430  *
2431  * Make sure single step bits etc are not set.
2432  */
2433 void ptrace_disable(struct task_struct *child)
2434 {
2435         /* make sure the single step bit is not set. */
2436         user_disable_single_step(child);
2437 }
2438
2439 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
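/*
 * Instruction breakpoints live in the IAC1-IAC4 slots.  On success the
 * slot number (1-4) is returned and becomes the PPC_PTRACE_SETHWDEBUG
 * handle that user space later passes to PPC_PTRACE_DELHWDEBUG; a range
 * breakpoint consumes a pair of slots (1/2 or 3/4).
 */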
2440 static long set_instruction_bp(struct task_struct *child,
2441                               struct ppc_hw_breakpoint *bp_info)
2442 {
2443         int slot;
2444         int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2445         int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2446         int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2447         int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
2448
2449         if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2450                 slot2_in_use = 1;
2451         if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2452                 slot4_in_use = 1;
2453
2454         if (bp_info->addr >= TASK_SIZE)
2455                 return -EIO;
2456
2457         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2458
2459                 /* Make sure range is valid. */
2460                 if (bp_info->addr2 >= TASK_SIZE)
2461                         return -EIO;
2462
2463                 /* We need a pair of IAC registers */
2464                 if ((!slot1_in_use) && (!slot2_in_use)) {
2465                         slot = 1;
2466                         child->thread.debug.iac1 = bp_info->addr;
2467                         child->thread.debug.iac2 = bp_info->addr2;
2468                         child->thread.debug.dbcr0 |= DBCR0_IAC1;
2469                         if (bp_info->addr_mode ==
2470                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2471                                 dbcr_iac_range(child) |= DBCR_IAC12X;
2472                         else
2473                                 dbcr_iac_range(child) |= DBCR_IAC12I;
2474 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2475                 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2476                         slot = 3;
2477                         child->thread.debug.iac3 = bp_info->addr;
2478                         child->thread.debug.iac4 = bp_info->addr2;
2479                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2480                         if (bp_info->addr_mode ==
2481                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2482                                 dbcr_iac_range(child) |= DBCR_IAC34X;
2483                         else
2484                                 dbcr_iac_range(child) |= DBCR_IAC34I;
2485 #endif
2486                 } else
2487                         return -ENOSPC;
2488         } else {
2489                 /* We only need one.  If possible leave a pair free in
2490                  * case a range is needed later
2491                  */
2492                 if (!slot1_in_use) {
2493                         /*
2494                          * Don't use iac1 if iac1-iac2 are free and either
2495                          * iac3 or iac4 (but not both) are free
2496                          */
2497                         if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2498                                 slot = 1;
2499                                 child->thread.debug.iac1 = bp_info->addr;
2500                                 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2501                                 goto out;
2502                         }
2503                 }
2504                 if (!slot2_in_use) {
2505                         slot = 2;
2506                         child->thread.debug.iac2 = bp_info->addr;
2507                         child->thread.debug.dbcr0 |= DBCR0_IAC2;
2508 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2509                 } else if (!slot3_in_use) {
2510                         slot = 3;
2511                         child->thread.debug.iac3 = bp_info->addr;
2512                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2513                 } else if (!slot4_in_use) {
2514                         slot = 4;
2515                         child->thread.debug.iac4 = bp_info->addr;
2516                         child->thread.debug.dbcr0 |= DBCR0_IAC4;
2517 #endif
2518                 } else
2519                         return -ENOSPC;
2520         }
2521 out:
2522         child->thread.debug.dbcr0 |= DBCR0_IDM;
2523         child->thread.regs->msr |= MSR_DE;
2524
2525         return slot;
2526 }
2527
2528 static int del_instruction_bp(struct task_struct *child, int slot)
2529 {
2530         switch (slot) {
2531         case 1:
2532                 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2533                         return -ENOENT;
2534
2535                 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2536                         /* address range - clear slots 1 & 2 */
2537                         child->thread.debug.iac2 = 0;
2538                         dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2539                 }
2540                 child->thread.debug.iac1 = 0;
2541                 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2542                 break;
2543         case 2:
2544                 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2545                         return -ENOENT;
2546
2547                 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2548                         /* used in a range */
2549                         return -EINVAL;
2550                 child->thread.debug.iac2 = 0;
2551                 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2552                 break;
2553 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2554         case 3:
2555                 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2556                         return -ENOENT;
2557
2558                 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2559                         /* address range - clear slots 3 & 4 */
2560                         child->thread.debug.iac4 = 0;
2561                         dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2562                 }
2563                 child->thread.debug.iac3 = 0;
2564                 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2565                 break;
2566         case 4:
2567                 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2568                         return -ENOENT;
2569
2570                 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2571                         /* Used in a range */
2572                         return -EINVAL;
2573                 child->thread.debug.iac4 = 0;
2574                 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
2575                 break;
2576 #endif
2577         default:
2578                 return -EINVAL;
2579         }
2580         return 0;
2581 }
2582
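/*
 * Data breakpoints use the DAC1/DAC2 slots.  The handle returned to user
 * space is the slot number plus 4 so it never collides with the
 * instruction-breakpoint handles above; ppc_del_hwdebug() subtracts the
 * offset again before calling del_dac().
 */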
2583 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2584 {
2585         int byte_enable =
2586                 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2587                 & 0xf;
2588         int condition_mode =
2589                 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2590         int slot;
2591
2592         if (byte_enable && (condition_mode == 0))
2593                 return -EINVAL;
2594
2595         if (bp_info->addr >= TASK_SIZE)
2596                 return -EIO;
2597
2598         if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2599                 slot = 1;
2600                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2601                         dbcr_dac(child) |= DBCR_DAC1R;
2602                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2603                         dbcr_dac(child) |= DBCR_DAC1W;
2604                 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2605 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2606                 if (byte_enable) {
2607                         child->thread.debug.dvc1 =
2608                                 (unsigned long)bp_info->condition_value;
2609                         child->thread.debug.dbcr2 |=
2610                                 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2611                                  (condition_mode << DBCR2_DVC1M_SHIFT));
2612                 }
2613 #endif
2614 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2615         } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2616                 /* Both dac1 and dac2 are part of a range */
2617                 return -ENOSPC;
2618 #endif
2619         } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2620                 slot = 2;
2621                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2622                         dbcr_dac(child) |= DBCR_DAC2R;
2623                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2624                         dbcr_dac(child) |= DBCR_DAC2W;
2625                 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2626 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2627                 if (byte_enable) {
2628                         child->thread.debug.dvc2 =
2629                                 (unsigned long)bp_info->condition_value;
2630                         child->thread.debug.dbcr2 |=
2631                                 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2632                                  (condition_mode << DBCR2_DVC2M_SHIFT));
2633                 }
2634 #endif
2635         } else
2636                 return -ENOSPC;
2637         child->thread.debug.dbcr0 |= DBCR0_IDM;
2638         child->thread.regs->msr |= MSR_DE;
2639
2640         return slot + 4;
2641 }
2642
2643 static int del_dac(struct task_struct *child, int slot)
2644 {
2645         if (slot == 1) {
2646                 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2647                         return -ENOENT;
2648
2649                 child->thread.debug.dac1 = 0;
2650                 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2651 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2652                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2653                         child->thread.debug.dac2 = 0;
2654                         child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2655                 }
2656                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2657 #endif
2658 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2659                 child->thread.debug.dvc1 = 0;
2660 #endif
2661         } else if (slot == 2) {
2662                 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2663                         return -ENOENT;
2664
2665 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2666                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2667                         /* Part of a range */
2668                         return -EINVAL;
2669                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2670 #endif
2671 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2672                 child->thread.debug.dvc2 = 0;
2673 #endif
2674                 child->thread.debug.dac2 = 0;
2675                 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2676         } else
2677                 return -EINVAL;
2678
2679         return 0;
2680 }
2681 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2682
2683 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2684 static int set_dac_range(struct task_struct *child,
2685                          struct ppc_hw_breakpoint *bp_info)
2686 {
2687         int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2688
2689         /* We don't allow range watchpoints to be used with DVC */
2690         if (bp_info->condition_mode)
2691                 return -EINVAL;
2692
2693         /*
2694          * Best effort to verify the address range.  The user/supervisor bits
2695          * prevent trapping in kernel space, but let's fail on an obvious bad
2696          * range.  The simple test on the mask is not fool-proof, and any
2697          * exclusive range will spill over into kernel space.
2698          */
2699         if (bp_info->addr >= TASK_SIZE)
2700                 return -EIO;
2701         if (mode == PPC_BREAKPOINT_MODE_MASK) {
2702                 /*
2703                  * dac2 is a bitmask.  Don't allow a mask that makes a
2704                  * kernel space address from a valid dac1 value
2705                  */
2706                 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2707                         return -EIO;
2708         } else {
2709                 /*
2710                  * For range breakpoints, addr2 must also be a valid address
2711                  */
2712                 if (bp_info->addr2 >= TASK_SIZE)
2713                         return -EIO;
2714         }
2715
2716         if (child->thread.debug.dbcr0 &
2717             (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2718                 return -ENOSPC;
2719
2720         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2721                 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2722         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2723                 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2724         child->thread.debug.dac1 = bp_info->addr;
2725         child->thread.debug.dac2 = bp_info->addr2;
2726         if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2727                 child->thread.debug.dbcr2  |= DBCR2_DAC12M;
2728         else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2729                 child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
2730         else    /* PPC_BREAKPOINT_MODE_MASK */
2731                 child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
2732         child->thread.regs->msr |= MSR_DE;
2733
2734         return 5;
2735 }
2736 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2737
2738 static long ppc_set_hwdebug(struct task_struct *child,
2739                      struct ppc_hw_breakpoint *bp_info)
2740 {
2741 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2742         int len = 0;
2743         struct thread_struct *thread = &(child->thread);
2744         struct perf_event *bp;
2745         struct perf_event_attr attr;
2746 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2747 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2748         struct arch_hw_breakpoint brk;
2749 #endif
2750
2751         if (bp_info->version != 1)
2752                 return -ENOTSUPP;
2753 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2754         /*
2755          * Check for invalid flags and combinations
2756          */
2757         if ((bp_info->trigger_type == 0) ||
2758             (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2759                                        PPC_BREAKPOINT_TRIGGER_RW)) ||
2760             (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2761             (bp_info->condition_mode &
2762              ~(PPC_BREAKPOINT_CONDITION_MODE |
2763                PPC_BREAKPOINT_CONDITION_BE_ALL)))
2764                 return -EINVAL;
2765 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2766         if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2767                 return -EINVAL;
2768 #endif
2769
2770         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2771                 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2772                     (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2773                         return -EINVAL;
2774                 return set_instruction_bp(child, bp_info);
2775         }
2776         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2777                 return set_dac(child, bp_info);
2778
2779 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2780         return set_dac_range(child, bp_info);
2781 #else
2782         return -EINVAL;
2783 #endif
2784 #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2785         /*
2786          * We only support one data breakpoint
2787          */
2788         if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2789             (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2790             bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2791                 return -EINVAL;
2792
2793         if ((unsigned long)bp_info->addr >= TASK_SIZE)
2794                 return -EIO;
2795
2796         brk.address = bp_info->addr & ~7UL;
2797         brk.type = HW_BRK_TYPE_TRANSLATE;
2798         brk.len = 8;
2799         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2800                 brk.type |= HW_BRK_TYPE_READ;
2801         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2802                 brk.type |= HW_BRK_TYPE_WRITE;
2803 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2804         /*
2805          * Check if the request is for 'range' breakpoints. We can
2806          * support it if range < 8 bytes.
2807          */
2808         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2809                 len = bp_info->addr2 - bp_info->addr;
2810         else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2811                 len = 1;
2812         else
2813                 return -EINVAL;
2814         bp = thread->ptrace_bps[0];
2815         if (bp)
2816                 return -ENOSPC;
2817
2818         /* Create a new breakpoint request if one doesn't exist already */
2819         hw_breakpoint_init(&attr);
2820         attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2821         attr.bp_len = len;
2822         arch_bp_generic_fields(brk.type, &attr.bp_type);
2823
2824         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2825                                                ptrace_triggered, NULL, child);
2826         if (IS_ERR(bp)) {
2827                 thread->ptrace_bps[0] = NULL;
2828                 return PTR_ERR(bp);
2829         }
2830
2831         return 1;
2832 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2833
2834         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2835                 return -EINVAL;
2836
2837         if (child->thread.hw_brk.address)
2838                 return -ENOSPC;
2839
2840         child->thread.hw_brk = brk;
2841
2842         return 1;
2843 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2844 }
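
/*
 * Illustrative tracer-side sketch (not part of this file): requesting an
 * exact-address write watchpoint through PPC_PTRACE_SETHWDEBUG and
 * releasing it with the returned handle.  struct ppc_hw_breakpoint and the
 * PPC_BREAKPOINT_* constants come from the powerpc uapi <asm/ptrace.h>;
 * error handling is omitted.
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = (__u64)(unsigned long)where,
 *	};
 *	long handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *
 *	... later ...
 *
 *	ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, handle);
 */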
2845
2846 static long ppc_del_hwdebug(struct task_struct *child, long data)
2847 {
2848 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2849         int ret = 0;
2850         struct thread_struct *thread = &(child->thread);
2851         struct perf_event *bp;
2852 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2853 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2854         int rc;
2855
2856         if (data <= 4)
2857                 rc = del_instruction_bp(child, (int)data);
2858         else
2859                 rc = del_dac(child, (int)data - 4);
2860
2861         if (!rc) {
2862                 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2863                                         child->thread.debug.dbcr1)) {
2864                         child->thread.debug.dbcr0 &= ~DBCR0_IDM;
2865                         child->thread.regs->msr &= ~MSR_DE;
2866                 }
2867         }
2868         return rc;
2869 #else
2870         if (data != 1)
2871                 return -EINVAL;
2872
2873 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2874         bp = thread->ptrace_bps[0];
2875         if (bp) {
2876                 unregister_hw_breakpoint(bp);
2877                 thread->ptrace_bps[0] = NULL;
2878         } else
2879                 ret = -ENOENT;
2880         return ret;
2881 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2882         if (child->thread.hw_brk.address == 0)
2883                 return -ENOENT;
2884
2885         child->thread.hw_brk.address = 0;
2886         child->thread.hw_brk.type = 0;
2887 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2888
2889         return 0;
2890 #endif
2891 }
2892
2893 long arch_ptrace(struct task_struct *child, long request,
2894                  unsigned long addr, unsigned long data)
2895 {
2896         int ret = -EPERM;
2897         void __user *datavp = (void __user *) data;
2898         unsigned long __user *datalp = datavp;
2899
2900         switch (request) {
2901         /* read the word at location addr in the USER area. */
2902         case PTRACE_PEEKUSR: {
2903                 unsigned long index, tmp;
2904
2905                 ret = -EIO;
2906                 /* convert to index and check */
2907 #ifdef CONFIG_PPC32
2908                 index = addr >> 2;
2909                 if ((addr & 3) || (index > PT_FPSCR)
2910                     || (child->thread.regs == NULL))
2911 #else
2912                 index = addr >> 3;
2913                 if ((addr & 7) || (index > PT_FPSCR))
2914 #endif
2915                         break;
2916
2917                 CHECK_FULL_REGS(child->thread.regs);
2918                 if (index < PT_FPR0) {
2919                         ret = ptrace_get_reg(child, (int) index, &tmp);
2920                         if (ret)
2921                                 break;
2922                 } else {
2923                         unsigned int fpidx = index - PT_FPR0;
2924
2925                         flush_fp_to_thread(child);
2926                         if (fpidx < (PT_FPSCR - PT_FPR0))
2927                                 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
2928                                        sizeof(long));
2929                         else
2930                                 tmp = child->thread.fp_state.fpscr;
2931                 }
2932                 ret = put_user(tmp, datalp);
2933                 break;
2934         }
2935
2936         /* write the word at location addr in the USER area */
2937         case PTRACE_POKEUSR: {
2938                 unsigned long index;
2939
2940                 ret = -EIO;
2941                 /* convert to index and check */
2942 #ifdef CONFIG_PPC32
2943                 index = addr >> 2;
2944                 if ((addr & 3) || (index > PT_FPSCR)
2945                     || (child->thread.regs == NULL))
2946 #else
2947                 index = addr >> 3;
2948                 if ((addr & 7) || (index > PT_FPSCR))
2949 #endif
2950                         break;
2951
2952                 CHECK_FULL_REGS(child->thread.regs);
2953                 if (index < PT_FPR0) {
2954                         ret = ptrace_put_reg(child, index, data);
2955                 } else {
2956                         unsigned int fpidx = index - PT_FPR0;
2957
2958                         flush_fp_to_thread(child);
2959                         if (fpidx < (PT_FPSCR - PT_FPR0))
2960                                 memcpy(&child->thread.TS_FPR(fpidx), &data,
2961                                        sizeof(long));
2962                         else
2963                                 child->thread.fp_state.fpscr = data;
2964                         ret = 0;
2965                 }
2966                 break;
2967         }
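        /*
         * The POKEUSR counterpart of the sketch above (illustrative only;
         * 'new_r3' is a placeholder value): GPR writes go through
         * ptrace_put_reg(), while FPR/FPSCR writes land directly in the
         * saved FP state flushed just above.
         *
         *     if (ptrace(PTRACE_POKEUSER, pid,
         *                (void *)(PT_R3 * sizeof(unsigned long)),
         *                (void *)new_r3) == -1)
         *         perror("PTRACE_POKEUSER");
         */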
2968
2969         case PPC_PTRACE_GETHWDBGINFO: {
2970                 struct ppc_debug_info dbginfo;
2971
2972                 dbginfo.version = 1;
2973 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2974                 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
2975                 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
2976                 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
2977                 dbginfo.data_bp_alignment = 4;
2978                 dbginfo.sizeof_condition = 4;
2979                 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
2980                                    PPC_DEBUG_FEATURE_INSN_BP_MASK;
2981 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2982                 dbginfo.features |=
2983                                    PPC_DEBUG_FEATURE_DATA_BP_RANGE |
2984                                    PPC_DEBUG_FEATURE_DATA_BP_MASK;
2985 #endif
2986 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2987                 dbginfo.num_instruction_bps = 0;
2988                 dbginfo.num_data_bps = 1;
2989                 dbginfo.num_condition_regs = 0;
2990 #ifdef CONFIG_PPC64
2991                 dbginfo.data_bp_alignment = 8;
2992 #else
2993                 dbginfo.data_bp_alignment = 4;
2994 #endif
2995                 dbginfo.sizeof_condition = 0;
2996 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2997                 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
2998                 if (cpu_has_feature(CPU_FTR_DAWR))
2999                         dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3000 #else
3001                 dbginfo.features = 0;
3002 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3003 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3004
3005                 if (!access_ok(VERIFY_WRITE, datavp,
3006                                sizeof(struct ppc_debug_info)))
3007                         return -EFAULT;
3008                 ret = __copy_to_user(datavp, &dbginfo,
3009                                      sizeof(struct ppc_debug_info)) ?
3010                       -EFAULT : 0;
3011                 break;
3012         }
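        /*
         * Illustrative sketch of how a debugger would consume this: query
         * the capabilities once, then decide whether SETHWDEBUG below is
         * usable (struct ppc_debug_info comes from the uapi asm/ptrace.h).
         *
         *     struct ppc_debug_info info;
         *     if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &info) == 0 &&
         *         info.num_data_bps > 0) {
         *         // hardware data watchpoints available; see the sketch below
         *     }
         */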
3013
3014         case PPC_PTRACE_SETHWDEBUG: {
3015                 struct ppc_hw_breakpoint bp_info;
3016
3017                 if (!access_ok(VERIFY_READ, datavp,
3018                                sizeof(struct ppc_hw_breakpoint)))
3019                         return -EFAULT;
3020                 ret = __copy_from_user(&bp_info, datavp,
3021                                        sizeof(struct ppc_hw_breakpoint)) ?
3022                       -EFAULT : 0;
3023                 if (!ret)
3024                         ret = ppc_set_hwdebug(child, &bp_info);
3025                 break;
3026         }
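        /*
         * Illustrative sketch of programming a write watchpoint via this
         * request (field and constant names are the PPC_BREAKPOINT_* values
         * from the uapi asm/ptrace.h; 'watch_addr' is a placeholder):
         *
         *     struct ppc_hw_breakpoint bp = {
         *         .version        = PPC_DEBUG_CURRENT_VERSION,
         *         .trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
         *         .addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
         *         .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
         *         .addr           = (__u64)watch_addr,
         *     };
         *     long handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
         *     // a positive 'handle' is what PPC_PTRACE_DELHWDEBUG takes as data
         */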
3027
3028         case PPC_PTRACE_DELHWDEBUG: {
3029                 ret = ppc_del_hwdebug(child, data);
3030                 break;
3031         }
3032
3033         case PTRACE_GET_DEBUGREG: {
3034 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3035                 unsigned long dabr_fake;
3036 #endif
3037                 ret = -EINVAL;
3038                 /* We only support one DABR and no IABRs at the moment */
3039                 if (addr > 0)
3040                         break;
3041 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3042                 ret = put_user(child->thread.debug.dac1, datalp);
3043 #else
3044                 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3045                              (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3046                 ret = put_user(dabr_fake, datalp);
3047 #endif
3048                 break;
3049         }
3050
3051         case PTRACE_SET_DEBUGREG:
3052                 ret = ptrace_set_debugreg(child, addr, data);
3053                 break;
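        /*
         * Legacy interface sketch (illustrative): older tracers read a
         * single DABR-style value instead of using GETHWDBGINFO/SETHWDEBUG
         * above; the low bits of the returned word carry the flags that
         * dabr_fake reconstructs from hw_brk.type.
         *
         *     unsigned long dabr_value;
         *     if (ptrace(PTRACE_GET_DEBUGREG, pid, 0, &dabr_value) == 0)
         *         ;  // address in the upper bits, HW_BRK_TYPE_DABR flags below
         */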
3054
3055 #ifdef CONFIG_PPC64
3056         case PTRACE_GETREGS64:
3057 #endif
3058         case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
3059                 return copy_regset_to_user(child, &user_ppc_native_view,
3060                                            REGSET_GPR,
3061                                            0, sizeof(struct pt_regs),
3062                                            datavp);
3063
3064 #ifdef CONFIG_PPC64
3065         case PTRACE_SETREGS64:
3066 #endif
3067         case PTRACE_SETREGS:    /* Set all gp regs in the child. */
3068                 return copy_regset_from_user(child, &user_ppc_native_view,
3069                                              REGSET_GPR,
3070                                              0, sizeof(struct pt_regs),
3071                                              datavp);
3072
3073         case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3074                 return copy_regset_to_user(child, &user_ppc_native_view,
3075                                            REGSET_FPR,
3076                                            0, sizeof(elf_fpregset_t),
3077                                            datavp);
3078
3079         case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3080                 return copy_regset_from_user(child, &user_ppc_native_view,
3081                                              REGSET_FPR,
3082                                              0, sizeof(elf_fpregset_t),
3083                                              datavp);
3084
3085 #ifdef CONFIG_ALTIVEC
3086         case PTRACE_GETVRREGS:
3087                 return copy_regset_to_user(child, &user_ppc_native_view,
3088                                            REGSET_VMX,
3089                                            0, (33 * sizeof(vector128) +
3090                                                sizeof(u32)),
3091                                            datavp);
3092
3093         case PTRACE_SETVRREGS:
3094                 return copy_regset_from_user(child, &user_ppc_native_view,
3095                                              REGSET_VMX,
3096                                              0, (33 * sizeof(vector128) +
3097                                                  sizeof(u32)),
3098                                              datavp);
3099 #endif
3100 #ifdef CONFIG_VSX
3101         case PTRACE_GETVSRREGS:
3102                 return copy_regset_to_user(child, &user_ppc_native_view,
3103                                            REGSET_VSX,
3104                                            0, 32 * sizeof(double),
3105                                            datavp);
3106
3107         case PTRACE_SETVSRREGS:
3108                 return copy_regset_from_user(child, &user_ppc_native_view,
3109                                              REGSET_VSX,
3110                                              0, 32 * sizeof(double),
3111                                              datavp);
3112 #endif
3113 #ifdef CONFIG_SPE
3114         case PTRACE_GETEVRREGS:
3115                 /* Get the child SPE register state. */
3116                 return copy_regset_to_user(child, &user_ppc_native_view,
3117                                            REGSET_SPE, 0, 35 * sizeof(u32),
3118                                            datavp);
3119
3120         case PTRACE_SETEVRREGS:
3121                 /* Set the child SPE register state. */
3122                 return copy_regset_from_user(child, &user_ppc_native_view,
3123                                              REGSET_SPE, 0, 35 * sizeof(u32),
3124                                              datavp);
3125 #endif
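        /*
         * All of the GETREGS/SETREGS-style requests above are serviced
         * through the regset views.  Tracer-side sketch (assuming the
         * struct pt_regs layout exported by the uapi asm/ptrace.h):
         *
         *     struct pt_regs uregs;
         *     if (ptrace(PTRACE_GETREGS, pid, 0, &uregs) == 0)
         *         printf("nip=%lx sp=%lx\n", uregs.nip, uregs.gpr[1]);
         */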
3126
3127         default:
3128                 ret = ptrace_request(child, request, addr, data);
3129                 break;
3130         }
3131         return ret;
3132 }
3133
3134 #ifdef CONFIG_SECCOMP
3135 static int do_seccomp(struct pt_regs *regs)
3136 {
3137         if (!test_thread_flag(TIF_SECCOMP))
3138                 return 0;
3139
3140         /*
3141          * The ABI we present to seccomp tracers is that r3 contains
3142          * the syscall return value and orig_gpr3 contains the first
3143  * syscall parameter. This is different from the ptrace ABI, where
3144          * both r3 and orig_gpr3 contain the first syscall parameter.
3145          */
3146         regs->gpr[3] = -ENOSYS;
3147
3148         /*
3149          * We use the __ version here because we have already checked
3150          * TIF_SECCOMP. If this fails, there is nothing left to do, we
3151          * have already loaded -ENOSYS into r3, or seccomp has put
3152          * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3153          */
3154         if (__secure_computing(NULL))
3155                 return -1;
3156
3157         /*
3158          * The syscall was allowed by seccomp, restore the register
3159          * state to what audit expects.
3160          * Note that we use orig_gpr3, which means a seccomp tracer can
3161          * modify the first syscall parameter (in orig_gpr3) and also
3162          * allow the syscall to proceed.
3163          */
3164         regs->gpr[3] = regs->orig_gpr3;
3165
3166         return 0;
3167 }
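/*
 * Consequence of the ABI described above, seen from the tracer side
 * (illustrative sketch): a ptracer stopped at PTRACE_EVENT_SECCOMP must
 * take the first syscall argument from orig_gpr3, because gpr[3] already
 * holds the provisional -ENOSYS return value at that point.
 *
 *     struct pt_regs uregs;
 *     ptrace(PTRACE_GETREGS, pid, 0, &uregs);
 *     long sysno = uregs.gpr[0];
 *     long arg0  = uregs.orig_gpr3;   // not uregs.gpr[3] at this stop
 */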
3168 #else
3169 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3170 #endif /* CONFIG_SECCOMP */
3171
3172 /**
3173  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3174  * @regs: the pt_regs of the task to trace (current)
3175  *
3176  * Performs various types of tracing on syscall entry. This includes seccomp,
3177  * ptrace, syscall tracepoints and audit.
3178  *
3179  * The pt_regs are potentially visible to userspace via ptrace, so their
3180  * contents are ABI.
3181  *
3182  * One or more of the tracers may modify the contents of pt_regs, in particular
3183  * to modify arguments or even the syscall number itself.
3184  *
3185  * It's also possible that a tracer can choose to reject the system call. In
3186  * that case this function will return an illegal syscall number, and will put
3187  * an appropriate return value in regs->gpr[3].
3188  *
3189  * Return: the (possibly changed) syscall number.
3190  */
3191 long do_syscall_trace_enter(struct pt_regs *regs)
3192 {
3193         user_exit();
3194
3195         /*
3196          * The tracer may decide to abort the syscall, if so tracehook
3197          * will return !0. Note that the tracer may also just change
3198          * regs->gpr[0] to an invalid syscall number, that is handled
3199          * below on the exit path.
3200          */
3201         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3202             tracehook_report_syscall_entry(regs))
3203                 goto skip;
3204
3205         /* Run seccomp after ptrace; allow it to set gpr[3]. */
3206         if (do_seccomp(regs))
3207                 return -1;
3208
3209         /* Avoid trace and audit when syscall is invalid. */
3210         if (regs->gpr[0] >= NR_syscalls)
3211                 goto skip;
3212
3213         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3214                 trace_sys_enter(regs, regs->gpr[0]);
3215
3216 #ifdef CONFIG_PPC64
3217         if (!is_32bit_task())
3218                 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3219                                     regs->gpr[5], regs->gpr[6]);
3220         else
3221 #endif
3222                 audit_syscall_entry(regs->gpr[0],
3223                                     regs->gpr[3] & 0xffffffff,
3224                                     regs->gpr[4] & 0xffffffff,
3225                                     regs->gpr[5] & 0xffffffff,
3226                                     regs->gpr[6] & 0xffffffff);
3227
3228         /* Return the possibly modified but valid syscall number */
3229         return regs->gpr[0];
3230
3231 skip:
3232         /*
3233          * If we are aborting explicitly, or if the syscall number is
3234          * now invalid, set the return value to -ENOSYS.
3235          */
3236         regs->gpr[3] = -ENOSYS;
3237         return -1;
3238 }
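/*
 * Tracer-side illustration of the rejection path documented above
 * (sketch, assuming the PT_R0 index from the uapi asm/ptrace.h): at a
 * syscall-entry stop, overwriting gpr[0] with an invalid number makes the
 * gpr[0] >= NR_syscalls check above take the skip path, which loads
 * -ENOSYS into gpr[3] and abandons the syscall.
 *
 *     ptrace(PTRACE_POKEUSER, pid,
 *            (void *)(PT_R0 * sizeof(unsigned long)), (void *)-1L);
 */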
3239
3240 void do_syscall_trace_leave(struct pt_regs *regs)
3241 {
3242         int step;
3243
3244         audit_syscall_exit(regs);
3245
3246         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3247                 trace_sys_exit(regs, regs->result);
3248
3249         step = test_thread_flag(TIF_SINGLESTEP);
3250         if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3251                 tracehook_report_syscall_exit(regs, step);
3252
3253         user_enter();
3254 }