]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - arch/arm64/kernel/hw_breakpoint.c
Merge tag 'xfs-for-linus-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc...
[karo-tx-linux.git] / arch / arm64 / kernel / hw_breakpoint.c
1 /*
2  * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
3  * using the CPU's debug registers.
4  *
5  * Copyright (C) 2012 ARM Limited
6  * Author: Will Deacon <will.deacon@arm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #define pr_fmt(fmt) "hw-breakpoint: " fmt
22
23 #include <linux/compat.h>
24 #include <linux/cpu_pm.h>
25 #include <linux/errno.h>
26 #include <linux/hw_breakpoint.h>
27 #include <linux/perf_event.h>
28 #include <linux/ptrace.h>
29 #include <linux/smp.h>
30
31 #include <asm/current.h>
32 #include <asm/debug-monitors.h>
33 #include <asm/hw_breakpoint.h>
34 #include <asm/traps.h>
35 #include <asm/cputype.h>
36 #include <asm/system_misc.h>
37
38 /* Breakpoint currently in use for each BRP. */
39 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
40
41 /* Watchpoint currently in use for each WRP. */
42 static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
43
44 /* Currently stepping a per-CPU kernel breakpoint. */
45 static DEFINE_PER_CPU(int, stepping_kernel_bp);
46
47 /* Number of BRP/WRP registers on this CPU. */
48 static int core_num_brps;
49 static int core_num_wrps;
50
51 /* Determine number of BRP registers available. */
52 static int get_num_brps(void)
53 {
54         return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
55 }
56
57 /* Determine number of WRP registers available. */
58 static int get_num_wrps(void)
59 {
60         return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
61 }
62
63 int hw_breakpoint_slots(int type)
64 {
65         /*
66          * We can be called early, so don't rely on
67          * our static variables being initialised.
68          */
69         switch (type) {
70         case TYPE_INST:
71                 return get_num_brps();
72         case TYPE_DATA:
73                 return get_num_wrps();
74         default:
75                 pr_warning("unknown slot type: %d\n", type);
76                 return 0;
77         }
78 }
79
/*
 * The debug registers are system registers whose names must be spelled
 * out at compile time, so a runtime index cannot be used directly.
 * These macros generate one switch case per register index (0..15),
 * each dispatching to the AARCH64_DBG_READ/WRITE accessor for that
 * specific numbered register.
 */
#define READ_WB_REG_CASE(OFF, N, REG, VAL)      \
	case (OFF + N):                         \
		AARCH64_DBG_READ(N, REG, VAL);  \
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)     \
	case (OFF + N):                         \
		AARCH64_DBG_WRITE(N, REG, VAL); \
		break

/* Expand to 16 read cases (indices 0-15) for one register bank. */
#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)    \
	READ_WB_REG_CASE(OFF,  0, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  1, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  2, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  3, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  4, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  5, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  6, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  7, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  8, REG, VAL);    \
	READ_WB_REG_CASE(OFF,  9, REG, VAL);    \
	READ_WB_REG_CASE(OFF, 10, REG, VAL);    \
	READ_WB_REG_CASE(OFF, 11, REG, VAL);    \
	READ_WB_REG_CASE(OFF, 12, REG, VAL);    \
	READ_WB_REG_CASE(OFF, 13, REG, VAL);    \
	READ_WB_REG_CASE(OFF, 14, REG, VAL);    \
	READ_WB_REG_CASE(OFF, 15, REG, VAL)

/* Expand to 16 write cases (indices 0-15) for one register bank. */
#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)   \
	WRITE_WB_REG_CASE(OFF,  0, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  1, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  2, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  3, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  4, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  5, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  6, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  7, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  8, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF,  9, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);   \
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
125
126 static u64 read_wb_reg(int reg, int n)
127 {
128         u64 val = 0;
129
130         switch (reg + n) {
131         GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
132         GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
133         GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
134         GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
135         default:
136                 pr_warning("attempt to read from unknown breakpoint register %d\n", n);
137         }
138
139         return val;
140 }
141
142 static void write_wb_reg(int reg, int n, u64 val)
143 {
144         switch (reg + n) {
145         GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
146         GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
147         GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
148         GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
149         default:
150                 pr_warning("attempt to write to unknown breakpoint register %d\n", n);
151         }
152         isb();
153 }
154
155 /*
156  * Convert a breakpoint privilege level to the corresponding exception
157  * level.
158  */
159 static enum dbg_active_el debug_exception_level(int privilege)
160 {
161         switch (privilege) {
162         case AARCH64_BREAKPOINT_EL0:
163                 return DBG_ACTIVE_EL0;
164         case AARCH64_BREAKPOINT_EL1:
165                 return DBG_ACTIVE_EL1;
166         default:
167                 pr_warning("invalid breakpoint privilege level %d\n", privilege);
168                 return -EINVAL;
169         }
170 }
171
/* Operations performed on a debug register slot by hw_breakpoint_slot_setup(). */
enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,		/* claim a free slot for a new event */
	HW_BREAKPOINT_UNINSTALL,	/* release the slot holding an event */
	HW_BREAKPOINT_RESTORE		/* locate an event's existing slot */
};
177
178 /**
179  * hw_breakpoint_slot_setup - Find and setup a perf slot according to
180  *                            operations
181  *
182  * @slots: pointer to array of slots
183  * @max_slots: max number of slots
184  * @bp: perf_event to setup
185  * @ops: operation to be carried out on the slot
186  *
187  * Return:
188  *      slot index on success
189  *      -ENOSPC if no slot is available/matches
190  *      -EINVAL on wrong operations parameter
191  */
192 static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
193                                     struct perf_event *bp,
194                                     enum hw_breakpoint_ops ops)
195 {
196         int i;
197         struct perf_event **slot;
198
199         for (i = 0; i < max_slots; ++i) {
200                 slot = &slots[i];
201                 switch (ops) {
202                 case HW_BREAKPOINT_INSTALL:
203                         if (!*slot) {
204                                 *slot = bp;
205                                 return i;
206                         }
207                         break;
208                 case HW_BREAKPOINT_UNINSTALL:
209                         if (*slot == bp) {
210                                 *slot = NULL;
211                                 return i;
212                         }
213                         break;
214                 case HW_BREAKPOINT_RESTORE:
215                         if (*slot == bp)
216                                 return i;
217                         break;
218                 default:
219                         pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
220                         return -EINVAL;
221                 }
222         }
223         return -ENOSPC;
224 }
225
/*
 * Install, uninstall or restore a break-/watch-point in this CPU's
 * debug registers. The register bank (BRP or WRP) is selected from the
 * event's control type; the slot index comes from
 * hw_breakpoint_slot_setup(). Returns 0 on success or the negative
 * error from slot setup.
 */
static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		/* Don't set the enable bit while bps are ptrace-disabled. */
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		/* Fall through */
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}
288
289 /*
290  * Install a perf counter breakpoint.
291  */
/* Install a perf counter breakpoint into this CPU's debug registers. */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}
296
/* Remove a perf counter breakpoint; any slot-setup error is ignored. */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
301
302 static int get_hbp_len(u8 hbp_len)
303 {
304         unsigned int len_in_bytes = 0;
305
306         switch (hbp_len) {
307         case ARM_BREAKPOINT_LEN_1:
308                 len_in_bytes = 1;
309                 break;
310         case ARM_BREAKPOINT_LEN_2:
311                 len_in_bytes = 2;
312                 break;
313         case ARM_BREAKPOINT_LEN_4:
314                 len_in_bytes = 4;
315                 break;
316         case ARM_BREAKPOINT_LEN_8:
317                 len_in_bytes = 8;
318                 break;
319         }
320
321         return len_in_bytes;
322 }
323
324 /*
325  * Check whether bp virtual address is in kernel space.
326  */
327 int arch_check_bp_in_kernelspace(struct perf_event *bp)
328 {
329         unsigned int len;
330         unsigned long va;
331         struct arch_hw_breakpoint *info = counter_arch_bp(bp);
332
333         va = info->address;
334         len = get_hbp_len(info->ctrl.len);
335
336         return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
337 }
338
339 /*
340  * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
341  * Hopefully this will disappear when ptrace can bypass the conversion
342  * to generic breakpoint descriptions.
343  */
344 int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
345                            int *gen_len, int *gen_type)
346 {
347         /* Type */
348         switch (ctrl.type) {
349         case ARM_BREAKPOINT_EXECUTE:
350                 *gen_type = HW_BREAKPOINT_X;
351                 break;
352         case ARM_BREAKPOINT_LOAD:
353                 *gen_type = HW_BREAKPOINT_R;
354                 break;
355         case ARM_BREAKPOINT_STORE:
356                 *gen_type = HW_BREAKPOINT_W;
357                 break;
358         case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
359                 *gen_type = HW_BREAKPOINT_RW;
360                 break;
361         default:
362                 return -EINVAL;
363         }
364
365         /* Len */
366         switch (ctrl.len) {
367         case ARM_BREAKPOINT_LEN_1:
368                 *gen_len = HW_BREAKPOINT_LEN_1;
369                 break;
370         case ARM_BREAKPOINT_LEN_2:
371                 *gen_len = HW_BREAKPOINT_LEN_2;
372                 break;
373         case ARM_BREAKPOINT_LEN_4:
374                 *gen_len = HW_BREAKPOINT_LEN_4;
375                 break;
376         case ARM_BREAKPOINT_LEN_8:
377                 *gen_len = HW_BREAKPOINT_LEN_8;
378                 break;
379         default:
380                 return -EINVAL;
381         }
382
383         return 0;
384 }
385
386 /*
387  * Construct an arch_hw_breakpoint from a perf_event.
388  */
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 * Fills in type, length, address, privilege and enable state from the
 * perf attributes; returns -EINVAL for unsupported type/length
 * combinations.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_task()) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 *        that breakpoints should be sizeof(long). This
			 *        is nonsense. For now, we fix up the parameter
			 *        but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}
468
469 /*
470  * Validate the arch-specific HW Breakpoint register settings.
471  */
/*
 * Validate the arch-specific HW Breakpoint register settings.
 * Builds the arch_hw_breakpoint from the perf attributes, then enforces
 * address alignment (with the AArch32 fixup described below) and rejects
 * per-task kernel breakpoints. Returns 0 or -EINVAL.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_task()) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
			/* Fall through: anything longer is rejected below. */
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
			/* Fall through */
		default:
			return -EINVAL;
		}

		/*
		 * Align the address down and shift the byte-address-select
		 * mask so the original bytes are still the ones monitored.
		 */
		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
		return -EINVAL;

	return 0;
}
534
535 /*
536  * Enable/disable all of the breakpoints active at the specified
537  * exception level at the register level.
538  * This is used when single-stepping after a breakpoint exception.
539  */
/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 *
 * @reg: AARCH64_DBG_REG_BCR for breakpoints, AARCH64_DBG_REG_WCR for
 *       watchpoints; anything else is silently ignored.
 * @el: only slots whose privilege maps to this exception level are
 *      touched.
 * @enable: non-zero sets the control-register enable bit, zero clears it.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		/* Skip slots targeting the other exception level. */
		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		/* Read-modify-write bit 0 (the enable bit) of the control reg. */
		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}
575
576 /*
577  * Debug exception handlers.
578  */
/*
 * Debug exception handlers.
 */
/*
 * Hardware breakpoint exception handler. Matches the faulting PC
 * against each installed BRP slot, delivers perf events for hits, then
 * arranges single-stepping over the breakpointed instruction when no
 * custom overflow handler will do it. Always returns 0 (handled).
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (!bp->overflow_handler)
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		/* Disable EL0 breakpoints so the step can make progress. */
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		/* Kernel-mode hit: step with per-CPU state instead. */
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			/* Someone else (e.g. kgdb) is stepping; suspend ours. */
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
654
/*
 * Hardware watchpoint exception handler. Matches the faulting data
 * address against each installed WRP slot (with AArch32 alignment
 * allowances), checks the access direction from the ESR, delivers perf
 * events and arranges stepping over the access. Always returns 0.
 */
static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (addr & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (!wp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		/* Kernel-mode hit: step with per-CPU state instead. */
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			/* Someone else is already stepping; suspend ours. */
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
756
757 /*
758  * Handle single-step exception.
759  */
/*
 * Handle single-step exception.
 */
/*
 * Re-enable any break-/watch-points that were disabled in order to step
 * over a hit. Called from the single-step exception handler.
 * Returns 0 if execution can resume, 1 if a SIGTRAP should be reported
 * (i.e. the step was not ours).
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * reported.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				/* The step belonged to a debugger; keep it armed. */
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		/* Re-arm everything disabled for this kernel-mode step. */
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			/* Step was borrowed from another agent; let it report. */
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}
813
814 /*
815  * Context-switcher for restoring suspended breakpoints.
816  */
817 void hw_breakpoint_thread_switch(struct task_struct *next)
818 {
819         /*
820          *           current        next
821          * disabled: 0              0     => The usual case, NOTIFY_DONE
822          *           0              1     => Disable the registers
823          *           1              0     => Enable the registers
824          *           1              1     => NOTIFY_DONE. per-task bps will
825          *                                   get taken care of by perf.
826          */
827
828         struct debug_info *current_debug_info, *next_debug_info;
829
830         current_debug_info = &current->thread.debug;
831         next_debug_info = &next->thread.debug;
832
833         /* Update breakpoints. */
834         if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
835                 toggle_bp_registers(AARCH64_DBG_REG_BCR,
836                                     DBG_ACTIVE_EL0,
837                                     !next_debug_info->bps_disabled);
838
839         /* Update watchpoints. */
840         if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
841                 toggle_bp_registers(AARCH64_DBG_REG_WCR,
842                                     DBG_ACTIVE_EL0,
843                                     !next_debug_info->wps_disabled);
844 }
845
846 /*
847  * CPU initialisation.
848  */
/*
 * CPU initialisation.
 */
/*
 * Reset or restore this CPU's debug registers. Empty slots have their
 * control/value registers zeroed; occupied slots (possible after a
 * warm boot via the CPU PM notifier) are reprogrammed via
 * hw_breakpoint_control(HW_BREAKPOINT_RESTORE). The unused argument
 * exists to match the smp_call_function/PM-restorer callback signature.
 */
static void hw_breakpoint_reset(void *unused)
{
	int i;
	struct perf_event **slots;
	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets control
	 * and value for debug registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier some slots might be initialized; if so they are
	 * reprogrammed according to the debug slots content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}
}
881
882 static int hw_breakpoint_reset_notify(struct notifier_block *self,
883                                                 unsigned long action,
884                                                 void *hcpu)
885 {
886         int cpu = (long)hcpu;
887         if (action == CPU_ONLINE)
888                 smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
889         return NOTIFY_OK;
890 }
891
/* Hotplug notifier registered in arch_hw_breakpoint_init(). */
static struct notifier_block hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};
895
#ifdef CONFIG_CPU_PM
/* Provided by the CPU PM code to restore debug state after suspend. */
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
/* No CPU PM support: registering a restore hook is a no-op. */
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif
903
904 /*
905  * One-time initialisation.
906  */
/*
 * One-time initialisation.
 */
/*
 * Probe the number of BRP/WRP registers, reset the debug registers on
 * every online CPU, hook the debug fault handlers, and register the
 * hotplug and cpu_suspend restore callbacks. Always returns 0.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);
941
/*
 * Required perf PMU read callback; intentionally empty as there is
 * nothing to read back for breakpoint events.
 */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}
945
946 /*
947  * Dummy function to register with die_notifier.
948  */
/* Die-notifier stub: never claims the exception, always NOTIFY_DONE. */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}