/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);
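/*
 * stepping_kernel_bp takes the ARM_KERNEL_STEP_* values: NONE when no
 * kernel breakpoint is being stepped, ACTIVE when this code enabled the
 * single-step state machine itself, and SUSPEND when single-step was
 * already active (e.g. for a debugger) and must be left enabled. See the
 * exception handlers below.
 */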

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

int hw_breakpoint_slots(int type)
{
        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
         */
        switch (type) {
        case TYPE_INST:
                return get_num_brps();
        case TYPE_DATA:
                return get_num_wrps();
        default:
                pr_warn("unknown slot type: %d\n", type);
                return 0;
        }
}

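/*
 * The debug registers are AArch64 system registers: the index of a
 * breakpoint/watchpoint register is encoded as an immediate in the
 * MRS/MSR instruction, so it cannot be supplied at run time. The macros
 * below therefore generate one switch case per register, allowing
 * read_wb_reg()/write_wb_reg() to dispatch on (reg + n).
 */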
#define READ_WB_REG_CASE(OFF, N, REG, VAL)      \
        case (OFF + N):                         \
                AARCH64_DBG_READ(N, REG, VAL);  \
                break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)     \
        case (OFF + N):                         \
                AARCH64_DBG_WRITE(N, REG, VAL); \
                break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)    \
        READ_WB_REG_CASE(OFF,  0, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  1, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  2, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  3, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  4, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  5, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  6, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  7, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  8, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  9, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 10, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 11, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 12, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 13, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 14, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)   \
        WRITE_WB_REG_CASE(OFF,  0, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  1, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  2, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  3, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  4, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  5, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  6, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  7, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  8, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  9, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 10, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 11, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 12, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 13, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 14, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 15, REG, VAL)

static u64 read_wb_reg(int reg, int n)
{
        u64 val = 0;

        switch (reg + n) {
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warn("attempt to read from unknown breakpoint register %d\n", n);
        }

        return val;
}

static void write_wb_reg(int reg, int n, u64 val)
{
        switch (reg + n) {
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warn("attempt to write to unknown breakpoint register %d\n", n);
        }
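        /*
         * Writes to the debug registers are only guaranteed to take
         * effect after a context synchronization event; the ISB
         * provides one.
         */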
        isb();
}

/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
        switch (privilege) {
        case AARCH64_BREAKPOINT_EL0:
                return DBG_ACTIVE_EL0;
        case AARCH64_BREAKPOINT_EL1:
                return DBG_ACTIVE_EL1;
        default:
                pr_warn("invalid breakpoint privilege level %d\n", privilege);
                return -EINVAL;
        }
}

enum hw_breakpoint_ops {
        HW_BREAKPOINT_INSTALL,
        HW_BREAKPOINT_UNINSTALL,
        HW_BREAKPOINT_RESTORE
};

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *                            operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *      slot index on success
 *      -ENOSPC if no slot is available/matches
 *      -EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
                                    struct perf_event *bp,
                                    enum hw_breakpoint_ops ops)
{
        int i;
        struct perf_event **slot;

        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];
                switch (ops) {
                case HW_BREAKPOINT_INSTALL:
                        if (!*slot) {
                                *slot = bp;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_UNINSTALL:
                        if (*slot == bp) {
                                *slot = NULL;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_RESTORE:
                        if (*slot == bp)
                                return i;
                        break;
                default:
                        pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
                        return -EINVAL;
                }
        }
        return -ENOSPC;
}

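/*
 * Claim, release or reprogram a breakpoint/watchpoint register pair for
 * @bp. For INSTALL and RESTORE, the address goes into the value register
 * (BVR/WVR) and the encoded control word into the control register
 * (BCR/WCR); bit 0 of the control register is the enable bit, which is
 * only set if the registers are not disabled for the current task.
 */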
static int hw_breakpoint_control(struct perf_event *bp,
                                 enum hw_breakpoint_ops ops)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slots;
        struct debug_info *debug_info = &current->thread.debug;
        int i, max_slots, ctrl_reg, val_reg, reg_enable;
        enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
        u32 ctrl;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_reg = AARCH64_DBG_REG_BCR;
                val_reg = AARCH64_DBG_REG_BVR;
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                reg_enable = !debug_info->bps_disabled;
        } else {
                /* Watchpoint */
                ctrl_reg = AARCH64_DBG_REG_WCR;
                val_reg = AARCH64_DBG_REG_WVR;
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                reg_enable = !debug_info->wps_disabled;
        }

        i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

        if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
                return i;

        switch (ops) {
        case HW_BREAKPOINT_INSTALL:
                /*
                 * Ensure debug monitors are enabled at the correct exception
                 * level.
                 */
                enable_debug_monitors(dbg_el);
                /* Fall through */
        case HW_BREAKPOINT_RESTORE:
                /* Setup the address register. */
                write_wb_reg(val_reg, i, info->address);

                /* Setup the control register. */
                ctrl = encode_ctrl_reg(info->ctrl);
                write_wb_reg(ctrl_reg, i,
                             reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
                break;
        case HW_BREAKPOINT_UNINSTALL:
                /* Reset the control register. */
                write_wb_reg(ctrl_reg, i, 0);

                /*
                 * Release the debug monitors for the correct exception
                 * level.
                 */
                disable_debug_monitors(dbg_el);
                break;
        }

        return 0;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

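/* Convert an ARM_BREAKPOINT_LEN_* encoding to a length in bytes. */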
static int get_hbp_len(u8 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case ARM_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = get_hbp_len(info->ctrl.len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                           int *gen_len, int *gen_type)
{
        /* Type */
        switch (ctrl.type) {
        case ARM_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case ARM_BREAKPOINT_LOAD:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (ctrl.len) {
        case ARM_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_X:
                info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                info->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                info->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                info->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                info->ctrl.len = ARM_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /*
         * On AArch64, we only permit breakpoints of length 4, whereas
         * AArch32 also requires breakpoints of length 2 for Thumb.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                if (is_compat_task()) {
                        if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
                            info->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
                } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
                        /*
                         * FIXME: Some tools (I'm looking at you perf) assume
                         *        that breakpoints should be sizeof(long). This
                         *        is nonsense. For now, we fix up the parameter
                         *        but we should probably return -EINVAL instead.
                         */
                        info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                }
        }

        /* Address */
        info->address = bp->attr.bp_addr;

        /*
         * Privilege
         * Note that we disallow combined EL0/EL1 breakpoints because
         * that would complicate the stepping code.
         */
        if (arch_check_bp_in_kernelspace(bp))
                info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
        else
                info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

        /* Enabled? */
        info->ctrl.enabled = !bp->attr.disabled;

        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret;
        u64 alignment_mask, offset;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
        if (ret)
                return ret;

        /*
         * Check address alignment.
         * We don't do any clever alignment correction for watchpoints
         * because using 64-bit unaligned addresses is deprecated for
         * AArch64.
         *
         * AArch32 tasks expect some simple alignment fixups, so emulate
         * that here.
         */
        if (is_compat_task()) {
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
                        alignment_mask = 0x3;
                offset = info->address & alignment_mask;
                switch (offset) {
                case 0:
                        /* Aligned */
                        break;
                case 1:
                        /* Allow single byte watchpoint. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
                                break;
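                        /* Fall through. */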
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
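                        /* Fall through. */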
                default:
                        return -EINVAL;
                }

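                /*
                 * Worked example (illustrative): a 1-byte watchpoint on
                 * address 0x1003 becomes a watchpoint on the word at
                 * 0x1000, with the byte-address-select mask held in
                 * ctrl.len (0x1 for LEN_1) shifted left by the offset of
                 * 3 so that it selects byte 3 of the word.
                 */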
                info->address &= ~alignment_mask;
                info->ctrl.len <<= offset;
        } else {
                if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
                        alignment_mask = 0x3;
                else
                        alignment_mask = 0x7;
                if (info->address & alignment_mask)
                        return -EINVAL;
        }

        /*
         * Disallow per-task kernel breakpoints since these would
         * complicate the stepping code.
         */
        if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
                return -EINVAL;

        return 0;
}
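
/*
 * Example usage (illustrative sketch, not part of this file): a kernel
 * client can exercise this backend through the generic hw_breakpoint
 * API, along the lines of samples/hw_breakpoint/data_breakpoint.c. The
 * watched symbol and overflow handler below are placeholders.
 *
 *      struct perf_event_attr attr;
 *      struct perf_event * __percpu *wp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = (unsigned long)&watched_variable;
 *      attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *
 *      wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *      if (IS_ERR((void __force *)wp))
 *              return PTR_ERR((void __force *)wp);
 */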

/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
        int i, max_slots, privilege;
        u32 ctrl;
        struct perf_event **slots;

        switch (reg) {
        case AARCH64_DBG_REG_BCR:
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                break;
        case AARCH64_DBG_REG_WCR:
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                break;
        default:
                return;
        }

        for (i = 0; i < max_slots; ++i) {
                if (!slots[i])
                        continue;

                privilege = counter_arch_bp(slots[i])->ctrl.privilege;
                if (debug_exception_level(privilege) != el)
                        continue;

                ctrl = read_wb_reg(reg, i);
                if (enable)
                        ctrl |= 0x1;
                else
                        ctrl &= ~0x1;
                write_wb_reg(reg, i, ctrl);
        }
}

/*
 * Debug exception handlers.
 *
 * A breakpoint or watchpoint hit would simply re-trigger on return, so
 * the handlers below disable the registers at the triggering exception
 * level, single-step over the offending instruction and rely on
 * reinstall_suspended_bps(), called from the single-step handler, to
 * re-enable everything afterwards.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step;
        u32 ctrl_reg;
        u64 addr, val;
        struct perf_event *bp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(bp_on_reg);
        addr = instruction_pointer(regs);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL)
                        goto unlock;

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
                if (val != (addr & ~0x3))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & 0x3)) & ctrl.len))
                        goto unlock;

                counter_arch_bp(bp)->trigger = addr;
                perf_bp_event(bp, regs);

                /* Do we need to handle the stepping? */
                if (!bp->overflow_handler)
                        step = 1;
unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        if (user_mode(regs)) {
                debug_info->bps_disabled = 1;
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

                /* If we're already stepping a watchpoint, just return. */
                if (debug_info->wps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}

static int watchpoint_handler(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step, access;
        u32 ctrl_reg;
        u64 val, alignment_mask;
        struct perf_event *wp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(wp_on_reg);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                wp = slots[i];

                if (wp == NULL)
                        goto unlock;

                info = counter_arch_bp(wp);
                /* AArch32 watchpoints are aligned to either 4 or 8 bytes. */
                if (is_compat_task()) {
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                                alignment_mask = 0x7;
                        else
                                alignment_mask = 0x3;
                } else {
                        alignment_mask = 0x7;
                }

                /* Check if the watchpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
                if (val != (addr & ~alignment_mask))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & alignment_mask)) & ctrl.len))
                        goto unlock;

                /*
                 * Check that the access type matches.
                 * 0 => load, otherwise => store
                 */
                access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
                         HW_BREAKPOINT_R;
                if (!(access & hw_breakpoint_type(wp)))
                        goto unlock;

                info->trigger = addr;
                perf_bp_event(wp, regs);

                /* Do we need to handle the stepping? */
                if (!wp->overflow_handler)
                        step = 1;

unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        /*
         * We always disable EL0 watchpoints because the kernel can
         * cause these to fire via an unprivileged access.
         */
        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

        if (user_mode(regs)) {
                debug_info->wps_disabled = 1;

                /* If we're already stepping a breakpoint, just return. */
                if (debug_info->bps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}

/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
        struct debug_info *debug_info = &current->thread.debug;
        int handled_exception = 0, *kernel_step;

        kernel_step = this_cpu_ptr(&stepping_kernel_bp);

        /*
         * Called from single-step exception handler.
         * Return 0 if execution can resume, 1 if a SIGTRAP should be
         * reported.
         */
        if (user_mode(regs)) {
                if (debug_info->bps_disabled) {
                        debug_info->bps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (debug_info->wps_disabled) {
                        debug_info->wps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (handled_exception) {
                        if (debug_info->suspended_step) {
                                debug_info->suspended_step = 0;
                                /* Allow exception handling to fall through. */
                                handled_exception = 0;
                        } else {
                                user_disable_single_step(current);
                        }
                }
        } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

                if (!debug_info->wps_disabled)
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

                if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
                        kernel_disable_single_step();
                        handled_exception = 1;
                } else {
                        handled_exception = 0;
                }

                *kernel_step = ARM_KERNEL_STEP_NONE;
        }

        return !handled_exception;
}

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
        /*
         *           current        next
         * disabled: 0              0     => The usual case, NOTIFY_DONE
         *           0              1     => Disable the registers
         *           1              0     => Enable the registers
         *           1              1     => NOTIFY_DONE. per-task bps will
         *                                   get taken care of by perf.
         */

        struct debug_info *current_debug_info, *next_debug_info;

        current_debug_info = &current->thread.debug;
        next_debug_info = &next->thread.debug;

        /* Update breakpoints. */
        if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_BCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->bps_disabled);

        /* Update watchpoints. */
        if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_WCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->wps_disabled);
}

/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
        int i;
        struct perf_event **slots;
        /*
         * When a CPU cold-boots or is hotplugged in, it has no installed
         * slots, so the same function can safely be shared between resetting
         * and restoring breakpoints: with every slot empty, it simply zeroes
         * the control and value registers.
         * When this function is invoked on warm-boot through a CPU PM
         * notifier, some slots may be initialised; those are reprogrammed
         * according to the slot contents.
         */
        for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
                }
        }

        for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
                }
        }
}

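/*
 * Reset or restore the debug registers of a CPU as it comes online.
 */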
static int hw_breakpoint_reset_notify(struct notifier_block *self,
                                                unsigned long action,
                                                void *hcpu)
{
        int cpu = (long)hcpu;
        if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
                smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
        .notifier_call = hw_breakpoint_reset_notify,
};

#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        pr_info("found %d breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_num_wrps);

        cpu_notifier_register_begin();

        /*
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
         */
        smp_call_function(hw_breakpoint_reset, NULL, 1);
        hw_breakpoint_reset(NULL);

        /* Register debug fault handlers. */
        hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-breakpoint handler");
        hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-watchpoint handler");

        /* Register hotplug notifier. */
        __register_cpu_notifier(&hw_breakpoint_reset_nb);

        cpu_notifier_register_done();

        /* Register cpu_suspend hw breakpoint restore hook */
        cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

        return 0;
}
arch_initcall(arch_hw_breakpoint_init);

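/* There is no PMU hardware state to read for breakpoint events. */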
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}