/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt
#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;
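
/*
 * These counts come from get_num_brps()/get_num_wrps(), which decode the
 * BRPs/WRPs fields of ID_AA64DFR0_EL1; the architecture stores
 * (number of registers - 1) there and guarantees at least two of each.
 */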
int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}
#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_READ(N, REG, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_WRITE(N, REG, VAL);	\
		break
#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
	READ_WB_REG_CASE(OFF, 0, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 1, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 2, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 3, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 4, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 5, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 6, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 7, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 8, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 9, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 15, REG, VAL)
#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
	WRITE_WB_REG_CASE(OFF, 0, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 1, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 2, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 3, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 4, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 5, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 6, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 7, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 8, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 9, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
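
/*
 * To illustrate the expansion: READ_WB_REG_CASE(AARCH64_DBG_REG_BVR, 5,
 * AARCH64_DBG_REG_NAME_BVR, val) becomes roughly
 *
 *	case AARCH64_DBG_REG_BVR + 5:
 *		AARCH64_DBG_READ(5, AARCH64_DBG_REG_NAME_BVR, val);
 *		break;
 *
 * allowing read_wb_reg()/write_wb_reg() below to dispatch on (reg + n)
 * while the mrs/msr accessors still get a compile-time register name.
 */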
static u64 read_wb_reg(int reg, int n)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warn("attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}
static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warn("attempt to write to unknown breakpoint register %d\n", n);
	}
}
/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
	switch (privilege) {
	case AARCH64_BREAKPOINT_EL0:
		return DBG_ACTIVE_EL0;
	case AARCH64_BREAKPOINT_EL1:
		return DBG_ACTIVE_EL1;
	default:
		pr_warn("invalid breakpoint privilege level %d\n", privilege);
		return -EINVAL;
	}
}
enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
	HW_BREAKPOINT_UNINSTALL,
	HW_BREAKPOINT_RESTORE
};
static int is_compat_bp(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	/*
	 * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
	 * In this case, use the native interface, since we don't have
	 * the notion of a "compat CPU" and could end up relying on
	 * deprecated behaviour if we use unaligned watchpoints in
	 * AArch64 state.
	 */
	return tsk && is_compat_thread(task_thread_info(tsk));
}
/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *			      the operation
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *	slot index on success
 *	-ENOSPC if no slot is available/matches
 *	-EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
				    struct perf_event *bp,
				    enum hw_breakpoint_ops ops)
{
	int i;
	struct perf_event **slot;

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];
		switch (ops) {
		case HW_BREAKPOINT_INSTALL:
			if (!*slot) {
				*slot = bp;
				return i;
			}
			break;
		case HW_BREAKPOINT_UNINSTALL:
			if (*slot == bp) {
				*slot = NULL;
				return i;
			}
			break;
		case HW_BREAKPOINT_RESTORE:
			if (*slot == bp)
				return i;
			break;
		default:
			pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
			return -EINVAL;
		}
	}
	return -ENOSPC;
}
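
/*
 * To summarise the loop above: INSTALL claims the first free slot,
 * UNINSTALL releases the slot owning @bp, and RESTORE simply reports the
 * index of @bp's slot so the caller can reprogram the registers.
 */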
static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		/* Fall through */
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}
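
/*
 * For reference: encode_ctrl_reg() in asm/hw_breakpoint.h packs the enable
 * bit at bit 0, the privilege at bits [2:1], the type at bits [4:3] and
 * the byte address select (len) from bit 5 up, mirroring the DBGBCR<n>_EL1
 * and DBGWCR<n>_EL1 layouts. That is why the enable toggling above only
 * ever flips bit 0.
 */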
/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}
/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
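
/*
 * Both halves of the check matter: the first rejects addresses that start
 * in user space, while the second catches (va + len - 1) wrapping past the
 * top of the address space.
 */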
/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_bp(bp)) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 * that breakpoints should be sizeof(long). This
			 * is nonsense. For now, we fix up the parameter
			 * but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}
/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_bp(bp)) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
		default:
			return -EINVAL;
		}

		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
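
		/*
		 * For example, a 2-byte compat watchpoint at address 0x1006
		 * ends up with address 0x1004 and byte address select
		 * ARM_BREAKPOINT_LEN_2 << 2 == 0b1100, i.e. bytes 2-3 of
		 * the aligned word.
		 */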
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
		return -EINVAL;

	return 0;
}
/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}
/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;
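
		/*
		 * For example, a breakpoint whose byte address select only
		 * covers the upper halfword of a word (BAS == 0b1100) only
		 * matches here when addr & 0x3 == 2.
		 */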
		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (!bp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
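
/*
 * A note on the stepping state machine shared with watchpoint_handler()
 * and reinstall_suspended_bps(): ARM_KERNEL_STEP_ACTIVE means we own the
 * single-step and will disable it again ourselves, whereas
 * ARM_KERNEL_STEP_SUSPEND means someone else (e.g. kgdb) was already
 * stepping, so we only re-enable the debug registers and leave the step
 * to its original owner.
 */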
static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (addr & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;
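
		/*
		 * AARCH64_ESR_ACCESS_MASK isolates the ESR WnR ("write not
		 * read") bit, so the check above treats loads as reads and
		 * everything else as writes.
		 */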
		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (!wp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * delivered.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}
/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	/*
	 *           current        next
	 * disabled: 0              0     => The usual case, NOTIFY_DONE
	 *           0              1     => Disable the registers
	 *           1              0     => Enable the registers
	 *           1              1     => NOTIFY_DONE. per-task bps will
	 *                                   get taken care of by perf.
	 */

	struct debug_info *current_debug_info, *next_debug_info;

	current_debug_info = &current->thread.debug;
	next_debug_info = &next->thread.debug;

	/* Update breakpoints. */
	if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_BCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->bps_disabled);

	/* Update watchpoints. */
	if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_WCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->wps_disabled);
}
/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
	int i;
	struct perf_event **slots;
	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets control
	 * and value for debug registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier some slots might be initialized; if so they are
	 * reprogrammed according to the debug slots content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}
}
static int hw_breakpoint_reset_notify(struct notifier_block *self,
				      unsigned long action,
				      void *hcpu)
{
	int cpu = (long)hcpu;
	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
		smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};
#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif
/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
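
/*
 * Usage sketch (illustrative only, modelled on
 * samples/hw_breakpoint/data_breakpoint.c): a module can drive this code
 * through the generic hw_breakpoint API. The symbol and handler names
 * below are hypothetical:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("jiffies");
 *	attr.bp_len = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 *
 * wp_handler is a perf_overflow_handler_t; each CPU's event then lands in
 * one of the wp_on_reg slots above via arch_install_hw_breakpoint(), and
 * unregister_wide_hw_breakpoint(wp) tears it down again.
 */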