xref: /linux/arch/arm/kernel/hw_breakpoint.c (revision 7d6904bf26b96ef087514cb7a8c50b62a4911c99)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/coresight.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps __ro_after_init;
static int core_num_wrps __ro_after_init;

/* Debug architecture version. */
static u8 debug_arch __ro_after_init;

/* Does debug architecture support OS Save and Restore? */
static bool has_ossr __ro_after_init;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len __ro_after_init;

#define READ_WB_REG_CASE(OP2, M, VAL)			\
	case ((OP2 << 4) + M):				\
		ARM_DBG_READ(c0, c ## M, OP2, VAL);	\
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)			\
	case ((OP2 << 4) + M):				\
		ARM_DBG_WRITE(c0, c ## M, OP2, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)

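/*
 * Read/write a breakpoint or watchpoint register. The index n encodes the
 * CP14 op2 value in bits [7:4] and the register number (CRm) in bits [3:0],
 * matching the case values generated by the GEN_*_WB_REG_CASES macros above.
 */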
static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warn("attempt to read from unknown breakpoint register %d\n",
			n);
	}

	return val;
}

static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warn("attempt to write to unknown breakpoint register %d\n",
			n);
	}
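	/* Ensure the CP14 register write has taken effect before we continue. */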
	isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warn_once("CPUID feature registers not supported. Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	ARM_DBG_READ(c0, c0, 0, didr);
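	/* DBGDIDR[19:16] holds the debug architecture version. */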
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

static int debug_arch_supported(void)
{
	u8 arch = get_debug_arch();

	/* We don't support the memory-mapped interface. */
	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
		arch >= ARM_DEBUG_ARCH_V7_1;
}

/* Can we determine the watchpoint access type from the fsr? */
static int debug_exception_updates_fsr(void)
{
	return get_debug_arch() >= ARM_DEBUG_ARCH_V8;
}

/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, c0, 0, didr);
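	/* DBGDIDR[31:28] holds the number of WRPs, minus one. */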
	return ((didr >> 28) & 0xf) + 1;
}

/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, c0, 0, didr);
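	/* DBGDIDR[27:24] holds the number of BRPs, minus one. */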
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * On debug architectures prior to 7.1, when a watchpoint fires, the
	 * only way to work out which watchpoint it was is by disassembling
	 * the faulting instruction and working out the address of the memory
	 * access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Provided we have more than one breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
		return 1;

	return get_num_wrp_resources();
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	return core_has_mismatch_brps() ? brps - 1 : brps;
}

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int monitor_mode_enabled(void)
{
	u32 dscr;
	ARM_DBG_READ(c0, c1, 0, dscr);
	return !!(dscr & ARM_DSCR_MDBGEN);
}

static int enable_monitor_mode(void)
{
	u32 dscr;
	ARM_DBG_READ(c0, c1, 0, dscr);

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/* Write to the corresponding DSCR. */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
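	/* v7 onwards: DBGDSCRint is read-only, so write via DBGDSCRext. */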
	case ARM_DEBUG_ARCH_V7_ECP14:
	case ARM_DEBUG_ARCH_V7_1:
	case ARM_DEBUG_ARCH_V8:
	case ARM_DEBUG_ARCH_V8_1:
	case ARM_DEBUG_ARCH_V8_2:
	case ARM_DEBUG_ARCH_V8_4:
		ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
		isb();
		break;
	default:
		return -ENODEV;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c0, c1, 0, dscr);
	if (!(dscr & ARM_DSCR_MDBGEN)) {
		pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
				smp_processor_id());
		return -EPERM;
	}

out:
	return 0;
}

int hw_breakpoint_slots(int type)
{
	if (!debug_arch_supported())
		return 0;

	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

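	/* If the 8-byte byte-address-select bits read back from WCR 0, len 8 works. */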
	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}

u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base;
	u32 addr, ctrl;

	addr = info->address;
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
	}

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (i == max_slots) {
		pr_warn("Can't find any breakpoint slot\n");
		return -EBUSY;
	}

	/* Override the breakpoint data with the step data. */
	if (info->step_ctrl.enabled) {
		addr = info->trigger & ~0x3;
		ctrl = encode_ctrl_reg(info->step_ctrl);
		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
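			/* Stepping a watchpoint: use the reserved mismatch BRP. */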
			i = 0;
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
		}
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);
	return 0;
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		base = ARM_BASE_WCR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (i == max_slots) {
		pr_warn("Can't find any breakpoint slot\n");
		return;
	}

	/* Ensure that we disable the mismatch breakpoint. */
	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
	    info->step_ctrl.enabled) {
		i = 0;
		base = ARM_BASE_BCR + core_num_brps;
	}

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = get_hbp_len(hw->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_X:
		hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		hw->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		hw->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
		if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
		fallthrough;
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (Thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	hw->address = attr->bp_addr;

	/* Privilege */
	hw->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(hw))
		hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	hw->ctrl.enabled = !attr->disabled;

	/* Mismatch */
	hw->ctrl.mismatch = 0;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Ensure that we are in monitor debug mode. */
	if (!monitor_mode_enabled())
		return -ENODEV;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = hw->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
		fallthrough;
	case 3:
		/* Allow single byte watchpoint. */
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
		fallthrough;
	default:
		ret = -EINVAL;
		goto out;
	}

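	/*
	 * Align the address down and shift the byte-address-select mask up
	 * so that it still covers the bytes the user asked to watch.
	 */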
	hw->address &= ~alignment_mask;
	hw->ctrl.len <<= offset;

	if (uses_default_overflow_handler(bp)) {
		/*
		 * Mismatch breakpoints are required for single-stepping
		 * breakpoints.
		 */
		if (!core_has_mismatch_brps())
			return -EINVAL;

		/* We don't allow mismatch breakpoints in kernel space. */
		if (arch_check_bp_in_kernelspace(hw))
			return -EPERM;

		/*
		 * Per-cpu breakpoints are not supported by our stepping
		 * mechanism.
		 */
		if (!bp->hw.target)
			return -EINVAL;

		/*
		 * We only support specific access types if the fsr
		 * reports them.
		 */
		if (!debug_exception_updates_fsr() &&
		    (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
		     hw->ctrl.type == ARM_BREAKPOINT_STORE))
			return -EINVAL;
	}

out:
	return ret;
}

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
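 *
 * Stepping is implemented with a mismatch breakpoint, which fires on any
 * instruction executed from an address other than 'addr', so the CPU traps
 * again as soon as it has moved past the triggering instruction.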
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	arch_uninstall_hw_breakpoint(bp);
	info->step_ctrl.mismatch  = 1;
	info->step_ctrl.len	  = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type	  = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled	  = 1;
	info->trigger		  = addr;
	arch_install_hw_breakpoint(bp);
}

static void disable_single_step(struct perf_event *bp)
{
	arch_uninstall_hw_breakpoint(bp);
	counter_arch_bp(bp)->step_ctrl.enabled = 0;
	arch_install_hw_breakpoint(bp);
}

/*
 * Arm32 hardware does not always report a watchpoint hit address that matches
 * one of the watchpoints set. It can also report an address "near" the
 * watchpoint if a single instruction accesses both watched and unwatched
 * addresses. There is no straightforward way, short of disassembling the
 * offending instruction, to map that address back to the watchpoint. This
 * function computes the distance of the memory access from the watchpoint as a
 * heuristic for the likelihood that a given access triggered the watchpoint.
 *
 * See this same function in the arm64 platform code, which has the same
 * problem.
 *
 * The function returns the distance of the address from the bytes watched by
 * the watchpoint. In case of an exact match, it returns 0.
 */
static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
					struct arch_hw_breakpoint_ctrl *ctrl)
{
	u32 wp_low, wp_high;
	u32 lens, lene;

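	/*
	 * The first and last set bits of the byte-address-select mask give
	 * the offsets of the lowest and highest bytes watched.
	 */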
	lens = __ffs(ctrl->len);
	lene = __fls(ctrl->len);

	wp_low = val + lens;
	wp_high = val + lene;
	if (addr < wp_low)
		return wp_low - addr;
	else if (addr > wp_high)
		return addr - wp_high;
	else
		return 0;
}

static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
				       struct arch_hw_breakpoint *info)
{
	return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
}

static void watchpoint_handler(unsigned long addr, unsigned int fsr,
			       struct pt_regs *regs)
{
	int i, access, closest_match = 0;
	u32 min_dist = -1, dist;
	u32 val, ctrl_reg;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);

	/*
	 * Find all watchpoints that match the reported address. If no exact
	 * match is found, attribute the hit to the closest watchpoint.
	 */
	rcu_read_lock();
	for (i = 0; i < core_num_wrps; ++i) {
		wp = slots[i];
		if (wp == NULL)
			continue;

		/*
		 * The DFAR is an unknown value on debug architectures prior
		 * to 7.1. Since we only allow a single watchpoint on these
		 * older CPUs, we can set the trigger to the lowest possible
		 * faulting address.
		 */
		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
			BUG_ON(i > 0);
			info = counter_arch_bp(wp);
			info->trigger = wp->attr.bp_addr;
		} else {
			/* Check that the access type matches. */
			if (debug_exception_updates_fsr()) {
				access = (fsr & ARM_FSR_ACCESS_MASK) ?
					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
				if (!(access & hw_breakpoint_type(wp)))
					continue;
			}

			val = read_wb_reg(ARM_BASE_WVR + i);
			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
			decode_ctrl_reg(ctrl_reg, &ctrl);
			dist = get_distance_from_watchpoint(addr, val, &ctrl);
			if (dist < min_dist) {
				min_dist = dist;
				closest_match = i;
			}
			/* Is this an exact match? */
			if (dist != 0)
				continue;

			/* We have a winner. */
			info = counter_arch_bp(wp);
			info->trigger = addr;
		}

		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);

		/*
		 * If we triggered a user watchpoint from a uaccess routine,
		 * then handle the stepping ourselves since userspace really
		 * can't help us with this.
		 */
		if (watchpoint_fault_on_uaccess(regs, info))
			goto step;

		perf_bp_event(wp, regs);

		/*
		 * Defer stepping to the overflow handler if one is installed.
		 * Otherwise, insert a temporary mismatch breakpoint so that
		 * we can single-step over the watchpoint trigger.
		 */
		if (!uses_default_overflow_handler(wp))
			continue;
step:
		enable_single_step(wp, instruction_pointer(regs));
	}

	if (min_dist > 0 && min_dist != -1) {
		/* No exact match found. */
		wp = slots[closest_match];
		info = counter_arch_bp(wp);
		info->trigger = addr;
		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);
		if (uses_default_overflow_handler(wp))
			enable_single_step(wp, instruction_pointer(regs));
	}

	rcu_read_unlock();
}

static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = this_cpu_ptr(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}

static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			if (uses_default_overflow_handler(bp))
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	preempt_disable();

	if (interrupts_enabled(regs))
		local_irq_enable();

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c0, c1, 0, dscr);
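	/* The DSCR method-of-entry bits identify the debug event source. */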

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
		fallthrough;
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, fsr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	preempt_enable();

	return ret;
}

#ifdef CONFIG_ARM_ERRATA_764319
static int oslsr_fault;

static int debug_oslsr_trap(struct pt_regs *regs, unsigned int instr)
{
	oslsr_fault = 1;
	instruction_pointer(regs) += 4;
	return 0;
}

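/* 0xee115e91 decodes as "mrc p14, 0, r5, c1, c1, 4" - a DBGOSLSR read. */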
static struct undef_hook debug_oslsr_hook = {
	.instr_mask  = 0xffffffff,
	.instr_val = 0xee115e91,
	.fn = debug_oslsr_trap,
};
#endif

/*
 * One-time initialisation.
 */
static cpumask_t debug_err_mask;

static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
{
	int cpu = smp_processor_id();

	pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
		instr, cpu);

	/* Set the error flag for this CPU and skip the faulting instruction. */
	cpumask_set_cpu(cpu, &debug_err_mask);
	instruction_pointer(regs) += 4;
	return 0;
}

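/* Match CP14 (mrc/mcr p14) debug register accesses. */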
static struct undef_hook debug_reg_hook = {
	.instr_mask	= 0x0fe80f10,
	.instr_val	= 0x0e000e10,
	.fn		= debug_reg_trap,
};

/* Does this core support OS Save and Restore? */
static bool core_has_os_save_restore(void)
{
	u32 oslsr;

	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V7_1:
		return true;
	case ARM_DEBUG_ARCH_V7_ECP14:
#ifdef CONFIG_ARM_ERRATA_764319
		oslsr_fault = 0;
		register_undef_hook(&debug_oslsr_hook);
		ARM_DBG_READ(c1, c1, 4, oslsr);
		unregister_undef_hook(&debug_oslsr_hook);
		if (oslsr_fault)
			return false;
#else
		ARM_DBG_READ(c1, c1, 4, oslsr);
#endif
		if (oslsr & ARM_OSLSR_OSLM0)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static void reset_ctrl_regs(unsigned int cpu)
{
	int i, raw_num_brps, err = 0;
	u32 val;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving the debug
	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
	 * the debug registers out of reset, so we must unlock the OS Lock
	 * Access Register to avoid taking undefined instruction exceptions
	 * later on.
	 */
	switch (debug_arch) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		/* ARMv6 cores clear the registers out of reset. */
		goto out_mdbgen;
	case ARM_DEBUG_ARCH_V7_ECP14:
		/*
		 * Ensure sticky power-down is clear (i.e. debug logic is
		 * powered up).
		 */
		ARM_DBG_READ(c1, c5, 4, val);
		if ((val & 0x1) == 0)
			err = -EPERM;

		if (!has_ossr)
			goto clear_vcr;
		break;
	case ARM_DEBUG_ARCH_V7_1:
		/*
		 * Ensure the OS double lock is clear.
		 */
		ARM_DBG_READ(c1, c3, 4, val);
		if ((val & 0x1) == 1)
			err = -EPERM;
		break;
	}

	if (err) {
		pr_warn_once("CPU %d debug is powered down!\n", cpu);
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
		return;
	}

	/*
	 * Unconditionally clear the OS lock by writing a value
	 * other than CS_LAR_KEY to the access register.
	 */
	ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK);
	isb();

	/*
	 * Clear any configured vector-catch events before
	 * enabling monitor mode.
	 */
clear_vcr:
	ARM_DBG_WRITE(c0, c7, 0, 0);
	isb();

	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
		pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
		return;
	}

	/*
	 * The control/value register pairs are UNKNOWN out of reset so
	 * clear them to avoid spurious debug events.
	 */
	raw_num_brps = get_num_brp_resources();
	for (i = 0; i < raw_num_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}

	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
		pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
		return;
	}

	/*
	 * Have a crack at enabling monitor mode. We don't actually need
	 * it yet, but reporting an error early is useful if it fails.
	 */
out_mdbgen:
	if (enable_monitor_mode())
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}

static int dbg_reset_online(unsigned int cpu)
{
	local_irq_disable();
	reset_ctrl_regs(cpu);
	local_irq_enable();
	return 0;
}

#ifdef CONFIG_CPU_PM
static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
			     void *v)
{
	if (action == CPU_PM_EXIT)
		reset_ctrl_regs(smp_processor_id());

	return NOTIFY_OK;
}

static struct notifier_block dbg_cpu_pm_nb = {
	.notifier_call = dbg_cpu_pm_notify,
};

static void __init pm_init(void)
{
	cpu_pm_register_notifier(&dbg_cpu_pm_nb);
}
#else
static inline void pm_init(void)
{
}
#endif

static int __init arch_hw_breakpoint_init(void)
{
	int ret;

	debug_arch = get_debug_arch();

	if (!debug_arch_supported()) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/*
	 * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
	 * whenever a WFI is issued, even if the core is not powered down, in
	 * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
	 * breakpoint and watchpoint registers are treated as undefined, so
	 * this results in boot time and runtime failures when these are
	 * accessed and we unexpectedly take a trap.
	 *
	 * It's not clear if/how this can be worked around, so we blacklist
	 * Scorpion CPUs to avoid these issues.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
		pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
		return 0;
	}

	has_ossr = core_has_os_save_restore();

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	/*
	 * We need to tread carefully here because DBGSWENABLE may be
	 * driven low on this core and there isn't an architected way to
	 * determine that.
	 */
	cpus_read_lock();
	register_undef_hook(&debug_reg_hook);

	/*
	 * Register CPU notifier which resets the breakpoint resources. We
	 * assume that a halting debugger will leave the world in a nice state
	 * for us.
	 */
	ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
					   "arm/hw_breakpoint:online",
					   dbg_reset_online, NULL);
	unregister_undef_hook(&debug_reg_hook);
	if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
		core_num_brps = 0;
		core_num_wrps = 0;
		if (ret > 0)
			cpuhp_remove_state_nocalls_cpuslocked(ret);
		cpus_read_unlock();
		return 0;
	}

	pr_info("found %d %sbreakpoint and %d watchpoint registers.\n",
		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " : "",
		core_num_wrps);

	/* Work out the maximum supported watchpoint length. */
	max_watchpoint_len = get_max_wp_len();
	pr_info("maximum watchpoint size is %u bytes.\n",
			max_watchpoint_len);

	/* Register debug fault handler. */
	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "watchpoint debug exception");
	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "breakpoint debug exception");
	cpus_read_unlock();

	/* Register PM notifiers. */
	pm_init();
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					unsigned long val, void *data)
{
	return NOTIFY_DONE;
}