/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;
int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}

#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_READ(N, REG, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_WRITE(N, REG, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
	READ_WB_REG_CASE(OFF,  0, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  1, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  2, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  3, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  4, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  5, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  6, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  7, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  8, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  9, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
	WRITE_WB_REG_CASE(OFF,  0, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  1, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  2, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  3, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  4, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  5, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  6, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  7, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  8, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  9, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)

static u64 read_wb_reg(int reg, int n)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warn("attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}
NOKPROBE_SYMBOL(read_wb_reg);

static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warn("attempt to write to unknown breakpoint register %d\n", n);
	}
	isb();
}
NOKPROBE_SYMBOL(write_wb_reg);
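/*
 * Illustration (not part of the original source): the hardware debug
 * registers are not indexable from C, so read_wb_reg()/write_wb_reg()
 * switch on (base + index) and let the GEN_*_WB_REG_CASES tables expand
 * into one MRS/MSR per register. Assuming the usual sysreg naming behind
 * AARCH64_DBG_READ, for example:
 *
 *	u64 bvr2 = read_wb_reg(AARCH64_DBG_REG_BVR, 2);
 *
 * matches "case (AARCH64_DBG_REG_BVR + 2)" and reads DBGBVR2_EL1.
 */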
/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
	switch (privilege) {
	case AARCH64_BREAKPOINT_EL0:
		return DBG_ACTIVE_EL0;
	case AARCH64_BREAKPOINT_EL1:
		return DBG_ACTIVE_EL1;
	default:
		pr_warn("invalid breakpoint privilege level %d\n", privilege);
		return -EINVAL;
	}
}
NOKPROBE_SYMBOL(debug_exception_level);

enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
	HW_BREAKPOINT_UNINSTALL,
	HW_BREAKPOINT_RESTORE
};

static int is_compat_bp(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	/*
	 * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
	 * In this case, use the native interface, since we don't have
	 * the notion of a "compat CPU" and could end up relying on
	 * deprecated behaviour if we use unaligned watchpoints in
	 * AArch64 state.
	 */
	return tsk && is_compat_thread(task_thread_info(tsk));
}

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *			      operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *	slot index on success
 *	-ENOSPC if no slot is available/matches
 *	-EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
				    struct perf_event *bp,
				    enum hw_breakpoint_ops ops)
{
	int i;
	struct perf_event **slot;

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];
		switch (ops) {
		case HW_BREAKPOINT_INSTALL:
			if (!*slot) {
				*slot = bp;
				return i;
			}
			break;
		case HW_BREAKPOINT_UNINSTALL:
			if (*slot == bp) {
				*slot = NULL;
				return i;
			}
			break;
		case HW_BREAKPOINT_RESTORE:
			if (*slot == bp)
				return i;
			break;
		default:
			pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
			return -EINVAL;
		}
	}
	return -ENOSPC;
}
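/*
 * Usage sketch (illustrative): HW_BREAKPOINT_INSTALL claims the first
 * free slot, HW_BREAKPOINT_UNINSTALL releases the slot that owns @bp,
 * and HW_BREAKPOINT_RESTORE only looks the slot index up again:
 *
 *	i = hw_breakpoint_slot_setup(slots, max_slots, bp,
 *				     HW_BREAKPOINT_INSTALL);
 *	if (i >= 0)
 *		write_wb_reg(ctrl_reg, i, encode_ctrl_reg(info->ctrl) | 0x1);
 *
 * hw_breakpoint_control() below follows exactly this pattern.
 */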
static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		/* Fall through */
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
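/*
 * Note on arch_check_bp_in_kernelspace() above (illustrative): assuming
 * VA_BITS = 48, TASK_SIZE for a native task is 1UL << 48, so a
 * watchpoint on a kernel symbol at, say, 0xffff000008000000 satisfies
 * both range checks and is later programmed with EL1 privilege, while
 * any range starting below TASK_SIZE keeps AARCH64_BREAKPOINT_EL0.
 */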
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_bp(bp)) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 * that breakpoints should be sizeof(long). This
			 * is nonsense. For now, we fix up the parameter
			 * but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}
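/*
 * Example (illustrative): a perf attr with bp_type = HW_BREAKPOINT_W,
 * bp_len = HW_BREAKPOINT_LEN_4 and a user-space bp_addr builds
 * info->ctrl = { .type = ARM_BREAKPOINT_STORE,
 *		  .len = ARM_BREAKPOINT_LEN_4,
 *		  .privilege = AARCH64_BREAKPOINT_EL0 },
 * which encode_ctrl_reg() later packs into the WCR image written by
 * hw_breakpoint_control().
 */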
/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_bp(bp)) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
			/* Fall through */
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
			/* Fall through */
		default:
			return -EINVAL;
		}

		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
		return -EINVAL;

	return 0;
}
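/*
 * Worked example for the compat fixup above (illustrative): a 1-byte
 * AArch32 watchpoint at 0x1001 has offset 1, so the address is rounded
 * down to 0x1000 and the byte-address-select mask in ctrl.len is
 * shifted from 0b0001 to 0b0010, arming only byte 1 of the watched
 * word; the precise faulting address is still reported via ->trigger.
 */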
/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}
NOKPROBE_SYMBOL(toggle_bp_registers);

/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (is_default_overflow_handler(bp))
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
NOKPROBE_SYMBOL(breakpoint_handler);

static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are aligned to either 4 or 8 bytes. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (addr & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (is_default_overflow_handler(wp))
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
NOKPROBE_SYMBOL(watchpoint_handler);
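/*
 * Flow summary (illustrative): the handlers above cannot simply leave a
 * breakpoint armed once it has fired, since the faulting instruction
 * would re-trigger it on return. They therefore disable all
 * breakpoints/watchpoints at the triggering exception level, enable
 * single-step, and rely on reinstall_suspended_bps() below to re-arm
 * everything once the instruction has been stepped over.
 */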
/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * reported.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}
NOKPROBE_SYMBOL(reinstall_suspended_bps);

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	/*
	 *           current        next
	 * disabled: 0              0     => The usual case, NOTIFY_DONE
	 *           0              1     => Disable the registers
	 *           1              0     => Enable the registers
	 *           1              1     => NOTIFY_DONE. per-task bps will
	 *                                   get taken care of by perf.
	 */

	struct debug_info *current_debug_info, *next_debug_info;

	current_debug_info = &current->thread.debug;
	next_debug_info = &next->thread.debug;

	/* Update breakpoints. */
	if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_BCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->bps_disabled);

	/* Update watchpoints. */
	if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_WCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->wps_disabled);
}
/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
	int i;
	struct perf_event **slots;
	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets the
	 * control and value registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier, some slots might be initialised; if so, they are
	 * reprogrammed according to the debug slots' content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}
}

static int hw_breakpoint_reset_notify(struct notifier_block *self,
				      unsigned long action,
				      void *hcpu)
{
	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) {
		local_irq_disable();
		hw_breakpoint_reset(NULL);
		local_irq_enable();
	}
	return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};

#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
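/*
 * Client-side sketch (illustrative; "some_symbol" and "wp_handler" are
 * hypothetical): kernel users typically reach this code via the generic
 * hw_breakpoint/perf API rather than calling it directly, e.g.:
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&some_symbol;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *
 * Registration then funnels through arch_validate_hwbkpt_settings() and
 * arch_install_hw_breakpoint() above.
 */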