/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

/* Debug architecture version. */
static u8 debug_arch;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;

#define READ_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_READ(c ## M, OP2, VAL);	\
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_WRITE(c ## M, OP2, VAL);\
		break

#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)

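/*
 * Accessors for the debug registers. The index passed in encodes both the
 * register number (low nibble, used as the CRm value) and the opcode-2
 * value (upper bits), matching the (OP2 << 4) + M case labels generated
 * above. Callers use the ARM_BASE_* constants plus a register number to
 * select the slot they want.
 */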
static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint "
				"register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint "
				"register %d\n", n);
	}
	isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warning("CPUID feature registers not supported. "
			   "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

static int debug_arch_supported(void)
{
	u8 arch = get_debug_arch();

	/* We don't support the memory-mapped interface. */
	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
		arch >= ARM_DEBUG_ARCH_V7_1;
}

/* Can we determine the watchpoint access type from the fsr? */
static int debug_exception_updates_fsr(void)
{
	return 0;
}

/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 28) & 0xf) + 1;
}

/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * On debug architectures prior to 7.1, when a watchpoint fires, the
	 * only way to work out which watchpoint it was is by disassembling
	 * the faulting instruction and working out the address of the memory
	 * access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Provided we have more than one breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
		return 1;

	return get_num_wrp_resources();
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	return core_has_mismatch_brps() ? brps - 1 : brps;
}

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int enable_monitor_mode(void)
{
	u32 dscr;
	int ret = 0;

	ARM_DBG_READ(c1, 0, dscr);

	/* Ensure that halting mode is disabled. */
	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
		"halting debug mode enabled. Unable to access hardware resources.\n")) {
		ret = -EPERM;
		goto out;
	}

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/* Write to the corresponding DSCR. */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
	case ARM_DEBUG_ARCH_V7_1:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c1, 0, dscr);
	if (!(dscr & ARM_DSCR_MDBGEN))
		ret = -EPERM;

out:
	return ret;
}

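/*
 * Report the number of available breakpoint or watchpoint slots to the
 * generic hw_breakpoint layer. This may run before the cached counts are
 * set up, so it re-reads the debug ID register instead of using them.
 */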
int hw_breakpoint_slots(int type)
{
	if (!debug_arch_supported())
		return 0;

	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}

u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base, ret = 0;
	u32 addr, ctrl;

	/* Ensure that we are in monitor mode and halting mode is disabled. */
	ret = enable_monitor_mode();
	if (ret)
		goto out;

	addr = info->address;
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
		ret = -EBUSY;
		goto out;
	}

	/* Override the breakpoint data with the step data. */
	if (info->step_ctrl.enabled) {
		addr = info->trigger & ~0x3;
		ctrl = encode_ctrl_reg(info->step_ctrl);
		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
			i = 0;
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
		}
	}

	/* Set up the address register. */
	write_wb_reg(val_base + i, addr);

	/* Set up the control register. */
	write_wb_reg(ctrl_base + i, ctrl);

out:
	return ret;
}

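/*
 * Remove a perf counter breakpoint, freeing its debug register slot and
 * clearing the corresponding control register.
 */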
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
		return;

	/* Ensure that we disable the mismatch breakpoint. */
	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
	    info->step_ctrl.enabled) {
		i = 0;
		base = ARM_BASE_BCR + core_num_brps;
	}

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	/* Mismatch */
	info->ctrl.mismatch = 0;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = info->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
	case 3:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
	default:
		ret = -EINVAL;
		goto out;
	}

	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	if (!bp->overflow_handler) {
		/*
		 * Mismatch breakpoints are required for single-stepping
		 * breakpoints.
		 */
		if (!core_has_mismatch_brps())
			return -EINVAL;

		/* We don't allow mismatch breakpoints in kernel space. */
		if (arch_check_bp_in_kernelspace(bp))
			return -EPERM;

		/*
		 * Per-cpu breakpoints are not supported by our stepping
		 * mechanism.
		 */
		if (!bp->hw.bp_target)
			return -EINVAL;

		/*
		 * We only support specific access types if the fsr
		 * reports them.
		 */
		if (!debug_exception_updates_fsr() &&
		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
		     info->ctrl.type == ARM_BREAKPOINT_STORE))
			return -EINVAL;
	}

out:
	return ret;
}

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
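/*
 * The step itself is implemented with a mismatch breakpoint: the breakpoint
 * slot (or, for a watchpoint, the reserved mismatch BRP) is programmed as an
 * execute breakpoint that matches every address except the trigger, so the
 * target instruction runs once before the exception handlers call
 * disable_single_step() to restore the original register settings.
 */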
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	arch_uninstall_hw_breakpoint(bp);
	info->step_ctrl.mismatch = 1;
	info->step_ctrl.len = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled = 1;
	info->trigger = addr;
	arch_install_hw_breakpoint(bp);
}

static void disable_single_step(struct perf_event *bp)
{
	arch_uninstall_hw_breakpoint(bp);
	counter_arch_bp(bp)->step_ctrl.enabled = 0;
	arch_install_hw_breakpoint(bp);
}

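/*
 * Debug exception handler for watchpoints. Called from
 * hw_breakpoint_pending() with the faulting address and the fault status
 * register value.
 */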
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
			       struct pt_regs *regs)
{
	int i, access;
	u32 val, ctrl_reg, alignment_mask;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/*
		 * The DFAR is an unknown value on debug architectures prior
		 * to 7.1. Since we only allow a single watchpoint on these
		 * older CPUs, we can set the trigger to the lowest possible
		 * faulting address.
		 */
		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
			BUG_ON(i > 0);
			info->trigger = wp->attr.bp_addr;
		} else {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;

			/* Check if the watchpoint value matches. */
			val = read_wb_reg(ARM_BASE_WVR + i);
			if (val != (addr & ~alignment_mask))
				goto unlock;

			/* Possible match, check the byte address select. */
			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
			decode_ctrl_reg(ctrl_reg, &ctrl);
			if (!((1 << (addr & alignment_mask)) & ctrl.len))
				goto unlock;

			/* Check that the access type matches. */
			if (debug_exception_updates_fsr()) {
				access = (fsr & ARM_FSR_ACCESS_MASK) ?
					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
				if (!(access & hw_breakpoint_type(wp)))
					goto unlock;
			}

			/* We have a winner. */
			info->trigger = addr;
		}

		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);

		/*
		 * If no overflow handler is present, insert a temporary
		 * mismatch breakpoint so we can single-step over the
		 * watchpoint trigger.
		 */
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));

unlock:
		rcu_read_unlock();
	}
}

static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}

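/*
 * Debug exception handler for breakpoints. Called from
 * hw_breakpoint_pending(). Also restores any watchpoints whose
 * mismatch-breakpoint step has completed.
 */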
static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			if (!bp->overflow_handler)
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	preempt_disable();

	if (interrupts_enabled(regs))
		local_irq_enable();

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c1, 0, dscr);

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, fsr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	preempt_enable();

	return ret;
}

/*
 * One-time initialisation.
 */
static cpumask_t debug_err_mask;

static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
{
	int cpu = smp_processor_id();

	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
			instr, cpu);

	/* Set the error flag for this CPU and skip the faulting instruction. */
	cpumask_set_cpu(cpu, &debug_err_mask);
	instruction_pointer(regs) += 4;
	return 0;
}

static struct undef_hook debug_reg_hook = {
	.instr_mask	= 0x0fe80f10,
	.instr_val	= 0x0e000e10,
	.fn		= debug_reg_trap,
};

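/*
 * Clear out the debug register state on this CPU and switch into monitor
 * mode. This runs on each online CPU at boot and again from the hotplug
 * notifier, so it must cope with cores whose debug logic is powered down
 * or still held by the OS lock.
 */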
static void reset_ctrl_regs(void *unused)
{
	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
	u32 dbg_power;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving the debug
	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
	 * the debug registers out of reset, so we must unlock the OS Lock
	 * Access Register to avoid taking undefined instruction exceptions
	 * later on.
	 */
	switch (debug_arch) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		/* ARMv6 cores just need to reset the registers. */
		goto reset_regs;
	case ARM_DEBUG_ARCH_V7_ECP14:
		/*
		 * Ensure sticky power-down is clear (i.e. debug logic is
		 * powered up).
		 */
		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
		if ((dbg_power & 0x1) == 0)
			err = -EPERM;
		break;
	case ARM_DEBUG_ARCH_V7_1:
		/*
		 * Ensure the OS double lock is clear.
		 */
		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
		if ((dbg_power & 0x1) == 1)
			err = -EPERM;
		break;
	}

	if (err) {
		pr_warning("CPU %d debug is powered down!\n", cpu);
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
		return;
	}

	/*
	 * Unconditionally clear the lock by writing a value
	 * other than 0xC5ACCE55 to the access register.
	 */
	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
	isb();

	/*
	 * Clear any configured vector-catch events before
	 * enabling monitor mode.
	 */
	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
	isb();

reset_regs:
	if (enable_monitor_mode())
		return;

	/* We must also reset any reserved registers. */
	raw_num_brps = get_num_brp_resources();
	for (i = 0; i < raw_num_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}
}

static int __cpuinit dbg_reset_notify(struct notifier_block *self,
				      unsigned long action, void *cpu)
{
	if (action == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};

static int __init arch_hw_breakpoint_init(void)
{
	u32 dscr;

	debug_arch = get_debug_arch();

	if (!debug_arch_supported()) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	/*
	 * We need to tread carefully here because DBGSWENABLE may be
	 * driven low on this core and there isn't an architected way to
	 * determine that.
	 */
	register_undef_hook(&debug_reg_hook);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	on_each_cpu(reset_ctrl_regs, NULL, 1);
	unregister_undef_hook(&debug_reg_hook);
	if (!cpumask_empty(&debug_err_mask)) {
		core_num_brps = 0;
		core_num_wrps = 0;
		return 0;
	}

	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
		"", core_num_wrps);

	ARM_DBG_READ(c1, 0, dscr);
	if (dscr & ARM_DSCR_HDBGEN) {
		max_watchpoint_len = 4;
		pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n",
			   max_watchpoint_len);
	} else {
		/* Work out the maximum supported watchpoint length. */
		max_watchpoint_len = get_max_wp_len();
		pr_info("maximum watchpoint size is %u bytes.\n",
			max_watchpoint_len);
	}

	/* Register debug fault handler. */
	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "watchpoint debug exception");
	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			 TRAP_HWBKPT, "breakpoint debug exception");

	/* Register hotplug notifier. */
	register_cpu_notifier(&dbg_reset_nb);
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}