// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;		/* no instruction breakpoints available */
}

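/*
 * Returns true if any watchpoint hit recorded in
 * current->thread.last_hit_ubp[] is still waiting for its single-step to
 * complete, i.e. single_step_dabr_instruction() has not run yet.
 */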
static bool single_step_pending(void)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (current->thread.last_hit_ubp[i])
			return true;
	}
	return false;
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!single_step_pending())
		__set_breakpoint(i, info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}

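/*
 * ptrace-created breakpoints install ptrace_triggered() as their overflow
 * handler; they are one-shot and get special treatment in
 * hw_breakpoint_handler().
 */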
static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
		}
	}
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

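/*
 * Translate the arch-specific HW_BRK_TYPE_* access bits into the generic
 * HW_BREAKPOINT_R/W flags used by the core perf layer.
 */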
int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we
 * need to increase the length so that the next doubleword also gets
 * covered. For example,
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *        <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross 512 bytes boundary */
		if (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can set up a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}

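/*
 * As an illustration (hypothetical values): attr->bp_addr = 0x1003 and
 * attr->bp_len = 6 give start_addr = 0x1000 and end_addr = 0x1010 in
 * hw_breakpoint_validate_len(), i.e. hw->hw_len = 16. That exceeds
 * DABR_MAX_LEN (a single 8-byte doubleword), so such a request is only
 * accepted when the DAWR (or the 8xx range hardware) is available.
 */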
/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (unlikely(tsk->thread.last_hit_ubp[i]))
			goto reset;
	}
	return;

reset:
	regs->msr &= ~MSR_SE;
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		tsk->thread.last_hit_ubp[i] = NULL;
	}
}

static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	return ((info->address <= dar) && (dar - info->address < info->len));
}

static bool dar_user_range_overlaps(unsigned long dar, int size,
				    struct arch_hw_breakpoint *info)
{
	return ((dar < info->address + info->len) &&
		(dar + size > info->address));
}

static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}

static bool dar_hw_range_overlaps(unsigned long dar, int size,
				  struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((dar < hw_end_addr) && (dar + size > hw_start_addr));
}

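/*
 * The *_user_range() helpers above check the exact range the user asked
 * for, while the *_hw_range() helpers check the larger, doubleword-aligned
 * range actually programmed into the hardware. A hit that only falls in
 * the hw range is reported as an extraneous interrupt.
 */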
/*
 * If hw has multiple DAWR registers, we also need to check all
 * dawrx constraint bits to confirm this is _really_ a valid event.
 */
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
				    struct arch_hw_breakpoint *info)
{
	if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
		return false;

	if (OP_IS_STORE(type) && !(info->type & HW_BRK_TYPE_WRITE))
		return false;

	if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
		return false;

	if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
		return false;

	return true;
}

/*
 * Return true if the event is valid wrt dawr configuration,
 * including extraneous exception. Otherwise return false.
 */
static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
			      int type, int size, struct arch_hw_breakpoint *info)
{
	bool in_user_range = dar_in_user_range(regs->dar, info);
	bool dawrx_constraints;

	/*
	 * 8xx supports only one breakpoint and thus we can
	 * unconditionally return true.
	 */
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		if (!in_user_range)
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		return true;
	}

	if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
		if (in_user_range)
			return true;

		if (dar_in_hw_range(regs->dar, info)) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
		return false;
	}

	dawrx_constraints = check_dawrx_constraints(regs, type, info);

	if (dar_user_range_overlaps(regs->dar, size, info))
		return dawrx_constraints;

	if (dar_hw_range_overlaps(regs->dar, size, info)) {
		if (dawrx_constraints) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
	}
	return false;
}

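/*
 * Decode the instruction at regs->nip to get the access type and size used
 * by the constraint checks above. If the instruction can't be fetched,
 * *instr is left as ppc_inst(0) and the caller falls back to DAR-only
 * matching.
 */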
static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
			     int *type, int *size, bool *larx_stcx)
{
	struct instruction_op op;

	if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
		return;

	analyse_instr(&op, regs, *instr);

	/*
	 * Set size = 8 if analyse_instr() fails. If it's a userspace
	 * watchpoint (valid or extraneous), we can notify user about it.
	 * If it's a kernel watchpoint, instruction emulation will fail
	 * in stepping_handler() and watchpoint will be disabled.
	 */
	*type = GETTYPE(op.type);
	*size = !(*type == UNKNOWN) ? GETSIZE(op.type) : 8;
	*larx_stcx = (*type == LARX || *type == STCX);
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     info->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   info->address);
	perf_event_disable_inatomic(bp);
}

static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     struct arch_hw_breakpoint **info, int *hit,
			     struct ppc_inst instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			current->thread.last_hit_ubp[i] = bp[i];
			info[i] = NULL;
		}
		regs->msr |= MSR_SE;
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i], info[i]);
			info[i] = NULL;
		}
		return false;
	}
	return true;
}

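/*
 * DABR/DAWR match exception handler: find every armed watchpoint whose
 * constraints match the faulting access, report the hits to perf, and
 * either emulate the instruction or arrange for it to be single-stepped
 * so the watchpoints can be re-armed afterwards.
 */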
int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	struct ppc_inst instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	bool larx_stcx = false;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

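	/*
	 * Decode the faulting access once, then walk all slots: a slot is
	 * treated as hit only if check_constraints() accepts it for this
	 * access.
	 */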
	if (!IS_ENABLED(CONFIG_PPC_8xx))
		get_instr_detail(regs, &instr, &type, &size, &larx_stcx);

	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (check_constraints(regs, instr, type, size, info[i])) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
				info[i] = NULL;
				err = true;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			info[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (larx_stcx) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i], info[i]);
				info[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, info, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;
		__set_breakpoint(i, info[i]);
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		current->thread.last_hit_ubp[i] = NULL;
	}

	if (!found)
		return NOTIFY_DONE;

	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}