11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 25aae8a53SK.Prasad /* 35aae8a53SK.Prasad * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, 45aae8a53SK.Prasad * using the CPU's debug registers. Derived from 55aae8a53SK.Prasad * "arch/x86/kernel/hw_breakpoint.c" 65aae8a53SK.Prasad * 75aae8a53SK.Prasad * Copyright 2010 IBM Corporation 85aae8a53SK.Prasad * Author: K.Prasad <prasad@linux.vnet.ibm.com> 95aae8a53SK.Prasad */ 105aae8a53SK.Prasad 115aae8a53SK.Prasad #include <linux/hw_breakpoint.h> 125aae8a53SK.Prasad #include <linux/notifier.h> 135aae8a53SK.Prasad #include <linux/kprobes.h> 145aae8a53SK.Prasad #include <linux/percpu.h> 155aae8a53SK.Prasad #include <linux/kernel.h> 165aae8a53SK.Prasad #include <linux/sched.h> 175aae8a53SK.Prasad #include <linux/smp.h> 18f95e5a3dSMarco Elver #include <linux/spinlock.h> 19c1fe190cSMichael Neuling #include <linux/debugfs.h> 20c1fe190cSMichael Neuling #include <linux/init.h> 215aae8a53SK.Prasad 225aae8a53SK.Prasad #include <asm/hw_breakpoint.h> 235aae8a53SK.Prasad #include <asm/processor.h> 245aae8a53SK.Prasad #include <asm/sstep.h> 2585ce9a5dSMichael Neuling #include <asm/debug.h> 26c1fe190cSMichael Neuling #include <asm/hvcall.h> 2775346251SJordan Niethe #include <asm/inst.h> 287c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 295aae8a53SK.Prasad 305aae8a53SK.Prasad /* 315aae8a53SK.Prasad * Stores the breakpoints currently in use on each breakpoint address 325aae8a53SK.Prasad * register for every cpu 335aae8a53SK.Prasad */ 3474c68810SRavi Bangoria static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]); 355aae8a53SK.Prasad 365aae8a53SK.Prasad /* 37d09ec738SPaul Mackerras * Returns total number of data or instruction breakpoints available. 
38d09ec738SPaul Mackerras */ 39d09ec738SPaul Mackerras int hw_breakpoint_slots(int type) 40d09ec738SPaul Mackerras { 41d09ec738SPaul Mackerras if (type == TYPE_DATA) 42a6ba44e8SRavi Bangoria return nr_wp_slots(); 43d09ec738SPaul Mackerras return 0; /* no instruction breakpoints available */ 44d09ec738SPaul Mackerras } 45d09ec738SPaul Mackerras 4674c68810SRavi Bangoria static bool single_step_pending(void) 4774c68810SRavi Bangoria { 4874c68810SRavi Bangoria int i; 4974c68810SRavi Bangoria 5074c68810SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 5174c68810SRavi Bangoria if (current->thread.last_hit_ubp[i]) 5274c68810SRavi Bangoria return true; 5374c68810SRavi Bangoria } 5474c68810SRavi Bangoria return false; 5574c68810SRavi Bangoria } 5674c68810SRavi Bangoria 57d09ec738SPaul Mackerras /* 585aae8a53SK.Prasad * Install a perf counter breakpoint. 595aae8a53SK.Prasad * 605aae8a53SK.Prasad * We seek a free debug address register and use it for this 615aae8a53SK.Prasad * breakpoint. 625aae8a53SK.Prasad * 635aae8a53SK.Prasad * Atomic: we hold the counter->ctx->lock and we only handle variables 645aae8a53SK.Prasad * and registers local to this cpu. 
655aae8a53SK.Prasad */ 665aae8a53SK.Prasad int arch_install_hw_breakpoint(struct perf_event *bp) 675aae8a53SK.Prasad { 685aae8a53SK.Prasad struct arch_hw_breakpoint *info = counter_arch_bp(bp); 6974c68810SRavi Bangoria struct perf_event **slot; 7074c68810SRavi Bangoria int i; 715aae8a53SK.Prasad 7274c68810SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 7374c68810SRavi Bangoria slot = this_cpu_ptr(&bp_per_reg[i]); 7474c68810SRavi Bangoria if (!*slot) { 755aae8a53SK.Prasad *slot = bp; 7674c68810SRavi Bangoria break; 7774c68810SRavi Bangoria } 7874c68810SRavi Bangoria } 7974c68810SRavi Bangoria 8074c68810SRavi Bangoria if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot")) 8174c68810SRavi Bangoria return -EBUSY; 825aae8a53SK.Prasad 835aae8a53SK.Prasad /* 845aae8a53SK.Prasad * Do not install DABR values if the instruction must be single-stepped. 855aae8a53SK.Prasad * If so, DABR will be populated in single_step_dabr_instruction(). 865aae8a53SK.Prasad */ 8774c68810SRavi Bangoria if (!single_step_pending()) 8874c68810SRavi Bangoria __set_breakpoint(i, info); 895aae8a53SK.Prasad 905aae8a53SK.Prasad return 0; 915aae8a53SK.Prasad } 925aae8a53SK.Prasad 935aae8a53SK.Prasad /* 945aae8a53SK.Prasad * Uninstall the breakpoint contained in the given counter. 955aae8a53SK.Prasad * 965aae8a53SK.Prasad * First we search the debug address register it uses and then we disable 975aae8a53SK.Prasad * it. 985aae8a53SK.Prasad * 995aae8a53SK.Prasad * Atomic: we hold the counter->ctx->lock and we only handle variables 1005aae8a53SK.Prasad * and registers local to this cpu. 
1015aae8a53SK.Prasad */ 1025aae8a53SK.Prasad void arch_uninstall_hw_breakpoint(struct perf_event *bp) 1035aae8a53SK.Prasad { 10474c68810SRavi Bangoria struct arch_hw_breakpoint null_brk = {0}; 10574c68810SRavi Bangoria struct perf_event **slot; 10674c68810SRavi Bangoria int i; 1075aae8a53SK.Prasad 10874c68810SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 10974c68810SRavi Bangoria slot = this_cpu_ptr(&bp_per_reg[i]); 11074c68810SRavi Bangoria if (*slot == bp) { 11174c68810SRavi Bangoria *slot = NULL; 11274c68810SRavi Bangoria break; 11374c68810SRavi Bangoria } 1145aae8a53SK.Prasad } 1155aae8a53SK.Prasad 11674c68810SRavi Bangoria if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot")) 11774c68810SRavi Bangoria return; 11874c68810SRavi Bangoria 11974c68810SRavi Bangoria __set_breakpoint(i, &null_brk); 1205aae8a53SK.Prasad } 1215aae8a53SK.Prasad 122c9e82aebSRavi Bangoria static bool is_ptrace_bp(struct perf_event *bp) 123c9e82aebSRavi Bangoria { 124c9e82aebSRavi Bangoria return bp->overflow_handler == ptrace_triggered; 125c9e82aebSRavi Bangoria } 126c9e82aebSRavi Bangoria 12729da4f91SRavi Bangoria struct breakpoint { 12829da4f91SRavi Bangoria struct list_head list; 12929da4f91SRavi Bangoria struct perf_event *bp; 13029da4f91SRavi Bangoria bool ptrace_bp; 13129da4f91SRavi Bangoria }; 13229da4f91SRavi Bangoria 133f95e5a3dSMarco Elver /* 134f95e5a3dSMarco Elver * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot 135f95e5a3dSMarco Elver * rely on it safely synchronizing internals here; however, we can rely on it 136f95e5a3dSMarco Elver * not requesting more breakpoints than available. 
137f95e5a3dSMarco Elver */ 138f95e5a3dSMarco Elver static DEFINE_SPINLOCK(cpu_bps_lock); 13929da4f91SRavi Bangoria static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]); 140f95e5a3dSMarco Elver static DEFINE_SPINLOCK(task_bps_lock); 14129da4f91SRavi Bangoria static LIST_HEAD(task_bps); 14229da4f91SRavi Bangoria 14329da4f91SRavi Bangoria static struct breakpoint *alloc_breakpoint(struct perf_event *bp) 14429da4f91SRavi Bangoria { 14529da4f91SRavi Bangoria struct breakpoint *tmp; 14629da4f91SRavi Bangoria 14729da4f91SRavi Bangoria tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 14829da4f91SRavi Bangoria if (!tmp) 14929da4f91SRavi Bangoria return ERR_PTR(-ENOMEM); 15029da4f91SRavi Bangoria tmp->bp = bp; 15129da4f91SRavi Bangoria tmp->ptrace_bp = is_ptrace_bp(bp); 15229da4f91SRavi Bangoria return tmp; 15329da4f91SRavi Bangoria } 15429da4f91SRavi Bangoria 15529da4f91SRavi Bangoria static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2) 15629da4f91SRavi Bangoria { 15729da4f91SRavi Bangoria __u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr; 15829da4f91SRavi Bangoria 15929da4f91SRavi Bangoria bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE); 16029da4f91SRavi Bangoria bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE); 16129da4f91SRavi Bangoria bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE); 16229da4f91SRavi Bangoria bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE); 16329da4f91SRavi Bangoria 16429da4f91SRavi Bangoria return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr); 16529da4f91SRavi Bangoria } 16629da4f91SRavi Bangoria 16729da4f91SRavi Bangoria static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp) 16829da4f91SRavi Bangoria { 16929da4f91SRavi Bangoria return is_ptrace_bp(bp) ? 
!b->ptrace_bp : b->ptrace_bp; 17029da4f91SRavi Bangoria } 17129da4f91SRavi Bangoria 17229da4f91SRavi Bangoria static bool can_co_exist(struct breakpoint *b, struct perf_event *bp) 17329da4f91SRavi Bangoria { 17429da4f91SRavi Bangoria return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp)); 17529da4f91SRavi Bangoria } 17629da4f91SRavi Bangoria 17729da4f91SRavi Bangoria static int task_bps_add(struct perf_event *bp) 17829da4f91SRavi Bangoria { 17929da4f91SRavi Bangoria struct breakpoint *tmp; 18029da4f91SRavi Bangoria 18129da4f91SRavi Bangoria tmp = alloc_breakpoint(bp); 18229da4f91SRavi Bangoria if (IS_ERR(tmp)) 18329da4f91SRavi Bangoria return PTR_ERR(tmp); 18429da4f91SRavi Bangoria 185f95e5a3dSMarco Elver spin_lock(&task_bps_lock); 18629da4f91SRavi Bangoria list_add(&tmp->list, &task_bps); 187f95e5a3dSMarco Elver spin_unlock(&task_bps_lock); 18829da4f91SRavi Bangoria return 0; 18929da4f91SRavi Bangoria } 19029da4f91SRavi Bangoria 19129da4f91SRavi Bangoria static void task_bps_remove(struct perf_event *bp) 19229da4f91SRavi Bangoria { 19329da4f91SRavi Bangoria struct list_head *pos, *q; 19429da4f91SRavi Bangoria 195f95e5a3dSMarco Elver spin_lock(&task_bps_lock); 19629da4f91SRavi Bangoria list_for_each_safe(pos, q, &task_bps) { 19729da4f91SRavi Bangoria struct breakpoint *tmp = list_entry(pos, struct breakpoint, list); 19829da4f91SRavi Bangoria 19929da4f91SRavi Bangoria if (tmp->bp == bp) { 20029da4f91SRavi Bangoria list_del(&tmp->list); 20129da4f91SRavi Bangoria kfree(tmp); 20229da4f91SRavi Bangoria break; 20329da4f91SRavi Bangoria } 20429da4f91SRavi Bangoria } 205f95e5a3dSMarco Elver spin_unlock(&task_bps_lock); 20629da4f91SRavi Bangoria } 20729da4f91SRavi Bangoria 20829da4f91SRavi Bangoria /* 20929da4f91SRavi Bangoria * If any task has breakpoint from alternate infrastructure, 21029da4f91SRavi Bangoria * return true. Otherwise return false. 
21129da4f91SRavi Bangoria */ 21229da4f91SRavi Bangoria static bool all_task_bps_check(struct perf_event *bp) 21329da4f91SRavi Bangoria { 21429da4f91SRavi Bangoria struct breakpoint *tmp; 215f95e5a3dSMarco Elver bool ret = false; 21629da4f91SRavi Bangoria 217f95e5a3dSMarco Elver spin_lock(&task_bps_lock); 21829da4f91SRavi Bangoria list_for_each_entry(tmp, &task_bps, list) { 219f95e5a3dSMarco Elver if (!can_co_exist(tmp, bp)) { 220f95e5a3dSMarco Elver ret = true; 221f95e5a3dSMarco Elver break; 22229da4f91SRavi Bangoria } 223f95e5a3dSMarco Elver } 224f95e5a3dSMarco Elver spin_unlock(&task_bps_lock); 225f95e5a3dSMarco Elver return ret; 22629da4f91SRavi Bangoria } 22729da4f91SRavi Bangoria 22829da4f91SRavi Bangoria /* 22929da4f91SRavi Bangoria * If same task has breakpoint from alternate infrastructure, 23029da4f91SRavi Bangoria * return true. Otherwise return false. 23129da4f91SRavi Bangoria */ 23229da4f91SRavi Bangoria static bool same_task_bps_check(struct perf_event *bp) 23329da4f91SRavi Bangoria { 23429da4f91SRavi Bangoria struct breakpoint *tmp; 235f95e5a3dSMarco Elver bool ret = false; 23629da4f91SRavi Bangoria 237f95e5a3dSMarco Elver spin_lock(&task_bps_lock); 23829da4f91SRavi Bangoria list_for_each_entry(tmp, &task_bps, list) { 23929da4f91SRavi Bangoria if (tmp->bp->hw.target == bp->hw.target && 240f95e5a3dSMarco Elver !can_co_exist(tmp, bp)) { 241f95e5a3dSMarco Elver ret = true; 242f95e5a3dSMarco Elver break; 24329da4f91SRavi Bangoria } 244f95e5a3dSMarco Elver } 245f95e5a3dSMarco Elver spin_unlock(&task_bps_lock); 246f95e5a3dSMarco Elver return ret; 24729da4f91SRavi Bangoria } 24829da4f91SRavi Bangoria 24929da4f91SRavi Bangoria static int cpu_bps_add(struct perf_event *bp) 25029da4f91SRavi Bangoria { 25129da4f91SRavi Bangoria struct breakpoint **cpu_bp; 25229da4f91SRavi Bangoria struct breakpoint *tmp; 25329da4f91SRavi Bangoria int i = 0; 25429da4f91SRavi Bangoria 25529da4f91SRavi Bangoria tmp = alloc_breakpoint(bp); 25629da4f91SRavi Bangoria if (IS_ERR(tmp)) 
25729da4f91SRavi Bangoria return PTR_ERR(tmp); 25829da4f91SRavi Bangoria 259f95e5a3dSMarco Elver spin_lock(&cpu_bps_lock); 26029da4f91SRavi Bangoria cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu); 26129da4f91SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 26229da4f91SRavi Bangoria if (!cpu_bp[i]) { 26329da4f91SRavi Bangoria cpu_bp[i] = tmp; 26429da4f91SRavi Bangoria break; 26529da4f91SRavi Bangoria } 26629da4f91SRavi Bangoria } 267f95e5a3dSMarco Elver spin_unlock(&cpu_bps_lock); 26829da4f91SRavi Bangoria return 0; 26929da4f91SRavi Bangoria } 27029da4f91SRavi Bangoria 27129da4f91SRavi Bangoria static void cpu_bps_remove(struct perf_event *bp) 27229da4f91SRavi Bangoria { 27329da4f91SRavi Bangoria struct breakpoint **cpu_bp; 27429da4f91SRavi Bangoria int i = 0; 27529da4f91SRavi Bangoria 276f95e5a3dSMarco Elver spin_lock(&cpu_bps_lock); 27729da4f91SRavi Bangoria cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu); 27829da4f91SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 27929da4f91SRavi Bangoria if (!cpu_bp[i]) 28029da4f91SRavi Bangoria continue; 28129da4f91SRavi Bangoria 28229da4f91SRavi Bangoria if (cpu_bp[i]->bp == bp) { 28329da4f91SRavi Bangoria kfree(cpu_bp[i]); 28429da4f91SRavi Bangoria cpu_bp[i] = NULL; 28529da4f91SRavi Bangoria break; 28629da4f91SRavi Bangoria } 28729da4f91SRavi Bangoria } 288f95e5a3dSMarco Elver spin_unlock(&cpu_bps_lock); 28929da4f91SRavi Bangoria } 29029da4f91SRavi Bangoria 29129da4f91SRavi Bangoria static bool cpu_bps_check(int cpu, struct perf_event *bp) 29229da4f91SRavi Bangoria { 29329da4f91SRavi Bangoria struct breakpoint **cpu_bp; 294f95e5a3dSMarco Elver bool ret = false; 29529da4f91SRavi Bangoria int i; 29629da4f91SRavi Bangoria 297f95e5a3dSMarco Elver spin_lock(&cpu_bps_lock); 29829da4f91SRavi Bangoria cpu_bp = per_cpu_ptr(cpu_bps, cpu); 29929da4f91SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 300f95e5a3dSMarco Elver if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) { 301f95e5a3dSMarco Elver ret = true; 302f95e5a3dSMarco Elver break; 
30329da4f91SRavi Bangoria } 304f95e5a3dSMarco Elver } 305f95e5a3dSMarco Elver spin_unlock(&cpu_bps_lock); 306f95e5a3dSMarco Elver return ret; 30729da4f91SRavi Bangoria } 30829da4f91SRavi Bangoria 30929da4f91SRavi Bangoria static bool all_cpu_bps_check(struct perf_event *bp) 31029da4f91SRavi Bangoria { 31129da4f91SRavi Bangoria int cpu; 31229da4f91SRavi Bangoria 31329da4f91SRavi Bangoria for_each_online_cpu(cpu) { 31429da4f91SRavi Bangoria if (cpu_bps_check(cpu, bp)) 31529da4f91SRavi Bangoria return true; 31629da4f91SRavi Bangoria } 31729da4f91SRavi Bangoria return false; 31829da4f91SRavi Bangoria } 31929da4f91SRavi Bangoria 32029da4f91SRavi Bangoria int arch_reserve_bp_slot(struct perf_event *bp) 32129da4f91SRavi Bangoria { 32229da4f91SRavi Bangoria int ret; 32329da4f91SRavi Bangoria 32429da4f91SRavi Bangoria /* ptrace breakpoint */ 32529da4f91SRavi Bangoria if (is_ptrace_bp(bp)) { 32629da4f91SRavi Bangoria if (all_cpu_bps_check(bp)) 32729da4f91SRavi Bangoria return -ENOSPC; 32829da4f91SRavi Bangoria 32929da4f91SRavi Bangoria if (same_task_bps_check(bp)) 33029da4f91SRavi Bangoria return -ENOSPC; 33129da4f91SRavi Bangoria 33229da4f91SRavi Bangoria return task_bps_add(bp); 33329da4f91SRavi Bangoria } 33429da4f91SRavi Bangoria 33529da4f91SRavi Bangoria /* perf breakpoint */ 33629da4f91SRavi Bangoria if (is_kernel_addr(bp->attr.bp_addr)) 33729da4f91SRavi Bangoria return 0; 33829da4f91SRavi Bangoria 33929da4f91SRavi Bangoria if (bp->hw.target && bp->cpu == -1) { 34029da4f91SRavi Bangoria if (same_task_bps_check(bp)) 34129da4f91SRavi Bangoria return -ENOSPC; 34229da4f91SRavi Bangoria 34329da4f91SRavi Bangoria return task_bps_add(bp); 34429da4f91SRavi Bangoria } else if (!bp->hw.target && bp->cpu != -1) { 34529da4f91SRavi Bangoria if (all_task_bps_check(bp)) 34629da4f91SRavi Bangoria return -ENOSPC; 34729da4f91SRavi Bangoria 34829da4f91SRavi Bangoria return cpu_bps_add(bp); 34929da4f91SRavi Bangoria } 35029da4f91SRavi Bangoria 35129da4f91SRavi Bangoria if 
(same_task_bps_check(bp)) 35229da4f91SRavi Bangoria return -ENOSPC; 35329da4f91SRavi Bangoria 35429da4f91SRavi Bangoria ret = cpu_bps_add(bp); 35529da4f91SRavi Bangoria if (ret) 35629da4f91SRavi Bangoria return ret; 35729da4f91SRavi Bangoria ret = task_bps_add(bp); 35829da4f91SRavi Bangoria if (ret) 35929da4f91SRavi Bangoria cpu_bps_remove(bp); 36029da4f91SRavi Bangoria 36129da4f91SRavi Bangoria return ret; 36229da4f91SRavi Bangoria } 36329da4f91SRavi Bangoria 36429da4f91SRavi Bangoria void arch_release_bp_slot(struct perf_event *bp) 36529da4f91SRavi Bangoria { 36629da4f91SRavi Bangoria if (!is_kernel_addr(bp->attr.bp_addr)) { 36729da4f91SRavi Bangoria if (bp->hw.target) 36829da4f91SRavi Bangoria task_bps_remove(bp); 36929da4f91SRavi Bangoria if (bp->cpu != -1) 37029da4f91SRavi Bangoria cpu_bps_remove(bp); 37129da4f91SRavi Bangoria } 37229da4f91SRavi Bangoria } 37329da4f91SRavi Bangoria 3745aae8a53SK.Prasad /* 3755aae8a53SK.Prasad * Perform cleanup of arch-specific counters during unregistration 3765aae8a53SK.Prasad * of the perf-event 3775aae8a53SK.Prasad */ 3785aae8a53SK.Prasad void arch_unregister_hw_breakpoint(struct perf_event *bp) 3795aae8a53SK.Prasad { 3805aae8a53SK.Prasad /* 3815aae8a53SK.Prasad * If the breakpoint is unregistered between a hw_breakpoint_handler() 3825aae8a53SK.Prasad * and the single_step_dabr_instruction(), then cleanup the breakpoint 3835aae8a53SK.Prasad * restoration variables to prevent dangling pointers. 384fb822e60SRavi Bangoria * FIXME, this should not be using bp->ctx at all! Sayeth peterz. 
3855aae8a53SK.Prasad */ 38674c68810SRavi Bangoria if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) { 38774c68810SRavi Bangoria int i; 38874c68810SRavi Bangoria 38974c68810SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 39074c68810SRavi Bangoria if (bp->ctx->task->thread.last_hit_ubp[i] == bp) 39174c68810SRavi Bangoria bp->ctx->task->thread.last_hit_ubp[i] = NULL; 39274c68810SRavi Bangoria } 39374c68810SRavi Bangoria } 3945aae8a53SK.Prasad } 3955aae8a53SK.Prasad 3965aae8a53SK.Prasad /* 3975aae8a53SK.Prasad * Check for virtual address in kernel space. 3985aae8a53SK.Prasad */ 3998e983ff9SFrederic Weisbecker int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) 4005aae8a53SK.Prasad { 4018e983ff9SFrederic Weisbecker return is_kernel_addr(hw->address); 4025aae8a53SK.Prasad } 4035aae8a53SK.Prasad 4045aae8a53SK.Prasad int arch_bp_generic_fields(int type, int *gen_bp_type) 4055aae8a53SK.Prasad { 4069422de3eSMichael Neuling *gen_bp_type = 0; 4079422de3eSMichael Neuling if (type & HW_BRK_TYPE_READ) 4089422de3eSMichael Neuling *gen_bp_type |= HW_BREAKPOINT_R; 4099422de3eSMichael Neuling if (type & HW_BRK_TYPE_WRITE) 4109422de3eSMichael Neuling *gen_bp_type |= HW_BREAKPOINT_W; 4119422de3eSMichael Neuling if (*gen_bp_type == 0) 4125aae8a53SK.Prasad return -EINVAL; 4135aae8a53SK.Prasad return 0; 4145aae8a53SK.Prasad } 4155aae8a53SK.Prasad 4165aae8a53SK.Prasad /* 417b57aeab8SRavi Bangoria * Watchpoint match range is always doubleword(8 bytes) aligned on 418b57aeab8SRavi Bangoria * powerpc. If the given range is crossing doubleword boundary, we 419b57aeab8SRavi Bangoria * need to increase the length such that next doubleword also get 420b57aeab8SRavi Bangoria * covered. Ex, 421b57aeab8SRavi Bangoria * 422b57aeab8SRavi Bangoria * address len = 6 bytes 423b57aeab8SRavi Bangoria * |=========. 
424b57aeab8SRavi Bangoria * |------------v--|------v--------| 425b57aeab8SRavi Bangoria * | | | | | | | | | | | | | | | | | 426b57aeab8SRavi Bangoria * |---------------|---------------| 427b57aeab8SRavi Bangoria * <---8 bytes---> 428b57aeab8SRavi Bangoria * 429b57aeab8SRavi Bangoria * In this case, we should configure hw as: 430e68ef121SRavi Bangoria * start_addr = address & ~(HW_BREAKPOINT_SIZE - 1) 431b57aeab8SRavi Bangoria * len = 16 bytes 432b57aeab8SRavi Bangoria * 433e68ef121SRavi Bangoria * @start_addr is inclusive but @end_addr is exclusive. 434b57aeab8SRavi Bangoria */ 435b57aeab8SRavi Bangoria static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw) 436b57aeab8SRavi Bangoria { 437b57aeab8SRavi Bangoria u16 max_len = DABR_MAX_LEN; 438b57aeab8SRavi Bangoria u16 hw_len; 439b57aeab8SRavi Bangoria unsigned long start_addr, end_addr; 440b57aeab8SRavi Bangoria 441e68ef121SRavi Bangoria start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE); 442e68ef121SRavi Bangoria end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE); 443e68ef121SRavi Bangoria hw_len = end_addr - start_addr; 444b57aeab8SRavi Bangoria 445b57aeab8SRavi Bangoria if (dawr_enabled()) { 446b57aeab8SRavi Bangoria max_len = DAWR_MAX_LEN; 4473f31e49dSRavi Bangoria /* DAWR region can't cross 512 bytes boundary on p10 predecessors */ 4483f31e49dSRavi Bangoria if (!cpu_has_feature(CPU_FTR_ARCH_31) && 4493f31e49dSRavi Bangoria (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512))) 450b57aeab8SRavi Bangoria return -EINVAL; 45139413ae0SChristophe Leroy } else if (IS_ENABLED(CONFIG_PPC_8xx)) { 45239413ae0SChristophe Leroy /* 8xx can setup a range without limitation */ 45339413ae0SChristophe Leroy max_len = U16_MAX; 454b57aeab8SRavi Bangoria } 455b57aeab8SRavi Bangoria 456b57aeab8SRavi Bangoria if (hw_len > max_len) 457b57aeab8SRavi Bangoria return -EINVAL; 458b57aeab8SRavi Bangoria 459b57aeab8SRavi Bangoria hw->hw_len = hw_len; 460b57aeab8SRavi Bangoria return 0; 
461b57aeab8SRavi Bangoria } 462b57aeab8SRavi Bangoria 463b57aeab8SRavi Bangoria /* 4645aae8a53SK.Prasad * Validate the arch-specific HW Breakpoint register settings 4655aae8a53SK.Prasad */ 4665d5176baSFrederic Weisbecker int hw_breakpoint_arch_parse(struct perf_event *bp, 4675d5176baSFrederic Weisbecker const struct perf_event_attr *attr, 4685d5176baSFrederic Weisbecker struct arch_hw_breakpoint *hw) 4695aae8a53SK.Prasad { 470b57aeab8SRavi Bangoria int ret = -EINVAL; 4715aae8a53SK.Prasad 472b57aeab8SRavi Bangoria if (!bp || !attr->bp_len) 4735aae8a53SK.Prasad return ret; 4745aae8a53SK.Prasad 4755d5176baSFrederic Weisbecker hw->type = HW_BRK_TYPE_TRANSLATE; 4765d5176baSFrederic Weisbecker if (attr->bp_type & HW_BREAKPOINT_R) 4775d5176baSFrederic Weisbecker hw->type |= HW_BRK_TYPE_READ; 4785d5176baSFrederic Weisbecker if (attr->bp_type & HW_BREAKPOINT_W) 4795d5176baSFrederic Weisbecker hw->type |= HW_BRK_TYPE_WRITE; 4805d5176baSFrederic Weisbecker if (hw->type == HW_BRK_TYPE_TRANSLATE) 4819422de3eSMichael Neuling /* must set alteast read or write */ 4825aae8a53SK.Prasad return ret; 4835d5176baSFrederic Weisbecker if (!attr->exclude_user) 4845d5176baSFrederic Weisbecker hw->type |= HW_BRK_TYPE_USER; 4855d5176baSFrederic Weisbecker if (!attr->exclude_kernel) 4865d5176baSFrederic Weisbecker hw->type |= HW_BRK_TYPE_KERNEL; 4875d5176baSFrederic Weisbecker if (!attr->exclude_hv) 4885d5176baSFrederic Weisbecker hw->type |= HW_BRK_TYPE_HYP; 4895d5176baSFrederic Weisbecker hw->address = attr->bp_addr; 4905d5176baSFrederic Weisbecker hw->len = attr->bp_len; 4915aae8a53SK.Prasad 49285ce9a5dSMichael Neuling if (!ppc_breakpoint_available()) 49385ce9a5dSMichael Neuling return -ENODEV; 494b57aeab8SRavi Bangoria 495b57aeab8SRavi Bangoria return hw_breakpoint_validate_len(hw); 4965aae8a53SK.Prasad } 4975aae8a53SK.Prasad 4985aae8a53SK.Prasad /* 49906532a67SK.Prasad * Restores the breakpoint on the debug registers. 
50006532a67SK.Prasad * Invoke this function if it is known that the execution context is 50106532a67SK.Prasad * about to change to cause loss of MSR_SE settings. 50206532a67SK.Prasad */ 50306532a67SK.Prasad void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) 50406532a67SK.Prasad { 50506532a67SK.Prasad struct arch_hw_breakpoint *info; 50674c68810SRavi Bangoria int i; 50706532a67SK.Prasad 50874c68810SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 50974c68810SRavi Bangoria if (unlikely(tsk->thread.last_hit_ubp[i])) 51074c68810SRavi Bangoria goto reset; 51174c68810SRavi Bangoria } 51206532a67SK.Prasad return; 51306532a67SK.Prasad 51474c68810SRavi Bangoria reset: 51559dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr & ~MSR_SE); 51674c68810SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 51774c68810SRavi Bangoria info = counter_arch_bp(__this_cpu_read(bp_per_reg[i])); 51874c68810SRavi Bangoria __set_breakpoint(i, info); 51974c68810SRavi Bangoria tsk->thread.last_hit_ubp[i] = NULL; 52074c68810SRavi Bangoria } 52106532a67SK.Prasad } 52206532a67SK.Prasad 523f6780ce6SRavi Bangoria static bool is_larx_stcx_instr(int type) 524f6780ce6SRavi Bangoria { 525f6780ce6SRavi Bangoria return type == LARX || type == STCX; 52674c68810SRavi Bangoria } 52774c68810SRavi Bangoria 5283d2ffcddSRavi Bangoria static bool is_octword_vsx_instr(int type, int size) 5293d2ffcddSRavi Bangoria { 5303d2ffcddSRavi Bangoria return ((type == LOAD_VSX || type == STORE_VSX) && size == 32); 5313d2ffcddSRavi Bangoria } 5323d2ffcddSRavi Bangoria 533658d029dSChristophe Leroy /* 534bc01bdf6SRavi Bangoria * We've failed in reliably handling the hw-breakpoint. Unregister 535bc01bdf6SRavi Bangoria * it and throw a warning message to let the user know about it. 
536658d029dSChristophe Leroy */ 53774c68810SRavi Bangoria static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info) 53874c68810SRavi Bangoria { 53974c68810SRavi Bangoria WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.", 54074c68810SRavi Bangoria info->address); 541658d029dSChristophe Leroy perf_event_disable_inatomic(bp); 54274c68810SRavi Bangoria } 54374c68810SRavi Bangoria 54474c68810SRavi Bangoria static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info) 54574c68810SRavi Bangoria { 54674c68810SRavi Bangoria printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n", 54774c68810SRavi Bangoria info->address); 54874c68810SRavi Bangoria perf_event_disable_inatomic(bp); 54974c68810SRavi Bangoria } 55074c68810SRavi Bangoria 55174c68810SRavi Bangoria static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp, 55274c68810SRavi Bangoria struct arch_hw_breakpoint **info, int *hit, 553c545b9f0SChristophe Leroy ppc_inst_t instr) 55474c68810SRavi Bangoria { 55574c68810SRavi Bangoria int i; 55674c68810SRavi Bangoria int stepped; 55774c68810SRavi Bangoria 55874c68810SRavi Bangoria /* Do not emulate user-space instructions, instead single-step them */ 55974c68810SRavi Bangoria if (user_mode(regs)) { 56074c68810SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 56174c68810SRavi Bangoria if (!hit[i]) 56274c68810SRavi Bangoria continue; 56374c68810SRavi Bangoria current->thread.last_hit_ubp[i] = bp[i]; 56474c68810SRavi Bangoria info[i] = NULL; 56574c68810SRavi Bangoria } 56659dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_SE); 567658d029dSChristophe Leroy return false; 568658d029dSChristophe Leroy } 569658d029dSChristophe Leroy 57074c68810SRavi Bangoria stepped = emulate_step(regs, instr); 57174c68810SRavi Bangoria if (!stepped) { 57274c68810SRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 
57374c68810SRavi Bangoria if (!hit[i]) 57474c68810SRavi Bangoria continue; 57574c68810SRavi Bangoria handler_error(bp[i], info[i]); 57674c68810SRavi Bangoria info[i] = NULL; 57774c68810SRavi Bangoria } 57874c68810SRavi Bangoria return false; 57974c68810SRavi Bangoria } 58074c68810SRavi Bangoria return true; 58174c68810SRavi Bangoria } 58274c68810SRavi Bangoria 5833d2ffcddSRavi Bangoria static void handle_p10dd1_spurious_exception(struct arch_hw_breakpoint **info, 5843d2ffcddSRavi Bangoria int *hit, unsigned long ea) 5853d2ffcddSRavi Bangoria { 5863d2ffcddSRavi Bangoria int i; 5873d2ffcddSRavi Bangoria unsigned long hw_end_addr; 5883d2ffcddSRavi Bangoria 5893d2ffcddSRavi Bangoria /* 5903d2ffcddSRavi Bangoria * Handle spurious exception only when any bp_per_reg is set. 5913d2ffcddSRavi Bangoria * Otherwise this might be created by xmon and not actually a 5923d2ffcddSRavi Bangoria * spurious exception. 5933d2ffcddSRavi Bangoria */ 5943d2ffcddSRavi Bangoria for (i = 0; i < nr_wp_slots(); i++) { 5953d2ffcddSRavi Bangoria if (!info[i]) 5963d2ffcddSRavi Bangoria continue; 5973d2ffcddSRavi Bangoria 5983d2ffcddSRavi Bangoria hw_end_addr = ALIGN(info[i]->address + info[i]->len, HW_BREAKPOINT_SIZE); 5993d2ffcddSRavi Bangoria 6003d2ffcddSRavi Bangoria /* 6013d2ffcddSRavi Bangoria * Ending address of DAWR range is less than starting 6023d2ffcddSRavi Bangoria * address of op. 
	 */
	if ((hw_end_addr - 1) >= ea)
		continue;

	/*
	 * Those addresses need to be in the same or in two
	 * consecutive 512B blocks;
	 */
	if (((hw_end_addr - 1) >> 10) != (ea >> 10))
		continue;

	/*
	 * 'op address + 64B' generates an address that has a
	 * carry into bit 52 (crosses 2K boundary).
	 */
	if ((ea & 0x800) == ((ea + 64) & 0x800))
		continue;

	break;
	}

	/* No slot matched the workaround criteria: nothing to fake-hit. */
	if (i == nr_wp_slots())
		return;

	/*
	 * Mark every installed slot as hit but flag the event as an
	 * extraneous interrupt so the perf callback is suppressed later.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (info[i]) {
			hit[i] = 1;
			info[i]->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		}
	}
}

/*
 * Main DABR/DAWR exception handler, invoked via the die notifier chain
 * (DIE_DABR_MATCH).
 *
 * Determines which of the per-CPU breakpoint slots the faulting access
 * matched, dispatches the perf callbacks, and re-arms the hardware
 * registers (which are disabled for the duration of the handler).
 *
 * Returns NOTIFY_STOP when the exception was consumed here, or
 * NOTIFY_DONE when it was not ours (no slot hit) or when ptrace-style
 * one-shot handling applies and the SIGTRAP path should proceed.
 */
int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea = 0;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	/* 8xx has no instruction emulation path; skip decode there. */
	if (!IS_ENABLED(CONFIG_PPC_8xx))
		wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	/*
	 * Check every installed slot against the faulting access.
	 * Clearing HW_BRK_TYPE_EXTRANEOUS_IRQ resets state possibly left
	 * over from a previous spurious-hit workaround.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
			/*
			 * A slot matched but the instruction could not be
			 * read/decoded: report the error and drop the slot
			 * from further processing (info[i] = NULL keeps it
			 * out of the reset loop as well).
			 */
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
				info[i] = NULL;
				err = 1;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		/*
		 * Workaround for Power10 DD1: an octword VSX access near a
		 * 2K boundary can raise a spurious DAWR exception (matched
		 * on PVR 0x800100 above). Otherwise the exception is not
		 * ours - pass it on.
		 */
		if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
		    is_octword_vsx_instr(type, size)) {
			handle_p10dd1_spurious_exception(info, hit, ea);
		} else {
			rc = NOTIFY_DONE;
			goto out;
		}
	}

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			info[i] = NULL;	/* one-shot: do not re-arm this slot */
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		/*
		 * larx/stcx cannot be single-stepped without breaking the
		 * reservation, so report the conflict instead of stepping.
		 */
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i], info[i]);
				info[i] = NULL;
			}
			goto reset;
		}

		/* Emulate/step over the instruction; on failure just re-arm. */
		if (!stepping_handler(regs, bp, info, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	/* Re-arm every slot that was not deliberately dropped above. */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;
		__set_breakpoint(i, info[i]);
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		/* The deferred hit has been delivered; clear the record. */
		current->thread.last_hit_ubp[i] = NULL;
	}

	/* Not our single-step: let other notifier consumers see it. */
	if (!found)
		return NOTIFY_DONE;

	/*
	 * The stepped instruction has completed, so the breakpoints that
	 * were kept disabled during the step can be re-armed now.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 *
 * Die-notifier entry point: dispatches DIE_DABR_MATCH to the breakpoint
 * handler and DIE_SSTEP to the post-hit single-step handler; any other
 * event is passed on with NOTIFY_DONE.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 *
 * Unregisters and clears every ptrace-owned breakpoint slot of @tsk.
 * (unregister_hw_breakpoint() tolerates NULL entries, so no per-slot
 * check is needed here - NOTE(review): confirm against the core
 * hw_breakpoint API if that assumption changes.)
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

/* Required by the perf hw_breakpoint PMU; intentionally unimplemented. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

/*
 * Perf overflow callback for ptrace-installed breakpoints.
 */
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}