// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/sched/signal.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/*
 * Stores the breakpoint currently in use on each breakpoint address
 * register, for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}
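/*
 * Translate the UBC length encoding into a byte count.
 */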
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = get_hbp_len(hw->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	hw->address = attr->bp_addr;

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		hw->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_R:
		hw->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		hw->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		hw->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (hw->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (hw->address & align)
		return -EINVAL;

	return 0;
}

/*
 * Release the user breakpoints used by ptrace.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}
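/*
 * Handle a UBC debug exception: hand each triggered channel's event to
 * perf, raise SIGTRAP for user-space breakpoints, and re-arm whichever
 * channels should remain active.
 */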
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
			force_sig_fault(SIGTRAP, TRAP_HWBKPT,
					(void __user *)NULL);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}

BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					      unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}
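/*
 * A CPU-specific UBC driver is expected to fill in a struct sh_ubc with
 * its channel operations and hand it to register_sh_ubc() during boot.
 * A minimal sketch is shown below; the "my_ubc" names, trap number, and
 * clock name are purely hypothetical, and only fields this file actually
 * dereferences are listed:
 *
 *	static struct sh_ubc my_ubc = {
 *		.name			= "my-ubc",
 *		.num_events		= 2,
 *		.trap_nr		= 0x1e0,
 *		.enable			= my_ubc_enable,
 *		.disable		= my_ubc_disable,
 *		.enable_all		= my_ubc_enable_all,
 *		.disable_all		= my_ubc_disable_all,
 *		.active_mask		= my_ubc_active_mask,
 *		.triggered_mask		= my_ubc_triggered_mask,
 *		.clear_triggered_mask	= my_ubc_clear_triggered_mask,
 *	};
 *
 *	static int __init my_ubc_init(void)
 *	{
 *		my_ubc.clk = clk_get(NULL, "my-ubc-clk");
 *		if (IS_ERR(my_ubc.clk))
 *			return PTR_ERR(my_ubc.clk);
 *		return register_sh_ubc(&my_ubc);
 *	}
 *	arch_initcall(my_ubc_init);
 */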