// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 single-step debug support and mdscr context switching.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/stat.h>
#include <linux/uaccess.h>
#include <linux/sched/task_stack.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
						ID_AA64DFR0_DEBUGVER_SHIFT);
}

/*
 * MDSCR access routines.
 */
static void mdscr_write(u32 mdscr)
{
	unsigned long flags;
	flags = local_daif_save();
	write_sysreg(mdscr, mdscr_el1);
	local_daif_restore(flags);
}
NOKPROBE_SYMBOL(mdscr_write);

static u32 mdscr_read(void)
{
	return read_sysreg(mdscr_el1);
}
NOKPROBE_SYMBOL(mdscr_read);

/*
 * Allow root to disable self-hosted debug from userspace.
 * This is useful if you want to connect an external JTAG debugger.
 */
static bool debug_enabled = true;

static int create_debug_debugfs_entry(void)
{
	debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
	return 0;
}
fs_initcall(create_debug_debugfs_entry);

static int __init early_debug_disable(char *buf)
{
	debug_enabled = false;
	return 0;
}

early_param("nodebugmon", early_debug_disable);

/*
 * Keep track of debug users on each core.
 * The ref counts are per-cpu and only updated via this_cpu_*() with
 * preemption disabled, so plain ints are sufficient.
 */
static DEFINE_PER_CPU(int, mde_ref_count);
static DEFINE_PER_CPU(int, kde_ref_count);

void enable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, enable = 0;

	WARN_ON(preemptible());

	if (this_cpu_inc_return(mde_ref_count) == 1)
		enable = DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_inc_return(kde_ref_count) == 1)
		enable |= DBG_MDSCR_KDE;

	if (enable && debug_enabled) {
		mdscr = mdscr_read();
		mdscr |= enable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(enable_debug_monitors);

void disable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, disable = 0;

	WARN_ON(preemptible());

	if (this_cpu_dec_return(mde_ref_count) == 0)
		disable = ~DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_dec_return(kde_ref_count) == 0)
		disable &= ~DBG_MDSCR_KDE;

	if (disable) {
		mdscr = mdscr_read();
		mdscr &= disable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(disable_debug_monitors);

/*
 * OS lock clearing.
 */
static int clear_os_lock(unsigned int cpu)
{
	write_sysreg(0, osdlr_el1);
	write_sysreg(0, oslar_el1);
	isb();
	return 0;
}

static int debug_monitors_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
				 "arm64/debug_monitors:starting",
				 clear_os_lock, NULL);
}
postcore_initcall(debug_monitors_init);
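
/*
 * Illustrative sketch only (not part of the upstream file): a hypothetical
 * EL1 debug client would bracket its use of the monitors with the enable/
 * disable calls above, with preemption disabled so the per-cpu reference
 * counts stay consistent:
 *
 *	preempt_disable();
 *	enable_debug_monitors(DBG_ACTIVE_EL1);
 *	// ... program breakpoint/watchpoint registers ...
 *	disable_debug_monitors(DBG_ACTIVE_EL1);
 *	preempt_enable();
 */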

/*
 * Single step API and exception handling.
 */
static void set_regs_spsr_ss(struct pt_regs *regs)
{
	regs->pstate |= DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(set_regs_spsr_ss);

static void clear_regs_spsr_ss(struct pt_regs *regs)
{
	regs->pstate &= ~DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(clear_regs_spsr_ss);

static DEFINE_SPINLOCK(debug_hook_lock);
static LIST_HEAD(user_step_hook);
static LIST_HEAD(kernel_step_hook);

static void register_debug_hook(struct list_head *node, struct list_head *list)
{
	spin_lock(&debug_hook_lock);
	list_add_rcu(node, list);
	spin_unlock(&debug_hook_lock);
}

static void unregister_debug_hook(struct list_head *node)
{
	spin_lock(&debug_hook_lock);
	list_del_rcu(node);
	spin_unlock(&debug_hook_lock);
	synchronize_rcu();
}

void register_user_step_hook(struct step_hook *hook)
{
	register_debug_hook(&hook->node, &user_step_hook);
}

void unregister_user_step_hook(struct step_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

void register_kernel_step_hook(struct step_hook *hook)
{
	register_debug_hook(&hook->node, &kernel_step_hook);
}

void unregister_kernel_step_hook(struct step_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

/*
 * Call the registered single-step handlers.
 * There is no syndrome information to identify the right handler, so call
 * each registered handler in turn until one of them handles the exception
 * (i.e. returns DBG_HOOK_HANDLED).
 */
static int call_step_hook(struct pt_regs *regs, unsigned int esr)
{
	struct step_hook *hook;
	struct list_head *list;
	int retval = DBG_HOOK_ERROR;

	list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;

	/*
	 * The single-step exception is taken with interrupts disabled, so
	 * this function cannot be preempted and it is safe to walk the
	 * RCU-protected list here.
	 */
	list_for_each_entry_rcu(hook, list, node) {
		retval = hook->fn(regs, esr);
		if (retval == DBG_HOOK_HANDLED)
			break;
	}

	return retval;
}
NOKPROBE_SYMBOL(call_step_hook);

static void send_user_sigtrap(int si_code)
{
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	if (interrupts_enabled(regs))
		local_irq_enable();

	arm64_force_sig_fault(SIGTRAP, si_code,
			      (void __user *)instruction_pointer(regs),
			      "User debug trap");
}

static int single_step_handler(unsigned long unused, unsigned int esr,
			       struct pt_regs *regs)
{
	bool handler_found = false;

	/*
	 * If we are stepping a pending breakpoint, call the hw_breakpoint
	 * handler first.
	 */
	if (!reinstall_suspended_bps(regs))
		return 0;

	if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
		handler_found = true;

	if (!handler_found && user_mode(regs)) {
		send_user_sigtrap(TRAP_TRACE);

		/*
		 * ptrace will disable single step unless explicitly
		 * asked to re-enable it. For other clients, it makes
		 * sense to leave it enabled (i.e. rewind the controls
		 * to the active-not-pending state).
		 */
		user_rewind_single_step(current);
	} else if (!handler_found) {
		pr_warn("Unexpected kernel single-step exception at EL1\n");
		/*
		 * Re-enable stepping since we know that we will be
		 * returning to regs.
		 */
		set_regs_spsr_ss(regs);
	}

	return 0;
}
NOKPROBE_SYMBOL(single_step_handler);
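
/*
 * Illustrative sketch only (hypothetical client, not part of the upstream
 * file): a kernel single-step consumer registers a hook and returns
 * DBG_HOOK_HANDLED from its callback when the exception belongs to it:
 *
 *	static int my_step_fn(struct pt_regs *regs, unsigned int esr)
 *	{
 *		if (!my_step_is_pending())	// hypothetical helper
 *			return DBG_HOOK_ERROR;
 *		kernel_disable_single_step();
 *		return DBG_HOOK_HANDLED;
 *	}
 *
 *	static struct step_hook my_step_hook = { .fn = my_step_fn };
 *	register_kernel_step_hook(&my_step_hook);
 */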

static LIST_HEAD(user_break_hook);
static LIST_HEAD(kernel_break_hook);

void register_user_break_hook(struct break_hook *hook)
{
	register_debug_hook(&hook->node, &user_break_hook);
}

void unregister_user_break_hook(struct break_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

void register_kernel_break_hook(struct break_hook *hook)
{
	register_debug_hook(&hook->node, &kernel_break_hook);
}

void unregister_kernel_break_hook(struct break_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
	struct break_hook *hook;
	struct list_head *list;
	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;

	list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;

	/*
	 * The BRK exception is taken with interrupts disabled, so this
	 * function cannot be preempted and it is safe to walk the
	 * RCU-protected list here.
	 */
	list_for_each_entry_rcu(hook, list, node) {
		unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

		if ((comment & ~hook->mask) == hook->imm)
			fn = hook->fn;
	}

	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
NOKPROBE_SYMBOL(call_break_hook);

static int brk_handler(unsigned long unused, unsigned int esr,
		       struct pt_regs *regs)
{
	if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
		return 0;

	if (user_mode(regs)) {
		send_user_sigtrap(TRAP_BRKPT);
	} else {
		pr_warn("Unexpected kernel BRK exception at EL1\n");
		return -EFAULT;
	}

	return 0;
}
NOKPROBE_SYMBOL(brk_handler);

int aarch32_break_handler(struct pt_regs *regs)
{
	u32 arm_instr;
	u16 thumb_instr;
	bool bp = false;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!compat_user_mode(regs))
		return -EFAULT;

	if (compat_thumb_mode(regs)) {
		/* get 16-bit Thumb instruction */
		__le16 instr;
		get_user(instr, (__le16 __user *)pc);
		thumb_instr = le16_to_cpu(instr);
		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
			/* get second half of 32-bit Thumb-2 instruction */
			get_user(instr, (__le16 __user *)(pc + 2));
			thumb_instr = le16_to_cpu(instr);
			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
		} else {
			bp = thumb_instr == AARCH32_BREAK_THUMB;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr;
		get_user(instr, (__le32 __user *)pc);
		arm_instr = le32_to_cpu(instr);
		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
	}

	if (!bp)
		return -EFAULT;

	send_user_sigtrap(TRAP_BRKPT);
	return 0;
}
NOKPROBE_SYMBOL(aarch32_break_handler);

static int __init debug_traps_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
			      TRAP_TRACE, "single-step handler");
	hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
			      TRAP_BRKPT, "ptrace BRK handler");
	return 0;
}
arch_initcall(debug_traps_init);
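
/*
 * Illustrative sketch only (hypothetical client, not part of the upstream
 * file): a kernel BRK user claims a BRK immediate by filling in a
 * break_hook; call_break_hook() above selects it when
 * (comment & ~mask) == imm for the trapping instruction:
 *
 *	#define MY_BRK_IMM	0x7ff		// hypothetical immediate
 *
 *	static int my_brk_fn(struct pt_regs *regs, unsigned int esr)
 *	{
 *		// decode regs, fix up the PC, etc.
 *		return DBG_HOOK_HANDLED;
 *	}
 *
 *	static struct break_hook my_brk_hook = {
 *		.fn	= my_brk_fn,
 *		.imm	= MY_BRK_IMM,
 *	};
 *	register_kernel_break_hook(&my_brk_hook);
 */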

/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
{
	/*
	 * If single step is active for this thread, then set SPSR.SS
	 * to 1 to avoid returning to the active-pending state.
	 */
	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);

void user_fastforward_single_step(struct task_struct *task)
{
	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
		clear_regs_spsr_ss(task_pt_regs(task));
}

/* Kernel API */
void kernel_enable_single_step(struct pt_regs *regs)
{
	WARN_ON(!irqs_disabled());
	set_regs_spsr_ss(regs);
	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
	enable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_enable_single_step);

void kernel_disable_single_step(void)
{
	WARN_ON(!irqs_disabled());
	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
	disable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_disable_single_step);

int kernel_active_single_step(void)
{
	WARN_ON(!irqs_disabled());
	return mdscr_read() & DBG_MDSCR_SS;
}
NOKPROBE_SYMBOL(kernel_active_single_step);

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_enable_single_step);

void user_disable_single_step(struct task_struct *task)
{
	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}
NOKPROBE_SYMBOL(user_disable_single_step);
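
/*
 * Illustrative sketch only (hypothetical caller, not part of the upstream
 * file): kernel single-stepping is driven from a debug exception handler
 * with interrupts disabled. A debugger-style client steps one instruction
 * roughly as follows:
 *
 *	kernel_enable_single_step(regs);
 *	// return from the exception; exactly one instruction executes,
 *	// then the registered kernel step hook fires and calls:
 *	kernel_disable_single_step();
 */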