// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel Probes (KProbes)
 * arch/mips/kernel/kprobes.c
 *
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Some portions copied from the powerpc version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 */

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>

#include "probes-common.h"

static const union mips_instruction breakpoint_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_BP,
		.func = break_op
	}
};

static const union mips_instruction breakpoint2_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_SSTEPBP,
		.func = break_op
	}
};

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
	return __insn_has_delay_slot(insn);
}

/*
 * insn_has_ll_or_sc() checks whether an instruction is an ll or sc
 * instruction.  Placing a breakpoint on top of an atomic ll/sc pair
 * is a bad idea, so refuse to insert kprobes for such instructions.
 * We cannot do much about a breakpoint in the middle of an ll/sc
 * pair; it is up to the user to avoid those places.
 */
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
	int ret = 0;

	switch (insn.i_format.opcode) {
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	union mips_instruction insn;
	union mips_instruction prev_insn;
	int ret = 0;

	insn = p->addr[0];

	if (insn_has_ll_or_sc(insn)) {
		pr_notice("Kprobes for ll and sc instructions are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if ((probe_kernel_read(&prev_insn, p->addr - 1,
			       sizeof(mips_instruction)) == 0) &&
	    insn_has_delayslot(prev_insn)) {
		pr_notice("Kprobes for branch delay slot are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if (__insn_is_compact_branch(insn)) {
		pr_notice("Kprobes for compact branches are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	/* insn: must be on special executable page on mips. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 *
	 * On MIPS, if the instruction at the probed address is a
	 * branch instruction, we need to execute the instruction in
	 * its branch delay slot (BD) at the time of the probe hit.
	 * As MIPS also doesn't have single-stepping support, the BD
	 * instruction cannot be executed in-line; it is executed on
	 * the SSOL slot, followed by a normal breakpoint instruction
	 * in the next slot.  So, read the instruction and save it
	 * for later execution.
	 */
	if (insn_has_delayslot(insn))
		memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
	else
		memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

out:
	return ret;
}

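/*
 * For illustration: after arch_prepare_kprobe() the out-of-line slot
 * looks like this for a probed branch (say "bnez a0, target"):
 *
 *	ainsn.insn[0]	instruction copied from the branch delay slot
 *	ainsn.insn[1]	break BRK_KPROBE_SSTEPBP
 *
 * For a non-branch instruction, index zero instead holds a copy of
 * the probed instruction itself.  Executing the slot thus runs one
 * instruction and then traps with DIE_SSTEPBP, which drives
 * post_kprobe_handler() below.
 */
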
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
	kcb->kprobe_saved_epc = regs->cp0_epc;
}

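/*
 * Note on the saved state: only the interrupt-enable bit (ST0_IE) of
 * the CP0 Status register is recorded here.  prepare_singlestep()
 * clears ST0_IE so the out-of-line step cannot be interrupted, and
 * post_kprobe_handler() ORs the saved value back in afterwards.
 * kprobe_saved_epc remembers the probed address so that
 * resume_execution() can compute the fall-through epc.
 */
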
/**
 * evaluate_branch_instruction -
 *
 * Evaluate the branch instruction at the probed address during the
 * probe hit.  The result of the evaluation is the updated epc.  The
 * instruction in the delay slot will actually be single stepped
 * (using a normal breakpoint) on the SSOL slot.
 *
 * The result is also saved in the kprobe control block for later
 * use, in case we need to execute the delay-slot instruction.  The
 * latter is not needed for a NOP instruction in the delay slot, or
 * for branch-likely instructions when the branch is taken; for those
 * cases we set the SKIP_DELAYSLOT flag in the kprobe control block.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	union mips_instruction insn = p->opcode;
	long epc;
	int ret = 0;

	epc = regs->cp0_epc;
	if (epc & 3)
		goto unaligned;

	if (p->ainsn.insn->word == 0)
		kcb->flags |= SKIP_DELAYSLOT;
	else
		kcb->flags &= ~SKIP_DELAYSLOT;

	ret = __compute_return_epc_for_insn(regs, insn);
	if (ret < 0)
		return ret;

	if (ret == BRANCH_LIKELY_TAKEN)
		kcb->flags |= SKIP_DELAYSLOT;

	kcb->target_epc = regs->cp0_epc;

	return 0;

unaligned:
	pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
	force_sig(SIGBUS);
	return -EFAULT;
}

static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	int ret = 0;

	regs->cp0_status &= ~ST0_IE;

	/* single step inline if the instruction is a break */
	if (p->opcode.word == breakpoint_insn.word ||
	    p->opcode.word == breakpoint2_insn.word)
		regs->cp0_epc = (unsigned long)p->addr;
	else if (insn_has_delayslot(p->opcode)) {
		ret = evaluate_branch_instruction(p, regs, kcb);
		if (ret < 0) {
			pr_notice("Kprobes: Error in evaluating branch\n");
			return;
		}
	}
	regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the breakpoint instruction.
 * To avoid the SMP problems that can occur when we temporarily put
 * back the original opcode to single-step, we single-stepped a copy
 * of the instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.  In the case of branch instructions, the target
 * epc is restored.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	if (insn_has_delayslot(p->opcode))
		regs->cp0_epc = kcb->target_epc;
	else {
		unsigned long orig_epc = kcb->kprobe_saved_epc;
		regs->cp0_epc = orig_epc + 4;
	}
}

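/*
 * Worked example of the non-branch path: probing a "nop" at
 * 0x80001000 records kprobe_saved_epc = 0x80001000, single-steps the
 * copied nop from the SSOL slot, and resume_execution() then sets
 * epc to 0x80001004, the instruction after the probe point.  For a
 * taken branch, the epc computed by evaluate_branch_instruction()
 * (kcb->target_epc) is used instead.
 */
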
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->cp0_epc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check that we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn->word == breakpoint_insn.word) {
				regs->cp0_status &= ~ST0_IE;
				regs->cp0_status |= kcb->kprobe_saved_SR;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs, kcb);
			kcb->kprobe_status = KPROBE_REENTER;
			if (kcb->flags & SKIP_DELAYSLOT) {
				resume_execution(p, regs, kcb);
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
			}
			return 1;
		} else if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed by
			 * another cpu right after we hit it, so no
			 * further handling of this interrupt is
			 * appropriate.
			 */
			ret = 1;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	prepare_singlestep(p, regs, kcb);
	if (kcb->flags & SKIP_DELAYSLOT) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (p->post_handler)
			p->post_handler(p, regs, 0);
		resume_execution(p, regs, kcb);
		preempt_enable_no_resched();
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	regs->cp0_status |= kcb->kprobe_saved_SR;

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->cp0_status |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

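/*
 * Dispatch summary: the armed probe's BRK_KPROBE_BP break arrives as
 * DIE_BREAK and is handled by kprobe_handler(); the BRK_KPROBE_SSTEPBP
 * break planted in ainsn.insn[1] arrives as DIE_SSTEPBP once the
 * single step completes and is handled by post_kprobe_handler().
 */
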
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		".set push\n\t"
		/* Keep the assembler from reordering and placing JR here. */
		".set noreorder\n\t"
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		".set pop"
		: : : "memory");
}

void kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

	/* Replace the return addr with trampoline addr */
	regs->regs[31] = (unsigned long)kretprobe_trampoline;
}

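/*
 * Kretprobe flow, for illustration: when a probed function is
 * entered, arch_prepare_kretprobe() runs from its entry kprobe,
 * saves the caller's address from $ra (regs->regs[31]) and rewrites
 * $ra to point at kretprobe_trampoline.  The function's final
 * "jr ra" then lands on the trampoline, whose probe invokes
 * trampoline_probe_handler() below to fire the user handler and
 * restore the real return address.
 */
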
/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a
	 * given task either because multiple functions in the call path
	 * have a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

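/*
 * Minimal usage sketch (hypothetical module code, not part of this
 * file): a probe registered against this backend fires its
 * pre_handler before the probed instruction is single-stepped as
 * described above.  The symbol name below is only an example.
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_page_fault",
 *	};
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at epc=%lx\n", regs->cp0_epc);
 *		return 0;
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		kp.pre_handler = my_pre;
 *		return register_kprobe(&kp);
 *	}
 */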