/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>

#define MIN_STACK_SIZE(addr)	min((unsigned long)MAX_STACK_SIZE, \
		(unsigned long)current_thread_info() + THREAD_SIZE - (addr))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Attempt to probe at unaligned address */
	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Address should not be in exception handling code */

	p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
	p->opcode = *p->addr;

	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = UNIMP_S_INSTRUCTION;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	arch_disarm_kprobe(p);

	/* Can we remove the kprobe in the middle of kprobe handling? */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
				       struct pt_regs *regs)
{
	/* Remove the trap instructions inserted for single step and
	 * restore the original instructions
	 */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}

	return;
}

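/*
 * Software single-step: the original instruction is copied back over the
 * probe point and trap instructions are planted on whatever can execute
 * next: t1 at the computed next PC, and t2 at the branch target when the
 * probed instruction is a branch. Hitting either trap raises DIE_TRAP,
 * which lets arc_post_kprobe_handler() remove the traps and re-arm the
 * probe. If the probe was hit in a delay slot with the branch taken
 * (STATUS32.DE set), the next PC comes from BTA instead of disassembling
 * the instruction.
 */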
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long next_pc;
	unsigned long tgt_if_br = 0;
	int is_branch;
	unsigned long bta;

	/* Copy the opcode back to the kprobe location and execute the
	 * instruction. Because of this we will not be able to hit the
	 * same kprobe until this one is done
	 */
	*(p->addr) = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));

	/* Now we insert the trap at the next location after this instruction
	 * to single step. If it is a branch we insert the trap at possible
	 * branch targets
	 */

	bta = regs->bta;

	if (regs->status32 & 0x40) {
		/* We are in a delay slot with the branch taken */

		next_pc = bta & ~0x01;

		if (!p->ainsn.is_short) {
			if (bta & 0x01)
				regs->blink += 2;
			else {
				/* Branch not taken */
				next_pc += 2;

				/* next pc is taken from bta after executing the
				 * delay slot instruction
				 */
				regs->bta += 2;
			}
		}

		is_branch = 0;
	} else
		is_branch =
		    disasm_next_pc((unsigned long)p->addr, regs,
			(struct callee_regs *) current->thread.callee_reg,
			&next_pc, &tgt_if_br);

	p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
	p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
	*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;

	flush_icache_range((unsigned long)p->ainsn.t1_addr,
			   (unsigned long)p->ainsn.t1_addr +
			   sizeof(kprobe_opcode_t));

	if (is_branch) {
		p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
		p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
		*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));
	}
}

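/*
 * Entry point for a probe hit: an armed probe is the UNIMP_S opcode, and
 * the resulting instruction error is reported to kprobe_exceptions_notify()
 * below as DIE_IERR and routed here. We either run the pre-handler and
 * single step the original instruction, or, when re-entering from within a
 * handler, skip the user handlers and only single step, to avoid recursive
 * kprobes.
 */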
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)addr);

	if (p) {
		/*
		 * We have reentered the kprobe_handler: another kprobe was
		 * hit while within a handler. Save the original kprobe state
		 * and single step on the instruction of the new probe without
		 * calling any user handlers, to avoid recursive kprobes.
		 */
		if (kprobe_running()) {
			save_previous_kprobe(kcb);
			set_current_kprobe(p);
			kprobes_inc_nmissed_count(p);
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		}

		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/* If we have no pre-handler or it returned 0, we continue
		 * with normal processing. If the pre-handler returned
		 * non-zero - which is expected from setjmp_pre_handler for a
		 * jprobe - we return without single stepping and leave that
		 * to the break-handler, which is invoked by a kprobe from
		 * jprobe_return
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
		}

		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
			return 1;
		}
	}

	/* no_kprobe: */
	preempt_enable_no_resched();
	return 0;
}

static int __kprobes arc_post_kprobe_handler(unsigned long addr,
					     struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, addr, regs);

	/* Rearm the kprobe */
	arch_arm_kprobe(cur);

	/*
	 * When we return from the trap instruction we go to the next
	 * instruction. We restored the original instruction in
	 * resume_execution(), so we have to return to the same address and
	 * execute it
	 */
	regs->ret = addr;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

/*
 * Fault can be for the instruction being single stepped or for the
 * pre/post handlers in the module.
 * This is applicable for applications like user probes, where we have the
 * probe in user space and the handlers in the kernel
 */

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused the fault. We reset the current kprobe and let the
		 * exception be handled as a regular one. In our case it
		 * doesn't matter because the system will be halted
		 */
		resume_execution(cur, (unsigned long)cur->addr, regs);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		preempt_enable_no_resched();
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We are here because the instructions in the pre/post
		 * handler caused the fault.
		 */

		/* We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned zero,
		 * try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;

	default:
		break;
	}
	return 0;
}

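/*
 * Glue to the die notifier chain: DIE_IERR is raised when an armed probe
 * (UNIMP_S) is executed and starts the probe; DIE_TRAP is raised by the
 * TRAP_S planted for single stepping (see trap_is_kprobe() at the bottom of
 * this file) and finishes it by re-arming the probe and running the post
 * handler.
 */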
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	unsigned long addr = args->err;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_IERR:
		if (arc_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	case DIE_TRAP:
		if (arc_post_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	default:
		break;
	}

	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long sp_addr = regs->sp;

	kcb->jprobe_saved_regs = *regs;
	memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
	regs->ret = (unsigned long)(jp->entry);

	return 1;
}

void __kprobes jprobe_return(void)
{
	__asm__ __volatile__("unimp_s");
	return;
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long sp_addr;

	*regs = kcb->jprobe_saved_regs;
	sp_addr = regs->sp;
	memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
	preempt_enable_no_resched();

	return 1;
}

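/*
 * Return probes: arch_prepare_kretprobe() swaps the return address held in
 * blink for the address of kretprobe_trampoline below, so the probed
 * function "returns" into the trampoline. The trampoline itself carries a
 * kprobe (trampoline_p) whose pre-handler recovers the real return address
 * from the per-task kretprobe instances and resumes there.
 *
 * A minimal, illustrative user (not part of this file; the names and the
 * target symbol are made up) would look roughly like:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("%s returned\n", ri->rp->kp.symbol_name);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "some_target_function",
 *		.handler	= my_ret_handler,
 *	};
 *
 *	register_kretprobe(&my_rp);
 */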
static void __used kretprobe_trampoline_holder(void)
{
	__asm__ __volatile__(".global kretprobe_trampoline\n"
			     "kretprobe_trampoline:\n"
			     "nop\n");
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->blink;

	/* Replace the return addr with trampoline addr */
	regs->blink = (unsigned long)&kretprobe_trampoline;
}

static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have a
	 * return probe installed on them, and/or more than one return probe
	 * was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->ret = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	/* By returning a non-zero value, we are telling the kprobe handler
	 * that we don't want the post_handler to run
	 */
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	/* Register the trampoline kprobe used by kretprobes */
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
		return 1;

	return 0;
}

void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
	notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}