/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
		printk("Cannot register a kprobe on rfid or mtmsrd\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
	}

	return ret;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->msr |= MSR_SE;

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, as values in regs could play a part in
	 * whether the trap is taken or not.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}
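/*
 * Worked example of the out-of-line single-step flow above (the address
 * is made up for illustration): arch_arm_kprobe() writes
 * BREAKPOINT_INSTRUCTION over the probed instruction at, say,
 * 0xc000000000012340; the original word is saved in p->opcode and a
 * copy lives in the executable slot p->ainsn.insn.  When the breakpoint
 * traps, kprobe_handler() runs the user pre_handler and
 * prepare_singlestep() sets MSR_SE and points regs->nip at the slot
 * copy.  The resulting single-step trap lands in post_kprobe_handler(),
 * whose resume_execution() resets regs->nip to p->addr and lets
 * emulate_step() compute the correct next nip, falling back to
 * p->addr + 4 when the instruction needs no emulation.
 */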
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *)regs->link;

		/* Replace the return addr with trampoline addr */
		regs->link = (unsigned long)kretprobe_trampoline;
		add_rp_inst(ri);
	} else {
		/* No free instance available: just record the miss */
		rp->nmissed++;
	}
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobes variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, it does not belong to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
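/*
 * Reentrancy sketch for the KPROBE_REENTER path above: if a user
 * pre_handler itself contains a probed instruction, the nested
 * breakpoint re-enters kprobe_handler() with kprobe_running() true.
 * The nested probe's user handlers are deliberately not called (the
 * hit is only recorded via kprobes_inc_nmissed_count()); its
 * instruction is single-stepped with status KPROBE_REENTER, after
 * which post_kprobe_handler() calls restore_previous_kprobe() so the
 * outer probe can finish normally.
 */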
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
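/*
 * Worked example for the instance list walked above (function names
 * are hypothetical): suppose func_a() calls func_b() and both have
 * return probes.  arch_prepare_kretprobe() hijacks the link register
 * on each entry and inserts at the head, so the per-task list reads,
 * head first:
 *
 *	ri(func_b): ret_addr = real return address in func_a
 *	ri(func_a): ret_addr = real return address in func_a's caller
 *
 * When func_b returns through the trampoline, the loop fires func_b's
 * handler, recycles its instance, and stops at the first ret_addr that
 * is not the trampoline, leaving func_a's instance for the later hit.
 */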
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, msr
	 * will have SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;

	return 1;
}

static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point nip back to the probe address, and
		 * allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SE;
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user(), etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it; let
		 * do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
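/*
 * For context: the generic kprobes core (kernel/kprobes.c) hooks the
 * notify routine below into the die-notifier chain.  A sketch of that
 * registration, from the generic code rather than this file:
 *
 *	static struct notifier_block kprobe_exceptions_nb = {
 *		.notifier_call = kprobe_exceptions_notify,
 *		.priority = 0x7fffffff,	// run before other notifiers
 *	};
 *
 *	register_die_notifier(&kprobe_exceptions_nb);
 */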
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* set up the return addr to point at the jprobe handler routine */
	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);

	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

void __kprobes jprobe_return_end(void)
{
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
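/*
 * Illustrative usage (a sketch, not part of this file): a client module
 * drives the arch hooks above through the generic kprobes API.  The
 * handler and probe names below are hypothetical; on ppc64 the probed
 * address must be the function's text address, not its descriptor.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("probe hit at nip=0x%lx\n", regs->nip);
 *		return 0;	// 0: continue with the single-step
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.pre_handler = my_pre_handler,
 *		// .addr filled in with the target text address by the caller
 *	};
 *
 *	static int __init my_probe_init(void)
 *	{
 *		// ends up in arch_prepare_kprobe()/arch_arm_kprobe()
 *		return register_kprobe(&my_kp);
 *	}
 *
 *	static void __exit my_probe_exit(void)
 *	{
 *		unregister_kprobe(&my_kp);
 *	}
 *
 *	module_init(my_probe_init);
 *	module_exit(my_probe_exit);
 */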