/*
 *  Kernel Probes (KProbes)
 *  arch/ppc64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>

static DECLARE_MUTEX(kprobe_mutex);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
		printk("Cannot register a kprobe on rfid or mtmsrd\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64 */
	if (!ret) {
		down(&kprobe_mutex);
		p->ainsn.insn = get_insn_slot();
		up(&kprobe_mutex);
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}
	return ret;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	down(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	up(&kprobe_mutex);
}
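/*
 * Illustrative sketch (annotation, not part of the original file): what a
 * minimal client of the arch hooks above might look like. Names prefixed
 * with "my_" are hypothetical. The probe address must pass the checks in
 * arch_prepare_kprobe(): 4-byte aligned and not an mtmsrd/rfid instruction.
 *
 *	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at nip=0x%lx\n", regs->nip);
 *		return 0;		0 => proceed to single-step setup
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.addr        = (kprobe_opcode_t *)my_text_address,
 *		.pre_handler = my_pre_handler,
 *	};
 *
 * where my_text_address is a hypothetical kernel-text address; module init
 * would then call register_kprobe(&my_kp), which in turn drives
 * arch_prepare_kprobe()/arch_copy_kprobe()/arch_arm_kprobe() above.
 */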
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = *p->ainsn.insn;

	regs->msr |= MSR_SE;

	/* single step inline if it is a trap variant */
	if (is_trap(insn))
		regs->nip = (unsigned long)p->addr;
	else
		regs->nip = (unsigned long)p->ainsn.insn;
}

static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				      struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *)regs->link;

		/* Replace the return addr with trampoline addr */
		regs->link = (unsigned long)kretprobe_trampoline;
		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
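/*
 * Illustrative sketch (annotation, not part of the original file):
 * arch_prepare_kretprobe() above hijacks the link register, so a return
 * probe client only supplies a handler that runs when the probed function
 * returns through kretprobe_trampoline. Names prefixed with "my_" are
 * hypothetical:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk("probed function returned to %p\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler   = my_ret_handler,
 *		.maxactive = 4,
 *	};
 *
 * .maxactive bounds concurrent activations; when no free instance is
 * available, the else-branch above bumps rp->nmissed instead. The client
 * sets my_rp.kp.addr to the probed function's text address and calls
 * register_kretprobe(&my_rp).
 */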
static inline int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    is_trap(insn)) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else.
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline:\n"
		     "nop\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
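/*
 * Worked example (annotation, not part of the original file) of the list
 * walk above: if two return probes are registered on the same function,
 * both fire at function entry. The first records the real return address
 * and redirects the link register to kretprobe_trampoline; the second
 * records what is now in the link register - the trampoline itself. With
 * head insertion, the task's instance list reads, newest first:
 *
 *	instance #2: ret_addr = kretprobe_trampoline
 *	instance #1: ret_addr = real caller
 *
 * The walk therefore runs both handlers, but only breaks (and restores
 * regs->nip) at instance #1, the one holding the genuine return address.
 */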
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the previously saved kprobe state and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;

	return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs);
		regs->msr &= ~MSR_SE;
		regs->msr |= kcb->kprobe_saved_msr;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);

	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

void __kprobes jprobe_return_end(void)
{
}
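/*
 * Illustrative sketch (annotation, not part of the original file): a jprobe
 * client for this backend. setjmp_pre_handler() above redirects nip (and
 * the TOC pointer in r2) to jp->entry, so the entry routine sees the probed
 * function's arguments and must end with jprobe_return(), whose "trap"
 * lands in longjmp_break_handler() below to restore the saved registers.
 * Names prefixed with "my_" are hypothetical:
 *
 *	static long my_jp_entry(long arg0, long arg1)
 *	{
 *		printk("called with %ld, %ld\n", arg0, arg1);
 *		jprobe_return();	mandatory; does not return normally
 *		return 0;		never reached
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = JPROBE_ENTRY(my_jp_entry),
 *	};
 *
 * The client sets my_jp.kp.addr to the probed function's text address and
 * calls register_jprobe(&my_jp); the entry routine's signature should match
 * the probed function's so the argument registers line up.
 */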
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
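/*
 * Note (annotation, not part of the original file): registering
 * trampoline_p in arch_init_kprobes() plants a probe on the nop at
 * kretprobe_trampoline, so every hijacked function return re-enters
 * kprobe_handler() and is dispatched to trampoline_probe_handler(). The
 * generic kprobes core is expected to invoke arch_init_kprobes() at boot
 * and to route die notifications here: DIE_BPT (program check on
 * BREAKPOINT_INSTRUCTION) drives kprobe_handler(), and DIE_SSTEP
 * (single-step completion via MSR_SE) drives post_kprobe_handler(), as
 * wired up in kprobe_exceptions_notify() above.
 */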