/*
 * Kernel probes (kprobes) for SuperH
 *
 * Copyright (C) 2007 Chris Smith <chris.smith@st.com>
 * Copyright (C) 2006 Lineo Solutions, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

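/*
 * Single-stepping over a probed instruction is emulated by temporarily
 * planting breakpoints on the instruction(s) that can execute next (see
 * prepare_singlestep()).  These records remember the original opcodes at
 * the probe site and at the temporary breakpoint(s) so that they can be
 * restored once the step has completed.
 */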
static struct kprobe saved_current_opcode;
static struct kprobe saved_next_opcode;
static struct kprobe saved_next_opcode2;

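/*
 * Instruction decode helpers: each macro matches the 16-bit SH encoding of
 * a control-transfer instruction.  The register-indirect forms
 * (JMP/JSR/BRAF/BSRF) carry the register number in bits 8-11; BRA/BSR carry
 * a signed 12-bit displacement and the conditional branches a signed 8-bit
 * one, both counted in 16-bit instruction words.
 */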
#define OPCODE_JMP(x)	(((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x)	(((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x)	(((x) & 0xF000) == 0xa000)
#define OPCODE_BRAF(x)	(((x) & 0xF0FF) == 0x0023)
#define OPCODE_BSR(x)	(((x) & 0xF000) == 0xb000)
#define OPCODE_BSRF(x)	(((x) & 0xF0FF) == 0x0003)

#define OPCODE_BF_S(x)	(((x) & 0xFF00) == 0x8f00)
#define OPCODE_BT_S(x)	(((x) & 0xFF00) == 0x8d00)

#define OPCODE_BF(x)	(((x) & 0xFF00) == 0x8b00)
#define OPCODE_BT(x)	(((x) & 0xFF00) == 0x8900)

#define OPCODE_RTS(x)	(((x) & 0x000F) == 0x000b)
#define OPCODE_RTE(x)	(((x) & 0xFFFF) == 0x002b)

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);

	if (OPCODE_RTE(opcode))
		return -EFAULT;	/* Bad breakpoint */

	p->opcode = opcode;

	return 0;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (*p->addr == BREAKPOINT_INSTRUCTION)
		return 1;

	return 0;
}

/*
 * If an illegal slot instruction exception occurs for an address
 * containing a kprobe, remove the probe.
 *
 * Returns 0 if the exception was handled successfully, 1 otherwise.
 */
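/*
 * A trapa in a branch delay slot raises an illegal slot instruction
 * exception instead of the usual breakpoint trap, so a probe planted in a
 * delay slot can never fire normally; the only thing to do is remove it.
 */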
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
	struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);

	if (p != NULL) {
		printk(KERN_WARNING
		       "Warning: removing kprobe from delay slot: 0x%.8x\n",
		       (unsigned int)pc + 2);
		unregister_kprobe(p);
		return 0;
	}

	return 1;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (saved_next_opcode.addr != 0x0) {
		arch_disarm_kprobe(p);
		arch_disarm_kprobe(&saved_next_opcode);
		saved_next_opcode.addr = 0x0;
		saved_next_opcode.opcode = 0x0;

		if (saved_next_opcode2.addr != 0x0) {
			arch_disarm_kprobe(&saved_next_opcode2);
			saved_next_opcode2.addr = 0x0;
			saved_next_opcode2.opcode = 0x0;
		}
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
}

/*
 * Singlestep is implemented by disabling the current kprobe and setting one
 * on the next instruction, following branches. Two probes are set if the
 * branch is conditional.
 */
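/*
 * Branch targets below are computed the way the hardware does it: relative
 * to the address of the branch plus 4, with displacements counted in
 * 16-bit instruction words.  For the delayed conditional branches (BF/S,
 * BT/S) the fall-through probe goes two words ahead, past the delay slot,
 * since a breakpoint cannot live in a delay slot.
 */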
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t *addr = NULL;
	saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc);
	addr = saved_current_opcode.addr;

	if (p != NULL) {
		arch_disarm_kprobe(p);

		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			saved_next_opcode.addr =
			    (kprobe_opcode_t *) regs->regs[reg_nr];
		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
			/* sign-extend the 12-bit displacement */
			unsigned long disp = (p->opcode & 0x0FFF);
			if (disp & 0x800)
				disp |= 0xFFFFF000;
			saved_next_opcode.addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);

		} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			saved_next_opcode.addr =
			    (kprobe_opcode_t *) (regs->pc + 4 +
						 regs->regs[reg_nr]);

		} else if (OPCODE_RTS(p->opcode)) {
			saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr;

		} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
			/* sign-extend the 8-bit displacement */
			unsigned long disp = (p->opcode & 0x00FF);
			if (disp & 0x80)
				disp |= 0xFFFFFF00;
			/* case 1: branch not taken */
			saved_next_opcode.addr = p->addr + 1;
			/* case 2: branch taken */
			saved_next_opcode2.addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
			arch_arm_kprobe(&saved_next_opcode2);

		} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
			/* sign-extend the 8-bit displacement */
			unsigned long disp = (p->opcode & 0x00FF);
			if (disp & 0x80)
				disp |= 0xFFFFFF00;
			/* case 1: branch not taken (skip the delay slot) */
			saved_next_opcode.addr = p->addr + 2;
			/* case 2: branch taken */
			saved_next_opcode2.addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
			arch_arm_kprobe(&saved_next_opcode2);

		} else {
			saved_next_opcode.addr = p->addr + 1;
		}

		saved_next_opcode.opcode = *(saved_next_opcode.addr);
		arch_arm_kprobe(&saved_next_opcode);
	}
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->pr;

	/* Replace the return addr with trampoline addr */
	regs->pr = (unsigned long)kretprobe_trampoline;
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (regs->pc);

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				goto no_kprobe;
			}
			/* We have re-entered kprobe_handler() because
			 * another probe was hit while within the handler.
			 * Save the original kprobe variables and just
			 * single-step on the instruction of the new probe,
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		/* Not one of ours: let kernel handle it */
		if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
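/*
 * The trampoline itself is probed: arch_init_kprobes() registers
 * trampoline_p on it, so a retprobed function returning through it hits
 * the breakpoint and trampoline_probe_handler() below recovers the real
 * return address before execution resumes.
 */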
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile (".globl kretprobe_trampoline\n"
		      "kretprobe_trampoline:\n\t"
		      "nop\n");
}

/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	regs->pc = orig_ret_address;
	kretprobe_hash_unlock(current, &flags);

	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return orig_ret_address;
}

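/*
 * Called once the temporary breakpoint planted by prepare_singlestep() has
 * been hit: run the post handler, pull the temporary breakpoint(s), re-arm
 * the original probe and either restore the outer probe state (on re-entry)
 * or clear the current kprobe.
 */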
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	kprobe_opcode_t *addr = NULL;
	struct kprobe *p = NULL;

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	if (saved_next_opcode.addr != 0x0) {
		arch_disarm_kprobe(&saved_next_opcode);
		saved_next_opcode.addr = 0x0;
		saved_next_opcode.opcode = 0x0;

		addr = saved_current_opcode.addr;
		saved_current_opcode.addr = 0x0;

		p = get_kprobe(addr);
		arch_arm_kprobe(p);

		if (saved_next_opcode2.addr != 0x0) {
			arch_disarm_kprobe(&saved_next_opcode2);
			saved_next_opcode2.addr = 0x0;
			saved_next_opcode2.opcode = 0x0;
		}
	}

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the pc back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->pc = (unsigned long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			regs->pc = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
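/*
 * On SH both the original breakpoint and the completion of the emulated
 * single step arrive as DIE_TRAP; kprobe_status is what distinguishes a
 * fresh probe hit from the trap raised by the temporary breakpoint on the
 * following instruction.
 */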
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct kprobe *p = NULL;
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (args->regs->pc);
	if (val == DIE_TRAP) {
		if (!kprobe_running()) {
			if (kprobe_handler(args->regs)) {
				ret = NOTIFY_STOP;
			} else {
				/* Not a kprobe trap */
				ret = NOTIFY_DONE;
			}
		} else {
			p = get_kprobe(addr);
			if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
			    (kcb->kprobe_status == KPROBE_REENTER)) {
				if (post_kprobe_handler(args->regs))
					ret = NOTIFY_STOP;
			} else {
				if (kprobe_handler(args->regs)) {
					ret = NOTIFY_STOP;
				} else {
					p = __get_cpu_var(current_kprobe);
					if (p->break_handler &&
					    p->break_handler(p, args->regs))
						ret = NOTIFY_STOP;
				}
			}
		}
	}

	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_r15 = regs->regs[15];
	addr = kcb->jprobe_saved_r15;

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}

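/*
 * jprobe_return() issues the same trapa used for kprobe breakpoints, so it
 * re-enters the kprobes trap path and ends up in longjmp_break_handler()
 * below, which restores the register and stack state saved by
 * setjmp_pre_handler().
 */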
void __kprobes jprobe_return(void)
{
	asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = kcb->jprobe_saved_r15;
	u8 *addr = (u8 *)regs->pc;

	if ((addr >= (u8 *)jprobe_return) &&
	    (addr <= (u8 *)jprobe_return_end)) {
		*regs = kcb->jprobe_saved_regs;

		memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));

		kcb->kprobe_status = KPROBE_HIT_SS;
		preempt_enable_no_resched();
		return 1;
	}

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	saved_next_opcode.addr = 0x0;
	saved_next_opcode.opcode = 0x0;

	saved_current_opcode.addr = 0x0;
	saved_current_opcode.opcode = 0x0;

	saved_next_opcode2.addr = 0x0;
	saved_next_opcode2.opcode = 0x0;

	return register_kprobe(&trampoline_p);
}