xref: /linux/arch/arc/kernel/kprobes.c (revision c3d6b628395fe6ec3442a83ddf02334c54867d43)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>

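/*
 * MIN_STACK_SIZE(addr): number of bytes the jprobe helpers below save and
 * restore around @addr; at most MAX_STACK_SIZE, and never past the end of
 * the current thread's kernel stack.
 */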
#define MIN_STACK_SIZE(addr)	min((unsigned long)MAX_STACK_SIZE, \
		(unsigned long)current_thread_info() + THREAD_SIZE - (addr))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Reject an attempt to probe at an unaligned address */
	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Address should not be in exception handling code */

	p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
	p->opcode = *p->addr;

	return 0;
}

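/*
 * Typical usage, as a sketch only (the symbol and handler names below are
 * hypothetical, not defined in this file): a client registers a probe with
 * the generic kprobes core, which calls the arch hooks here, e.g.
 * arch_prepare_kprobe() to validate the address and save the opcode, and
 * arch_arm_kprobe() to plant the breakpoint.
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "target_function",
 *		.pre_handler	= my_pre_handler,
 *	};
 *	register_kprobe(&kp);
 */
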
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	/* Plant the breakpoint: overwrite the probed instruction with UNIMP_S */
	*p->addr = UNIMP_S_INSTRUCTION;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	/* Put the original instruction back */
	*p->addr = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	arch_disarm_kprobe(p);

	/* Can we remove the kprobe in the middle of kprobe handling? */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
	__get_cpu_var(current_kprobe) = p;
}

static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
				       struct pt_regs *regs)
{
	/* Remove the trap instructions inserted for single step and
	 * restore the original instructions
	 */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}
}

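/*
 * Single stepping the probed instruction is done "in place":
 * setup_singlestep() puts the original instruction back at the probe site
 * and plants TRAP_S breakpoints at every address execution can reach next,
 * i.e. the following instruction (t1) and, for a branch, the branch target
 * (t2). The resulting trap is handled by arc_post_kprobe_handler() via
 * DIE_TRAP.
 */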
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long next_pc;
	unsigned long tgt_if_br = 0;
	int is_branch;
	unsigned long bta;

	/* Copy the original opcode back to the kprobe location and execute
	 * the instruction. Because of this we will not be able to get into
	 * the same kprobe until this one is done
	 */
	*(p->addr) = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));

	/* Now we insert the trap at the next location after this instruction
	 * so we can single step. If it is a branch we also insert a trap at
	 * the possible branch target
	 */

	bta = regs->bta;

	if (regs->status32 & 0x40) {
		/* STATUS32.DE is set: we are in a delay slot with the
		 * branch taken
		 */

		next_pc = bta & ~0x01;

		if (!p->ainsn.is_short) {
			if (bta & 0x01)
				regs->blink += 2;
			else {
				/* Branch not taken */
				next_pc += 2;

				/* next pc is taken from bta after executing the
				 * delay slot instruction
				 */
				regs->bta += 2;
			}
		}

		is_branch = 0;
	} else
		is_branch =
		    disasm_next_pc((unsigned long)p->addr, regs,
			(struct callee_regs *) current->thread.callee_reg,
			&next_pc, &tgt_if_br);

	p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
	p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
	*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;

	flush_icache_range((unsigned long)p->ainsn.t1_addr,
			   (unsigned long)p->ainsn.t1_addr +
			   sizeof(kprobe_opcode_t));

	if (is_branch) {
		p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
		p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
		*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));
	}
}

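/*
 * Breakpoint entry point: the UNIMP_S planted by arch_arm_kprobe() raises an
 * instruction error, which is routed here as DIE_IERR by
 * kprobe_exceptions_notify(); @addr is the faulting address from die_args.
 */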
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)addr);

	if (p) {
		/*
		 * We have re-entered the kprobe handler: another kprobe was
		 * hit while we were inside a handler. Save the original
		 * kprobe and single step the instruction of the new probe
		 * without calling any of its user handlers, to avoid
		 * recursive kprobes.
		 */
		if (kprobe_running()) {
			save_previous_kprobe(kcb);
			set_current_kprobe(p);
			kprobes_inc_nmissed_count(p);
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		}

		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/* If we have no pre-handler or it returned 0, we continue with
		 * normal processing. If we have a pre-handler and it returned
		 * non-zero - which is expected from setjmp_pre_handler for a
		 * jprobe - we return without single stepping and leave that to
		 * the break-handler, which is invoked by a kprobe from
		 * jprobe_return
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
		}

		return 1;
	} else if (kprobe_running()) {
		p = __get_cpu_var(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
			return 1;
		}
	}

	/* no_kprobe: */
	preempt_enable_no_resched();
	return 0;
}

static int __kprobes arc_post_kprobe_handler(unsigned long addr,
					 struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, addr, regs);

	/* Rearm the kprobe */
	arch_arm_kprobe(cur);

	/*
	 * Returning from the trap instruction would take us to the next
	 * instruction. resume_execution() restored the original instruction
	 * at this address, so return to the same address and execute it.
	 */
	regs->ret = addr;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

/*
 * A fault can occur either for the instruction being single stepped or for
 * the pre/post handlers in the module.
 * This is applicable for applications like user probes, where we have the
 * probe in user space and the handlers in the kernel
 */

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused the fault. We reset the current kprobe and let the
		 * exception be handled as a regular exception. In our case it
		 * doesn't matter because the system will be halted
		 */
		resume_execution(cur, (unsigned long)cur->addr, regs);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		preempt_enable_no_resched();
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We are here because the instructions in the pre/post handler
		 * caused the fault.
		 */

		/* We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account for
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this can happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned zero,
		 * try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * so let do_page_fault() fix it.
		 */
		break;

	default:
		break;
	}
	return 0;
}

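/*
 * Die-notifier glue: an instruction error from the UNIMP_S breakpoint
 * arrives as DIE_IERR and is handled by arc_kprobe_handler(); the TRAP_S
 * planted for single stepping arrives as DIE_TRAP (raised by
 * trap_is_kprobe() below) and is handled by arc_post_kprobe_handler().
 */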
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	unsigned long addr = args->err;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_IERR:
		if (arc_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	case DIE_TRAP:
		if (arc_post_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	default:
		break;
	}

	return ret;
}

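/*
 * jprobe support: setjmp_pre_handler() runs as the kprobe pre-handler; it
 * saves pt_regs and the live stack and redirects execution to the jprobe
 * entry routine. That routine ends in jprobe_return(), whose unimp_s
 * re-enters kprobe handling so that longjmp_break_handler() can restore the
 * saved registers and stack.
 */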
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long sp_addr = regs->sp;

	kcb->jprobe_saved_regs = *regs;
	memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
	regs->ret = (unsigned long)(jp->entry);

	return 1;
}

void __kprobes jprobe_return(void)
{
	__asm__ __volatile__("unimp_s");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long sp_addr;

	*regs = kcb->jprobe_saved_regs;
	sp_addr = regs->sp;
	memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
	preempt_enable_no_resched();

	return 1;
}

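/*
 * kretprobe support: arch_prepare_kretprobe() swaps the probed function's
 * return address (blink) for kretprobe_trampoline. When the function
 * returns it lands on the trampoline, the kprobe registered on it
 * (trampoline_p below) fires, and trampoline_probe_handler() runs the user
 * handler and recovers the real return address.
 *
 * Client-side sketch only (names are hypothetical, not from this file):
 *
 *	static struct kretprobe my_krp = {
 *		.kp.symbol_name	= "target_function",
 *		.handler	= my_ret_handler,
 *	};
 *	register_kretprobe(&my_krp);
 */
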
static void __used kretprobe_trampoline_holder(void)
{
	__asm__ __volatile__(".global kretprobe_trampoline\n"
			     "kretprobe_trampoline:\n" "nop\n");
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->blink;

	/* Replace the return addr with trampoline addr */
	regs->blink = (unsigned long)&kretprobe_trampoline;
}

static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have a
	 * return probe installed on them, and/or more than one return probe
	 * was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->ret = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	/* By returning a non-zero value, we are telling the kprobe handler
	 * that we don't want the post_handler to run
	 */
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	/* Register a kprobe on the trampoline used by kretprobes */
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
		return 1;

	return 0;
}

void trap_is_kprobe(unsigned long cause, unsigned long address,
		    struct pt_regs *regs)
{
	notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
}