xref: /linux/kernel/kprobes.c (revision 4bedea94545165364618d403d03b61d797acba0b)
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

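/*
 * Both hash tables below have KPROBE_TABLE_SIZE (64) buckets and are
 * indexed via hash_ptr(): kprobe_table is keyed by the probed address,
 * kretprobe_inst_table by the task that owns the instance.
 */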
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
static struct kprobe *curr_kprobe;

/* Locks kprobes: interrupts must be disabled by the caller */
void lock_kprobes(void)
{
	spin_lock(&kprobe_lock);
	kprobe_cpu = smp_processor_id();
}

void unlock_kprobes(void)
{
	kprobe_cpu = NR_CPUS;
	spin_unlock(&kprobe_lock);
}
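
/*
 * Usage sketch (hypothetical caller, for illustration only): the arch
 * breakpoint handler already runs with interrupts disabled, which is
 * why a bare spin_lock() suffices above:
 *
 *	lock_kprobes();
 *	p = get_kprobe(addr);
 *	...
 *	unlock_kprobes();
 */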

/* You have to be holding the kprobe_lock */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each(node, head) {
		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->pre_handler) {
			curr_kprobe = kp;
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		curr_kprobe = NULL;
	}
	return 0;
}

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->post_handler) {
			curr_kprobe = kp;
			kp->post_handler(kp, regs, flags);
			curr_kprobe = NULL;
		}
	}
}

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (curr_kprobe && curr_kprobe->fault_handler) {
		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
			return 1;
	}
	return 0;
}

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp = curr_kprobe;

	if (kp && kp->break_handler) {
		if (kp->break_handler(kp, regs)) {
			curr_kprobe = NULL;
			return 1;
		}
	}
	curr_kprobe = NULL;
	return 0;
}
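
/*
 * Dispatch sketch: with two kprobes kp1 and kp2 registered at the same
 * address, the aggregate pre-handler walks p->list and runs
 * kp1->pre_handler() then kp2->pre_handler(), stopping early if either
 * returns nonzero; curr_kprobe records which handler is running so a
 * fault or breakpoint can be routed back to that probe.
 */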

struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler,
	.post_handler = trampoline_post_handler
};

struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

struct kretprobe_instance *get_rp_inst(void *sara)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct task_struct *tsk;
	struct kretprobe_instance *ri;

	tsk = arch_get_kprobe_task(sara);
	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
	hlist_for_each_entry(ri, node, head, hlist) {
		if (ri->stack_addr == sara)
			return ri;
	}
	return NULL;
}

void add_rp_inst(struct kretprobe_instance *ri)
{
	struct task_struct *tsk;

	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);
	tsk = arch_get_kprobe_task(ri->stack_addr);
	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
			&kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

void recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}
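
/*
 * Instance lifecycle: register_kretprobe() pre-allocates instances on
 * rp->free_instances; add_rp_inst() moves one onto rp->used_instances
 * (and hashes it by task) when the probed function is entered;
 * recycle_rp_inst() puts it back on the free list when the function
 * returns, or frees it if the kretprobe is being unregistered.
 */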

struct hlist_head *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
{
	struct task_struct *tsk;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];

	hlist_for_each_entry(ri, node, head, hlist) {
		tsk = arch_get_kprobe_task(ri->stack_addr);
		if (tsk == tk)
			return ri;
	}
	return NULL;
}

/*
 * This function is called from do_exit() or do_execve() when task tk's
 * stack is about to be recycled. Recycle any function-return probe
 * instances associated with this task. These represent probed functions
 * that have been called but may never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&kprobe_lock, flags);
	arch_kprobe_flush_task(tk);
	spin_unlock_irqrestore(&kprobe_lock, flags);
}

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	arch_prepare_kretprobe(rp, regs);
	return 0;
}
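
/*
 * Mechanism sketch (the details are an arch-specific contract):
 * arch_prepare_kretprobe() is expected to save the real return address
 * in a kretprobe_instance and replace it on the stack with
 * &kretprobe_trampoline, so the probed function "returns" into
 * trampoline_probe_handler() (see trampoline_p above), which runs
 * rp->handler and restores the original return address.
 */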

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail(&p->list, &old_p->list);
	} else
		list_add(&p->list, &old_p->list);
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add(&p->list, &ap->list);

	INIT_HLIST_NODE(&ap->hlist);
	hlist_del(&p->hlist);
	hlist_add_head(&ap->hlist,
		&kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spinlock
 */
static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

/*
 * kprobe removal house-keeping routines. Both drop the kprobe_lock
 * (taken by the caller) before returning.
 */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	arch_disarm_kprobe(p);
	hlist_del(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
		struct kprobe *p, unsigned long flags)
{
	list_del(&p->list);
	if (list_empty(&old_p->list)) {
		cleanup_kprobe(old_p, flags);
		kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

int register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto rm_kprobe;
	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	p->nmissed = 0;
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}
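
/*
 * Example usage (a minimal sketch; my_pre_handler and the probed
 * function are purely illustrative):
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.addr = (kprobe_opcode_t *) do_fork,
 *		.pre_handler = my_pre_handler,
 *	};
 *
 *	register_kprobe(&my_kp);
 */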

void unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int register_jprobe(struct jprobe *jp)
{
	/* TODO: verify that the probe point is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
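
/*
 * Example usage (a minimal sketch; names are illustrative): a jprobe
 * handler mirrors the probed function's signature, sees its arguments,
 * and must end with jprobe_return(); remaining do_fork arguments are
 * elided here:
 *
 *	static long my_jdo_fork(unsigned long clone_flags, ...)
 *	{
 *		printk("do_fork: clone_flags=0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = (kprobe_opcode_t *) my_jdo_fork,
 *		.kp.addr = (kprobe_opcode_t *) do_fork,
 *	};
 *
 *	register_jprobe(&my_jp);
 */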

#ifdef ARCH_SUPPORTS_KRETPROBES

int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}
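
/*
 * Example usage (a minimal sketch; names are illustrative): the return
 * handler runs when the probed function returns:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk("probed function returned\n");
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.addr = (kprobe_opcode_t *) do_fork,
 *		.handler = my_ret_handler,
 *		.maxactive = 20,
 *	};
 *
 *	register_kretprobe(&my_rp);
 */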

#else /* ARCH_SUPPORTS_KRETPROBES */

int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = register_die_notifier(&kprobe_exceptions_nb);
	/* Register the trampoline probe for return probes */
	register_kprobe(&trampoline_p);
	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);