xref: /linux/kernel/kprobes.c (revision 9ce7677cfd7cd871adb457c80bea3b581b839641)
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

static DEFINE_SPINLOCK(kprobe_lock);	/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
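
/*
 * Tracks the kprobe whose handler is currently executing on this CPU,
 * so the aggregate fault and break handlers below can route the event
 * to the right probe.
 */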
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
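/*
 * For example, with 4 KiB pages and an arch where MAX_INSN_SIZE *
 * sizeof(kprobe_opcode_t) comes to 16 bytes, each page would hold
 * 4096 / 16 = 256 slots (the exact figures are arch-dependent).
 */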

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						&kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 * 	- under the kprobe_lock spinlock - during kprobe_[un]register()
 * 				OR
 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list.
 * A nonzero return from a pre_handler (e.g. a jprobe's setjmp handler)
 * means that handler has taken over; the remaining ones are skipped.
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

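/*
 * Lifecycle of a kretprobe_instance: it starts on rp->free_instances,
 * moves to rp->used_instances and the per-task hash table when the
 * probed function is entered (add_rp_inst), and goes back on the free
 * list when the function returns (recycle_rp_inst).
 */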
/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
			&kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from exit_thread or flush_thread when task tk's
 * stack is being recycled so that we can recycle any function-return probe
 * instances associated with this task. These leftover instances represent
 * probed functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kretprobe_lock, flags);
	/* Look in tk's hash bucket; instances are hashed by ri->task. */
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
}

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/*
	 * TODO: consider swapping the RA only after the last
	 * pre_handler has fired.
	 */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry_rcu(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail_rcu(&p->list, &old_p->list);
	} else
		list_add_rcu(&p->list, &old_p->list);
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spin_lock
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	arch_disarm_kprobe(p);
	hlist_del_rcu(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
		struct kprobe *p, unsigned long flags)
{
	list_del_rcu(&p->list);
	if (list_empty(&old_p->list))
		cleanup_kprobe(old_p, flags);
	else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
		&& addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;
	struct module *mod;

	if ((!kernel_text_address((unsigned long) p->addr)) ||
		in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	if ((mod = module_text_address((unsigned long) p->addr)) &&
			(unlikely(!try_module_get(mod))))
		return -EINVAL;

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto rm_kprobe;

	p->nmissed = 0;
	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	if (ret && mod)
		module_put(mod);
	return ret;
}
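
/*
 * Example usage (illustrative only; the probed symbol and the handler
 * are hypothetical and would normally live in a separate module):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.pre_handler = handler_pre,
 *		.addr	     = (kprobe_opcode_t *) do_fork,
 *	};
 *
 *	register_kprobe(&kp);
 *	...
 *	unregister_kprobe(&kp);
 *
 * A pre_handler returning 0 lets the probed instruction execute as usual.
 */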

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;
	struct module *mod;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* cleanup_*_kprobe() does the spin_unlock_irqrestore */
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);

		synchronize_sched();

		if ((mod = module_text_address((unsigned long)p->addr)))
			module_put(mod);

		if (old_p->pre_handler == aggr_pre_handler &&
				list_empty(&old_p->list))
			kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

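/*
 * A jprobe's handler is expected to have the same signature as the probed
 * function; setjmp_pre_handler switches execution to it on entry, and the
 * handler hands control back via jprobe_return() (longjmp_break_handler).
 */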
int __kprobes register_jprobe(struct jprobe *jp)
{
	/* TODO: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

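/*
 * maxactive bounds how many activations of the probed function can be
 * tracked at once (entered but not yet returned); when no free instance
 * is available, the arch code counts the miss in rp->nmissed.
 */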
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
}
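
/*
 * Example usage (illustrative only; the probed symbol and the handler
 * are hypothetical):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk("probed function returned to %p\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe rp = {
 *		.kp.addr   = (kprobe_opcode_t *) sys_open,
 *		.handler   = ret_handler,
 *		.maxactive = 20,
 *	};
 *
 *	register_kretprobe(&rp);
 */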

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);