xref: /linux/kernel/kprobes.c (revision b0148a98ec5151fec82064d95f11eb9efbc628ea)
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

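/*
 * Slot lifecycle (as implemented below): a slot starts out SLOT_CLEAN,
 * becomes SLOT_USED when get_insn_slot() hands it out, and becomes
 * SLOT_DIRTY when free_insn_slot() is called with dirty != 0, i.e. when
 * some task may still be single-stepping on it.  Dirty slots are
 * returned to SLOT_CLEAN only by collect_garbage_slots(), once
 * check_safety() has determined nothing can still be executing them.
 */
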
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

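/*
 * Make sure it is safe to reclaim dirty instruction slots.  On a
 * preemptible kernel (with CONFIG_PM) a task may have been preempted
 * while single-stepping on a slot, so we freeze all processes and fail
 * if any thread other than the current one is still runnable.
 * Otherwise a scheduler quiescent period via synchronize_sched() is
 * sufficient, since kprobe handlers run with preemption disabled.
 */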
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

retry:
	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

/* Return 1 if the slot's page became empty (and was freed or recycled),
 * otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no task is still preempted on one of the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
		int i;
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}
	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE))
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}

/* Called with kretprobe_lock held; returns the first free instance, if any */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held; returns the first used instance, if any */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							     *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument look it up,
	 * and add it to the address.  That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)((char *)p->addr + p->offset);

	if ((!kernel_text_address((unsigned long) p->addr)) ||
		in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check if we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/* We must allow modules to probe themselves and
		 * in this case avoid incrementing the module refcount,
		 * so as to allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
				(ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}

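/*
 * Example usage (a minimal sketch, not part of this file; the probed
 * symbol and all my_* names are illustrative): a module hooks a kernel
 * function by filling in a struct kprobe and registering it.
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe hit at %p\n", p->addr);
 *		return 0;	// 0 means: go on and execute the probed insn
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_fork",
 *		.pre_handler = my_pre_handler,
 *	};
 *
 *	// in module init:  ret = register_kprobe(&my_kp);
 *	// in module exit:  unregister_kprobe(&my_kp);
 */
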
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
		(p->list.next == &old_p->list) &&
		(p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/* Call unregister_page_fault_notifier() if no probes are active */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
				ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};


int __kprobes register_jprobe(struct jprobe *jp)
{
	/* TODO: verify that the probe point is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

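/*
 * Example usage (a sketch; the probed function and my_* names are
 * illustrative): a jprobe handler mirrors the signature of the probed
 * function, so it sees the same arguments, and it must finish by
 * calling jprobe_return().
 *
 *	static long my_jhandler(unsigned long clone_flags,
 *				unsigned long stack_start,
 *				struct pt_regs *regs,
 *				unsigned long stack_size,
 *				int __user *parent_tidptr,
 *				int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "do_fork: flags=0x%lx\n", clone_flags);
 *		jprobe_return();	// mandatory; control never gets here
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = JPROBE_ENTRY(my_jhandler),
 *	};
 *	my_jp.kp.symbol_name = "do_fork";
 *	ret = register_jprobe(&my_jp);
 */
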
#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/*
	 * TODO: consider swapping the return address only after the
	 * last pre_handler has fired.
	 */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

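/*
 * Example usage (a sketch; the probed symbol and my_* names are
 * illustrative): a kretprobe handler runs when the probed function
 * returns, so it can inspect the return value in the saved registers.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		// e.g. on i386 the return value is in regs->eax
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.maxactive = 20,	// allow 20 concurrent activations
 *	};
 *	my_rp.kp.symbol_name = "do_fork";
 *	ret = register_kretprobe(&my_rp);
 */
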
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);