xref: /linux/kernel/kprobes.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
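
/*
 * Both hash tables below get 1 << KPROBE_HASH_BITS == 64 buckets:
 * kprobes are hashed by probed address, kretprobe instances by the
 * task they belong to.
 */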

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
/* The kprobe whose handler is currently running on this CPU, if any */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
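/*
 * Illustrative arithmetic only (both inputs are arch-defined): with
 * 4 KiB pages, MAX_INSN_SIZE == 16 and a one-byte kprobe_opcode_t,
 * this yields 4096 / 16 == 256 slots per page.
 */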

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						&kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			/*
			 * A nonzero return means the handler (e.g. a
			 * jprobe's setjmp_pre_handler) has taken over;
			 * skip the remaining handlers.
			 */
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

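/*
 * Lifecycle of a kretprobe_instance: it waits on rp->free_instances
 * until the probed function is entered, then moves to
 * rp->used_instances (and into kretprobe_inst_table, hashed by task)
 * until the function returns or the task dies.
 */
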
/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	/* Return the first free instance, if there is one */
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	/* Return the first used instance, if there is one */
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
			&kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent: copy the original opcode
 * and the arch-specific copied instruction from old_p to p.
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

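/*
 * Refuse to place probes on the kprobes infrastructure itself: code in
 * the __kprobes text section runs as part of probe handling, so a
 * probe there could recurse.
 */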
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
		&& addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	if ((!kernel_text_address((unsigned long) p->addr)) ||
		in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check if we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/* We must allow modules to probe themselves and
		 * in this case avoid incrementing the module refcount,
		 * so as to allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	/* Hook the page-fault notifier once the first active probe goes in */
	if (atomic_add_return(1, &kprobe_count) ==
				(ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}
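
/*
 * Minimal usage sketch (illustrative only, not part of this file; the
 * handler and probed address below are hypothetical):
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", kp->addr);
 *		return 0;	(zero: continue with single-stepping)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.pre_handler = my_pre,
 *		.addr = (kprobe_opcode_t *) some_kernel_function,
 *	};
 *
 *	ret = register_kprobe(&my_kp);
 */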

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
		(p->list.next == &old_p->list) &&
		(p->list.prev == &old_p->list))) {
		/* This is the only probe left on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			/*
			 * Keep the aggregate post_handler only if some
			 * remaining probe on the list still needs it.
			 */
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier()
	 * if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
				ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* TODO: verify that the probe point is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}
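
/*
 * Jprobe usage sketch (illustrative only; all names are hypothetical).
 * The handler mirrors the signature of the probed function and must
 * end by calling jprobe_return():
 *
 *	static long my_jhandler(long arg)
 *	{
 *		printk("called with %ld\n", arg);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = (kprobe_opcode_t *) my_jhandler,
 *	};
 *	my_jp.kp.addr = (kprobe_opcode_t *) probed_function;
 *	ret = register_jprobe(&my_jp);
 */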

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When
 * the probe is hit, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the return address only after the last
	   pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
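
/*
 * Kretprobe usage sketch (illustrative only; names are hypothetical).
 * The handler runs just before the probed function returns:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.maxactive = 20,
 *	};
 *	my_rp.kp.addr = (kprobe_opcode_t *) probed_function;
 *	ret = register_kretprobe(&my_rp);
 */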

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/*
	 * No race here: the kprobe is already gone, so no new instances
	 * can be created while we orphan the outstanding ones.
	 */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);